diff --git a/common.gypi b/common.gypi index b5df26fd2be34f..2e59636b1718f1 100644 --- a/common.gypi +++ b/common.gypi @@ -39,13 +39,10 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.19', + 'v8_embedder_string': '-node.20', ##### V8 defaults for Node.js ##### - # Old time default, now explicitly stated. - 'v8_use_snapshot': 1, - # Turn on SipHash for hash seed generation, addresses HashWick 'v8_use_siphash': 'true', @@ -76,52 +73,27 @@ # TODO(refack): make v8-perfetto happen 'v8_use_perfetto': 0, + 'v8_enable_pointer_compression': 0, + 'v8_enable_31bit_smis_on_64bit_arch': 0, + ##### end V8 defaults ##### 'conditions': [ ['OS == "win"', { 'os_posix': 0, 'v8_postmortem_support%': 0, + 'obj_dir': '<(PRODUCT_DIR)/obj', + 'v8_base': '<(PRODUCT_DIR)/lib/libv8_snapshot.a', }, { 'os_posix': 1, 'v8_postmortem_support%': 1, }], - ['v8_use_snapshot==1', { - 'conditions': [ - ['GENERATOR == "ninja"', { - 'obj_dir': '<(PRODUCT_DIR)/obj', - 'v8_base': '<(PRODUCT_DIR)/obj/tools/v8_gypfiles/libv8_snapshot.a', - }, { - 'obj_dir%': '<(PRODUCT_DIR)/obj.target', - 'v8_base': '<(PRODUCT_DIR)/obj.target/tools/v8_gypfiles/libv8_snapshot.a', - }], - ['OS == "win"', { - 'obj_dir': '<(PRODUCT_DIR)/obj', - 'v8_base': '<(PRODUCT_DIR)/lib/libv8_snapshot.a', - }], - ['OS == "mac"', { - 'obj_dir%': '<(PRODUCT_DIR)/obj.target', - 'v8_base': '<(PRODUCT_DIR)/libv8_snapshot.a', - }], - ], + ['GENERATOR == "ninja"', { + 'obj_dir': '<(PRODUCT_DIR)/obj', + 'v8_base': '<(PRODUCT_DIR)/obj/tools/v8_gypfiles/libv8_snapshot.a', }, { - 'conditions': [ - ['GENERATOR == "ninja"', { - 'obj_dir': '<(PRODUCT_DIR)/obj', - 'v8_base': '<(PRODUCT_DIR)/obj/tools/v8_gypfiles/libv8_nosnapshot.a', - }, { - 'obj_dir%': '<(PRODUCT_DIR)/obj.target', - 'v8_base': '<(PRODUCT_DIR)/obj.target/tools/v8_gypfiles/libv8_nosnapshot.a', - }], - ['OS == "win"', { - 'obj_dir': '<(PRODUCT_DIR)/obj', - 'v8_base': '<(PRODUCT_DIR)/lib/libv8_nosnapshot.a', - }], - ['OS == "mac"', { - 'obj_dir%': '<(PRODUCT_DIR)/obj.target', - 'v8_base': '<(PRODUCT_DIR)/libv8_nosnapshot.a', - }], - ], + 'obj_dir%': '<(PRODUCT_DIR)/obj.target', + 'v8_base': '<(PRODUCT_DIR)/obj.target/tools/v8_gypfiles/libv8_snapshot.a', }], ['openssl_fips != ""', { 'openssl_product': '<(STATIC_LIB_PREFIX)crypto<(STATIC_LIB_SUFFIX)', @@ -130,6 +102,8 @@ }], ['OS=="mac"', { 'clang%': 1, + 'obj_dir%': '<(PRODUCT_DIR)/obj.target', + 'v8_base': '<(PRODUCT_DIR)/libv8_snapshot.a', }], ], }, @@ -334,6 +308,12 @@ }], ], }], + ['v8_enable_pointer_compression == 1', { + 'defines': ['V8_COMPRESS_POINTERS'], + }], + ['v8_enable_pointer_compression == 1 or v8_enable_31bit_smis_on_64bit_arch == 1', { + 'defines': ['V8_31BIT_SMIS_ON_64BIT_ARCH'], + }], ['OS == "win"', { 'defines': [ 'WIN32', diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 6a9bbd67997311..c7f3cba05c576f 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -79,8 +79,6 @@ /tools/jsfunfuzz/jsfunfuzz /tools/jsfunfuzz/jsfunfuzz.tar.gz /tools/luci-go -/tools/mips_toolchain -/tools/mips_toolchain.tar.gz /tools/oom_dump/oom_dump /tools/oom_dump/oom_dump.o /tools/swarming_client diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 1198de8f358fbc..40c4f16c813964 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -111,6 +111,7 @@ Johan Bergström Jonathan Liu Julien Brianceau JunHo Seo +Junming Huang Kang-Hao (Kenny) Lu Karl Skomski Kevin Gibbons @@ -124,6 +125,7 @@ Marcin Cieślak Marcin Wiącek Mateusz Czeladka Matheus Marchini +Matheus 
Marchini Mathias Bynens Matt Hanselman Matthew Sporleder diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index a026749a31db52..0a1f12b5de9cba 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -156,9 +156,7 @@ declare_args() { # List of extra files to snapshot. They will be snapshotted in order so # if files export symbols used by later files, they should go first. - # - # This default is used by cctests. Projects using V8 will want to override. - v8_extra_library_files = [ "//test/cctest/test-extra.js" ] + v8_extra_library_files = [] v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64") && @@ -204,6 +202,15 @@ declare_args() { v8_enable_regexp_interpreter_threaded_dispatch = true } +# Toggle pointer compression for correctness fuzzing when building the +# clang_x64_pointer_compression toolchain. We'll correctness-compare the +# default build with the clang_x64_pointer_compression build. +if (v8_multi_arch_build && + rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) == + "clang_x64_pointer_compression") { + v8_enable_pointer_compression = !v8_enable_pointer_compression +} + # Derived defaults. if (v8_enable_verify_heap == "") { v8_enable_verify_heap = v8_enable_debugging_features @@ -231,7 +238,7 @@ if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features } if (v8_enable_shared_ro_heap == "") { - v8_enable_shared_ro_heap = !v8_enable_pointer_compression && v8_use_snapshot + v8_enable_shared_ro_heap = !v8_enable_pointer_compression } if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot @@ -249,14 +256,14 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, assert(!v8_enable_lite_mode || v8_enable_embedded_builtins, "Lite mode requires embedded builtins") -assert(!v8_enable_lite_mode || v8_use_snapshot, - "Lite mode requires a snapshot build") assert( !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, "Pointer compression is not supported with shared read-only heap enabled") -assert(v8_use_snapshot || !v8_enable_shared_ro_heap, - "Shared read-only heap requires snapshot") + +assert(v8_extra_library_files == [], + "v8_extra_library_files is no longer supported. Consider implementing " + + "custom API in C++ instead.") v8_random_seed = "314159265" v8_toolset_for_shell = "host" @@ -431,11 +438,8 @@ config("features") { if (v8_enable_handle_zapping) { defines += [ "ENABLE_HANDLE_ZAPPING" ] } - if (v8_use_snapshot) { - defines += [ "V8_USE_SNAPSHOT" ] - if (v8_enable_snapshot_native_code_counters) { - defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] - } + if (v8_enable_snapshot_native_code_counters) { + defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] } if (v8_enable_single_generation) { defines += [ "V8_ENABLE_SINGLE_GENERATION" ] @@ -644,6 +648,30 @@ config("toolchain") { defines += [ "V8_ANDROID_LOG_STDOUT" ] } + # V8_TARGET_OS_ defines. The target OS may differ from host OS e.g. in + # mksnapshot. We additionally set V8_HAVE_TARGET_OS to determine that a + # target OS has in fact been set; otherwise we internally assume that target + # OS == host OS (see v8config.h). 
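
A minimal sketch of the fallback behaviour described in the comment above, assuming a v8config.h-style header: when the build system has not set V8_HAVE_TARGET_OS, the target OS is inferred from the host compiler's predefined macros. The macro names mirror the GN defines added just below, but the fallback logic itself is an illustration and an assumption, not V8's actual v8config.h.

// Hedged illustration only: not V8's real v8config.h. If the build system did
// not set V8_HAVE_TARGET_OS (see the GN defines that follow), fall back to the
// host OS detected from compiler-predefined macros.
#include <cstdio>

#if !defined(V8_HAVE_TARGET_OS)
#if defined(__ANDROID__)  // must come before __linux__, which Android also defines
#define V8_TARGET_OS_ANDROID
#elif defined(__linux__)
#define V8_TARGET_OS_LINUX
#elif defined(__APPLE__)
#define V8_TARGET_OS_MACOSX
#elif defined(_WIN32)
#define V8_TARGET_OS_WIN
#endif
#endif

int main() {
#if defined(V8_TARGET_OS_ANDROID)
  std::printf("target OS: android\n");
#elif defined(V8_TARGET_OS_LINUX)
  std::printf("target OS: linux\n");
#elif defined(V8_TARGET_OS_MACOSX)
  std::printf("target OS: macosx\n");
#elif defined(V8_TARGET_OS_WIN)
  std::printf("target OS: win\n");
#else
  std::printf("target OS: unknown\n");
#endif
  return 0;
}
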
+ if (target_os == "android") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_ANDROID" ] + } else if (target_os == "fuchsia") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_FUCHSIA" ] + } else if (target_os == "ios") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_IOS" ] + } else if (target_os == "linux") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_LINUX" ] + } else if (target_os == "mac") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_MACOSX" ] + } else if (target_os == "win") { + defines += [ "V8_HAVE_TARGET_OS" ] + defines += [ "V8_TARGET_OS_WIN" ] + } + # TODO(jochen): Support v8_enable_prof on Windows. # TODO(jochen): Add support for compiling with simulators. @@ -895,6 +923,8 @@ action("postmortem-metadata") { "src/objects/code.h", "src/objects/data-handler.h", "src/objects/data-handler-inl.h", + "src/objects/descriptor-array.h", + "src/objects/descriptor-array-inl.h", "src/objects/feedback-cell.h", "src/objects/feedback-cell-inl.h", "src/objects/fixed-array-inl.h", @@ -913,6 +943,7 @@ action("postmortem-metadata") { "src/objects/js-promise-inl.h", "src/objects/js-promise.h", "src/objects/js-regexp-inl.h", + "src/objects/js-regexp.cc", "src/objects/js-regexp.h", "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", @@ -924,6 +955,8 @@ action("postmortem-metadata") { "src/objects/name-inl.h", "src/objects/oddball-inl.h", "src/objects/oddball.h", + "src/objects/primitive-heap-object.h", + "src/objects/primitive-heap-object-inl.h", "src/objects/scope-info.h", "src/objects/script.h", "src/objects/script-inl.h", @@ -936,6 +969,7 @@ action("postmortem-metadata") { "src/objects/string-inl.h", "src/objects/struct.h", "src/objects/struct-inl.h", + "$target_gen_dir/torque-generated/instance-types-tq.h", ] outputs = [ @@ -944,6 +978,10 @@ action("postmortem-metadata") { args = rebase_path(outputs, root_build_dir) + rebase_path(sources, root_build_dir) + + deps = [ + ":run_torque", + ] } torque_files = [ @@ -993,9 +1031,13 @@ torque_files = [ "src/builtins/proxy-set-prototype-of.tq", "src/builtins/proxy.tq", "src/builtins/reflect.tq", + "src/builtins/regexp-exec.tq", + "src/builtins/regexp-match-all.tq", "src/builtins/regexp-match.tq", "src/builtins/regexp-replace.tq", + "src/builtins/regexp-search.tq", "src/builtins/regexp-source.tq", + "src/builtins/regexp-split.tq", "src/builtins/regexp-test.tq", "src/builtins/regexp.tq", "src/builtins/string.tq", @@ -1047,6 +1089,7 @@ action("run_torque") { outputs = [ "$target_gen_dir/torque-generated/builtin-definitions-tq.h", + "$target_gen_dir/torque-generated/interface-descriptors-tq.inc", "$target_gen_dir/torque-generated/field-offsets-tq.h", "$target_gen_dir/torque-generated/class-verifiers-tq.cc", "$target_gen_dir/torque-generated/class-verifiers-tq.h", @@ -1280,31 +1323,29 @@ template("run_mksnapshot") { } } -if (v8_use_snapshot) { - run_mksnapshot("default") { +run_mksnapshot("default") { + args = [] + if (v8_enable_embedded_builtins) { + embedded_variant = "Default" + } +} +if (emit_builtins_as_inline_asm) { + asm_to_inline_asm("default") { args = [] + } +} +if (v8_use_multi_snapshots) { + run_mksnapshot("trusted") { + args = [ "--no-untrusted-code-mitigations" ] if (v8_enable_embedded_builtins) { - embedded_variant = "Default" + embedded_variant = "Trusted" } } if (emit_builtins_as_inline_asm) { - asm_to_inline_asm("default") { + asm_to_inline_asm("trusted") { args = [] } } - if (v8_use_multi_snapshots) { - 
run_mksnapshot("trusted") { - args = [ "--no-untrusted-code-mitigations" ] - if (v8_enable_embedded_builtins) { - embedded_variant = "Trusted" - } - } - if (emit_builtins_as_inline_asm) { - asm_to_inline_asm("trusted") { - args = [] - } - } - } } action("v8_dump_build_config") { @@ -1334,7 +1375,6 @@ action("v8_dump_build_config") { "v8_enable_i18n_support=$v8_enable_i18n_support", "v8_enable_verify_predictable=$v8_enable_verify_predictable", "v8_target_cpu=\"$v8_target_cpu\"", - "v8_use_snapshot=$v8_use_snapshot", "v8_enable_embedded_builtins=$v8_enable_embedded_builtins", "v8_enable_verify_csa=$v8_enable_verify_csa", "v8_enable_lite_mode=$v8_enable_lite_mode", @@ -1355,19 +1395,13 @@ action("v8_dump_build_config") { # source_set("v8_maybe_snapshot") { - if (v8_use_snapshot && v8_use_external_startup_data) { + if (v8_use_external_startup_data) { public_deps = [ ":v8_external_snapshot", ] - } else if (v8_use_snapshot) { - public_deps = [ - ":v8_snapshot", - ] } else { - # Ignore v8_use_external_startup_data setting if no snapshot is used. public_deps = [ - ":v8_init", - ":v8_nosnapshot", + ":v8_snapshot", ] } } @@ -1393,7 +1427,7 @@ v8_source_set("v8_nosnapshot") { configs = [ ":internal_config" ] } -if (v8_use_snapshot && !v8_use_external_startup_data) { +if (!v8_use_external_startup_data) { v8_source_set("v8_snapshot") { # Only targets in this file and the top-level visibility target can # depend on this. @@ -1435,7 +1469,7 @@ if (v8_use_snapshot && !v8_use_external_startup_data) { } } -if (v8_use_snapshot && v8_use_external_startup_data) { +if (v8_use_external_startup_data) { v8_source_set("v8_external_snapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. @@ -1852,6 +1886,8 @@ v8_compiler_sources = [ "src/compiler/machine-operator.h", "src/compiler/map-inference.cc", "src/compiler/map-inference.h", + "src/compiler/memory-lowering.cc", + "src/compiler/memory-lowering.h", "src/compiler/memory-optimizer.cc", "src/compiler/memory-optimizer.h", "src/compiler/node-aux-data.h", @@ -2047,6 +2083,7 @@ v8_source_set("v8_base_without_compiler") { "src/builtins/builtins-api.cc", "src/builtins/builtins-array.cc", "src/builtins/builtins-arraybuffer.cc", + "src/builtins/builtins-async-module.cc", "src/builtins/builtins-bigint.cc", "src/builtins/builtins-call.cc", "src/builtins/builtins-callsite.cc", @@ -2143,6 +2180,7 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/string-constants.h", "src/codegen/tick-counter.cc", "src/codegen/tick-counter.h", + "src/codegen/tnode.h", "src/codegen/turbo-assembler.cc", "src/codegen/turbo-assembler.h", "src/codegen/unoptimized-compilation-info.cc", @@ -2316,6 +2354,8 @@ v8_source_set("v8_base_without_compiler") { "src/heap/mark-compact.h", "src/heap/marking.cc", "src/heap/marking.h", + "src/heap/memory-measurement.cc", + "src/heap/memory-measurement.h", "src/heap/memory-reducer.cc", "src/heap/memory-reducer.h", "src/heap/object-stats.cc", @@ -2337,9 +2377,6 @@ v8_source_set("v8_base_without_compiler") { "src/heap/spaces-inl.h", "src/heap/spaces.cc", "src/heap/spaces.h", - "src/heap/store-buffer-inl.h", - "src/heap/store-buffer.cc", - "src/heap/store-buffer.h", "src/heap/stress-marking-observer.cc", "src/heap/stress-marking-observer.h", "src/heap/stress-scavenge-observer.cc", @@ -2461,6 +2498,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/api-callbacks.h", "src/objects/arguments-inl.h", "src/objects/arguments.h", + "src/objects/backing-store.cc", + "src/objects/backing-store.h", "src/objects/bigint.cc", 
"src/objects/bigint.h", "src/objects/cell-inl.h", @@ -2515,6 +2554,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/heap-object.h", "src/objects/instance-type-inl.h", "src/objects/instance-type.h", + "src/objects/internal-index.h", "src/objects/intl-objects.cc", "src/objects/intl-objects.h", "src/objects/js-array-buffer-inl.h", @@ -2558,6 +2598,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-regexp-inl.h", "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", + "src/objects/js-regexp.cc", "src/objects/js-regexp.h", "src/objects/js-relative-time-format-inl.h", "src/objects/js-relative-time-format.cc", @@ -2613,6 +2654,11 @@ v8_source_set("v8_base_without_compiler") { "src/objects/ordered-hash-table-inl.h", "src/objects/ordered-hash-table.cc", "src/objects/ordered-hash-table.h", + "src/objects/osr-optimized-code-cache-inl.h", + "src/objects/osr-optimized-code-cache.cc", + "src/objects/osr-optimized-code-cache.h", + "src/objects/primitive-heap-object-inl.h", + "src/objects/primitive-heap-object.h", "src/objects/promise-inl.h", "src/objects/promise.h", "src/objects/property-array-inl.h", @@ -2738,6 +2784,9 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/regexp-bytecode-generator-inl.h", "src/regexp/regexp-bytecode-generator.cc", "src/regexp/regexp-bytecode-generator.h", + "src/regexp/regexp-bytecode-peephole.cc", + "src/regexp/regexp-bytecode-peephole.h", + "src/regexp/regexp-bytecodes.cc", "src/regexp/regexp-bytecodes.h", "src/regexp/regexp-compiler-tonode.cc", "src/regexp/regexp-compiler.cc", @@ -2754,13 +2803,13 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/regexp-nodes.h", "src/regexp/regexp-parser.cc", "src/regexp/regexp-parser.h", - "src/regexp/regexp-special-case.h", "src/regexp/regexp-stack.cc", "src/regexp/regexp-stack.h", "src/regexp/regexp-utils.cc", "src/regexp/regexp-utils.h", "src/regexp/regexp.cc", "src/regexp/regexp.h", + "src/regexp/special-case.h", "src/roots/roots-inl.h", "src/roots/roots.cc", "src/roots/roots.h", @@ -2953,8 +3002,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-js.h", "src/wasm/wasm-limits.h", "src/wasm/wasm-linkage.h", - "src/wasm/wasm-memory.cc", - "src/wasm/wasm-memory.h", "src/wasm/wasm-module-builder.cc", "src/wasm/wasm-module-builder.h", "src/wasm/wasm-module-sourcemap.cc", @@ -3386,6 +3433,7 @@ v8_source_set("torque_base") { "src/torque/global-context.h", "src/torque/implementation-visitor.cc", "src/torque/implementation-visitor.h", + "src/torque/instance-type-generator.cc", "src/torque/instructions.cc", "src/torque/instructions.h", "src/torque/server-data.cc", @@ -3482,7 +3530,6 @@ v8_source_set("torque_ls_base") { v8_component("v8_libbase") { sources = [ - "src/base/adapters.h", "src/base/address-region.h", "src/base/atomic-utils.h", "src/base/atomicops.h", @@ -3668,6 +3715,12 @@ v8_component("v8_libbase") { ] } + if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" || + v8_current_cpu == "mips")) { + # Special UBSan 32-bit requirement. + sources += [ "src/base/ubsan.cc" ] + } + if (is_tsan && !build_with_chromium) { data += [ "tools/sanitizers/tsan_suppressions.txt" ] } @@ -3840,7 +3893,7 @@ if (current_toolchain == v8_generator_toolchain) { } } -if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) { +if (current_toolchain == v8_snapshot_toolchain) { v8_executable("mksnapshot") { visibility = [ ":*" ] # Only targets in this file can depend on this. 
@@ -4037,6 +4090,7 @@ group("v8_clusterfuzz") { ":d8(//build/toolchain/linux:clang_x64_v8_arm64)", ":d8(//build/toolchain/linux:clang_x86)", ":d8(//build/toolchain/linux:clang_x86_v8_arm)", + ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)", ] } } @@ -4115,13 +4169,10 @@ if (is_component_build) { ":torque_ls_base", ":v8_base", ":v8_headers", + ":v8_initializers", ":v8_maybe_snapshot", ] - if (v8_use_snapshot) { - public_deps += [ ":v8_initializers" ] - } - configs = [ ":internal_config" ] public_configs = [ ":external_config" ] @@ -4143,13 +4194,10 @@ if (is_component_build) { ":torque_base", ":torque_ls_base", ":v8_base", + ":v8_initializers", ":v8_maybe_snapshot", ] - if (v8_use_snapshot) { - public_deps += [ ":v8_initializers" ] - } - public_configs = [ ":external_config" ] } } diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS index 79f14286583397..542c5abd3e044b 100644 --- a/deps/v8/COMMON_OWNERS +++ b/deps/v8/COMMON_OWNERS @@ -4,7 +4,7 @@ bbudge@chromium.org binji@chromium.org bmeurer@chromium.org cbruni@chromium.org -clemensh@chromium.org +clemensb@chromium.org danno@chromium.org delphick@chromium.org gdeepti@chromium.org diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index be6a58859c5394..b3ca3548e865d2 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,1618 @@ +2019-10-16: Version 7.9.317 + + Performance and stability improvements on all platforms. + + +2019-10-16: Version 7.9.316 + + Performance and stability improvements on all platforms. + + +2019-10-16: Version 7.9.315 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.314 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.313 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.312 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.311 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.310 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.309 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.308 + + Performance and stability improvements on all platforms. + + +2019-10-15: Version 7.9.307 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.306 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.305 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.304 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.303 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.302 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.301 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.300 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.299 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.298 + + Performance and stability improvements on all platforms. + + +2019-10-14: Version 7.9.297 + + Performance and stability improvements on all platforms. + + +2019-10-13: Version 7.9.296 + + Performance and stability improvements on all platforms. + + +2019-10-12: Version 7.9.295 + + Performance and stability improvements on all platforms. 
+ + +2019-10-12: Version 7.9.294 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.293 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.292 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.291 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.290 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.289 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.288 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.287 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.286 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.285 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.284 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.283 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.282 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.281 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.280 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.279 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.278 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.277 + + Performance and stability improvements on all platforms. + + +2019-10-11: Version 7.9.276 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.275 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.274 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.273 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.272 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.271 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.270 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.269 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.268 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.267 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.266 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.265 + + Performance and stability improvements on all platforms. + + +2019-10-10: Version 7.9.264 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.263 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.262 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.261 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.260 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.259 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.258 + + Performance and stability improvements on all platforms. 
+ + +2019-10-09: Version 7.9.257 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.256 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.255 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.254 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.253 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.252 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.251 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.250 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.249 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.248 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.247 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.246 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.245 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.244 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.243 + + Performance and stability improvements on all platforms. + + +2019-10-09: Version 7.9.242 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.241 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.240 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.239 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.238 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.237 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.236 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.235 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.234 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.233 + + Performance and stability improvements on all platforms. + + +2019-10-08: Version 7.9.232 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.231 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.230 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.229 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.228 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.227 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.226 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.225 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.224 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.223 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.222 + + Performance and stability improvements on all platforms. + + +2019-10-07: Version 7.9.221 + + Performance and stability improvements on all platforms. 
+ + +2019-10-06: Version 7.9.220 + + Performance and stability improvements on all platforms. + + +2019-10-05: Version 7.9.219 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.218 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.217 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.216 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.215 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.214 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.213 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.212 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.211 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.210 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.209 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.208 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.207 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.206 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.205 + + Performance and stability improvements on all platforms. + + +2019-10-04: Version 7.9.204 + + Performance and stability improvements on all platforms. + + +2019-10-03: Version 7.9.203 + + Performance and stability improvements on all platforms. + + +2019-10-03: Version 7.9.202 + + Performance and stability improvements on all platforms. + + +2019-10-03: Version 7.9.201 + + Performance and stability improvements on all platforms. + + +2019-10-03: Version 7.9.200 + + Performance and stability improvements on all platforms. + + +2019-10-03: Version 7.9.199 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.198 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.197 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.196 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.195 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.194 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.193 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.192 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.191 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.190 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.189 + + Performance and stability improvements on all platforms. + + +2019-10-02: Version 7.9.188 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.187 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.186 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.185 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.184 + + Performance and stability improvements on all platforms. 
+ + +2019-10-01: Version 7.9.183 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.182 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.181 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.180 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.179 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.178 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.177 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.176 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.175 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.174 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.173 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.172 + + Performance and stability improvements on all platforms. + + +2019-10-01: Version 7.9.171 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.170 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.169 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.168 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.167 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.166 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.165 + + Performance and stability improvements on all platforms. + + +2019-09-30: Version 7.9.164 + + Performance and stability improvements on all platforms. + + +2019-09-29: Version 7.9.163 + + Performance and stability improvements on all platforms. + + +2019-09-28: Version 7.9.162 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.161 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.160 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.159 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.158 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.157 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.156 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.155 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.154 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.153 + + Performance and stability improvements on all platforms. + + +2019-09-27: Version 7.9.152 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.151 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.150 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.149 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.148 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.147 + + Performance and stability improvements on all platforms. 
+ + +2019-09-26: Version 7.9.146 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.145 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.144 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.143 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.142 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.141 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.140 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.139 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.138 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.137 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.136 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.135 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.134 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.133 + + Performance and stability improvements on all platforms. + + +2019-09-26: Version 7.9.132 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.131 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.130 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.129 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.128 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.127 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.126 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.125 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.124 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.123 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.122 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.121 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.120 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.119 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.118 + + Performance and stability improvements on all platforms. + + +2019-09-25: Version 7.9.117 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.116 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.115 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.114 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.113 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.112 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.111 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.110 + + Performance and stability improvements on all platforms. 
+ + +2019-09-24: Version 7.9.109 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.108 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.107 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.106 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.105 + + Performance and stability improvements on all platforms. + + +2019-09-24: Version 7.9.104 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.103 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.102 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.101 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.100 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.99 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.98 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.97 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.96 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.95 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.94 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.93 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.92 + + Performance and stability improvements on all platforms. + + +2019-09-23: Version 7.9.91 + + Performance and stability improvements on all platforms. + + +2019-09-22: Version 7.9.90 + + Performance and stability improvements on all platforms. + + +2019-09-21: Version 7.9.89 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.88 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.87 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.86 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.85 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.84 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.83 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.82 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.81 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.80 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.79 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.78 + + Performance and stability improvements on all platforms. + + +2019-09-20: Version 7.9.77 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.76 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.75 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.74 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.73 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.72 + + Performance and stability improvements on all platforms. 
+ + +2019-09-19: Version 7.9.71 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.70 + + Performance and stability improvements on all platforms. + + +2019-09-19: Version 7.9.69 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.68 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.67 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.66 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.65 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.64 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.63 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.62 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.61 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.60 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.59 + + Performance and stability improvements on all platforms. + + +2019-09-18: Version 7.9.58 + + Performance and stability improvements on all platforms. + + +2019-09-17: Version 7.9.57 + + Performance and stability improvements on all platforms. + + +2019-09-17: Version 7.9.56 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.55 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.54 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.53 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.52 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.51 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.50 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.49 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.48 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.47 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.46 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.45 + + Performance and stability improvements on all platforms. + + +2019-09-16: Version 7.9.44 + + Performance and stability improvements on all platforms. + + +2019-09-14: Version 7.9.43 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.42 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.41 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.40 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.39 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.38 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.37 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.36 + + Performance and stability improvements on all platforms. + + +2019-09-13: Version 7.9.35 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.34 + + Performance and stability improvements on all platforms. 
+ + +2019-09-12: Version 7.9.33 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.32 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.31 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.30 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.29 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.28 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.27 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.26 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.25 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.24 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.23 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.22 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.21 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.20 + + Performance and stability improvements on all platforms. + + +2019-09-12: Version 7.9.19 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.18 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.17 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.16 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.15 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.14 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.13 + + Performance and stability improvements on all platforms. + + +2019-09-11: Version 7.9.12 + + Performance and stability improvements on all platforms. + + +2019-09-10: Version 7.9.11 + + Performance and stability improvements on all platforms. + + +2019-09-10: Version 7.9.10 + + Performance and stability improvements on all platforms. + + +2019-09-10: Version 7.9.9 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.8 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.7 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.6 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.5 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.4 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.3 + + Performance and stability improvements on all platforms. + + +2019-09-09: Version 7.9.2 + + Performance and stability improvements on all platforms. + + +2019-09-08: Version 7.9.1 + + Performance and stability improvements on all platforms. + + +2019-09-05: Version 7.8.285 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.284 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.283 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.282 + + Performance and stability improvements on all platforms. + + +2019-09-04: Version 7.8.281 + + Performance and stability improvements on all platforms. 
+ + +2019-09-04: Version 7.8.280 + + Performance and stability improvements on all platforms. + + 2019-09-04: Version 7.8.279 Performance and stability improvements on all platforms. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index a7d4081edb856c..0faa57e5b05c04 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -23,11 +23,10 @@ vars = { 'android_url': 'https://android.googlesource.com', 'download_gcmole': False, 'download_jsfunfuzz': False, - 'download_mips_toolchain': False, 'check_v8_header_includes': False, # GN CIPD package version. - 'gn_version': 'git_revision:152c5144ceed9592c20f0c8fd55769646077569b', + 'gn_version': 'git_revision:ad9e442d92dcd9ee73a557428cfc336b55cbd533', # luci-go CIPD package version. 'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c', @@ -72,15 +71,15 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '693faeda4ee025796c7e473d953a5a7b6ad64c93', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '082f11b29976c3be67dddd74bd75c6d1793201c7', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'f38bc1796282c61087dcf15abc61b8fd18a68402', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ba97f6065ed1e9336585468dd85e680cf09d5166', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '53f6b233a41ec982d8445996247093f7aaf41639', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '5005010d694e16571b8dfbf07d70817841f80a69', 'v8/third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e2897773b97b65f70b0bb15b753c73d9f6e3afdb', 'v8/buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '74cfb57006f83cfe050817526db359d5c8a11628', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'cf454b247c611167388742c7a31ef138a6031172', 'v8/buildtools/clang_format/script': Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917', 'v8/buildtools/linux64': { @@ -122,7 +121,7 @@ deps = { 'v8/base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '5e4fce17a9d2439c44a7b57ceecef6df9287ec2f', 'v8/third_party/android_ndk': { - 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '62582753e869484bf0cc7f7e8d184ce0077033c2', + 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '89e8db0cdf323af8bc24de875d7d2a43a66bf10e', 'condition': 'checkout_android', }, 'v8/third_party/android_sdk/public': { @@ -168,7 +167,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e7c719c3e85f76938bf4fef0ba37c27f89246f71', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'b9fad2fbcc499b984d88f4c4aec26d162297efae', 'condition': 'checkout_android', }, 'v8/third_party/colorama/src': { @@ -180,19 +179,19 @@ deps = { 'condition': 'checkout_fuchsia', }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '565f1b848215b77c3732bca345fe76a0431d8b34', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f2fb48c3b3d79a75a88a99fba6576b25d42ec528', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25', 
'v8/third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783', 'v8/tools/swarming_client': - Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '96f125709acfd0b48fc1e5dae7d6ea42291726ac', + Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '885b3febcc170a60f25795304e60927b77d1e92d', 'v8/test/benchmarks/data': Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f', 'v8/test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '59a1a016b7cf5cf43f66b274c7d1db4ec6066935', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd49777de27240262fa65c3b49dc014839e6897da', 'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b', 'v8/third_party/qemu-linux-x64': { @@ -216,7 +215,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2fef805e5b05b26a8c87c47865590b5f43218611', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c5d85f1e9d3a01e4de2ccf4dfaa7847653ae9121', 'v8/tools/luci-go': { 'packages': [ { @@ -246,7 +245,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/third_party/perfetto': - Var('android_url') + '/platform/external/perfetto.git' + '@' + '01615892494a9a8dc84414962d0a817bf97de2c2', + Var('android_url') + '/platform/external/perfetto.git' + '@' + '28b633cd961b50c4c75bfb7f62eeac79e27c1a79', 'v8/third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91', } @@ -391,13 +390,6 @@ hooks = [ 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', '--arch=x86'], }, - { - 'name': 'sysroot_mips', - 'pattern': '.', - 'condition': '(checkout_linux and checkout_mips)', - 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', - '--arch=mips'], - }, { 'name': 'sysroot_x64', 'pattern': '.', @@ -495,19 +487,6 @@ hooks = [ 'condition': 'host_os == "mac" and checkout_fuchsia', 'action': ['python', 'v8/tools/clang/scripts/download_objdump.py'], }, - { - 'name': 'mips_toolchain', - 'pattern': '.', - 'condition': 'download_mips_toolchain', - 'action': [ 'download_from_google_storage', - '--no_resume', - '--platform=linux', - '--no_auth', - '-u', - '--bucket', 'chromium-v8', - '-s', 'v8/tools/mips_toolchain.tar.gz.sha1', - ], - }, # Download and initialize "vpython" VirtualEnv environment packages. { 'name': 'vpython_common', diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 9ab84b1e2759de..e096d3c950fc50 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -16,7 +16,8 @@ per-file BUILD.gn=file:COMMON_OWNERS per-file DEPS=file:INFRA_OWNERS # For Test262 rolls. 
per-file DEPS=mathias@chromium.org -per-file PRESUBMIT=file:INFRA_OWNERS +per-file DEPS=syg@chromium.org +per-file PRESUBMIT.py=file:INFRA_OWNERS per-file codereview.settings=file:INFRA_OWNERS per-file AUTHORS=file:COMMON_OWNERS diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 201bf55f714b5a..67986d83031c38 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -32,6 +32,7 @@ """ import json +import os import re import sys @@ -134,8 +135,68 @@ def _CheckUnwantedDependencies(input_api, output_api): # Restore sys.path to what it was before. sys.path = original_sys_path + def _FilesImpactedByDepsChange(files): + all_files = [f.AbsoluteLocalPath() for f in files] + deps_files = [p for p in all_files if IsDepsFile(p)] + impacted_files = union([_CollectImpactedFiles(path) for path in deps_files]) + impacted_file_objs = [ImpactedFile(path) for path in impacted_files] + return impacted_file_objs + + def IsDepsFile(p): + return os.path.isfile(p) and os.path.basename(p) == 'DEPS' + + def union(list_of_lists): + """Ensure no duplicates""" + return set(sum(list_of_lists, [])) + + def _CollectImpactedFiles(deps_file): + # TODO(liviurau): Do not walk paths twice. Then we have no duplicates. + # Higher level DEPS changes may dominate lower level DEPS changes. + # TODO(liviurau): Check if DEPS changed in the right way. + # 'include_rules' impact c++ files but 'vars' or 'deps' do not. + # Maybe we just eval both old and new DEPS content and check + # if the list are the same. + result = [] + parent_dir = os.path.dirname(deps_file) + for relative_f in input_api.change.AllFiles(parent_dir): + abs_f = os.path.join(parent_dir, relative_f) + if CppChecker.IsCppFile(abs_f): + result.append(abs_f) + return result + + class ImpactedFile(object): + """Duck type version of AffectedFile needed to check files under directories + where a DEPS file changed. Extend the interface along the line of + AffectedFile if you need it for other checks.""" + + def __init__(self, path): + self._path = path + + def LocalPath(self): + path = self._path.replace(os.sep, '/') + return os.path.normpath(path) + + def ChangedContents(self): + with open(self._path) as f: + # TODO(liviurau): read only '#include' lines + lines = f.readlines() + return enumerate(lines, start=1) + + def _FilterDuplicates(impacted_files, affected_files): + """"We include all impacted files but exclude affected files that are also + impacted. Files impacted by DEPS changes take precedence before files + affected by direct changes.""" + result = impacted_files[:] + only_paths = set([imf.LocalPath() for imf in impacted_files]) + for af in affected_files: + if not af.LocalPath() in only_paths: + result.append(af) + return result + added_includes = [] - for f in input_api.AffectedFiles(): + affected_files = input_api.AffectedFiles() + impacted_by_deps = _FilesImpactedByDepsChange(affected_files) + for f in _FilterDuplicates(impacted_by_deps, affected_files): if not CppChecker.IsCppFile(f.LocalPath()): continue @@ -301,39 +362,43 @@ def FilterFile(affected_file): return [] +def _CheckGenderNeutralInLicenses(input_api, output_api): + # License files are taken as is, even if they include gendered pronouns. 
+ def LicenseFilter(path): + input_api.FilterSourceFile(path, black_list=_LICENSE_FILE) + + return input_api.canned_checks.CheckGenderNeutral( + input_api, output_api, source_file_filter=LicenseFilter) + + +def _RunTestsWithVPythonSpec(input_api, output_api): + return input_api.RunTests( + input_api.canned_checks.CheckVPythonSpec(input_api, output_api)) + + def _CommonChecks(input_api, output_api): """Checks common to both upload and commit.""" - results = [] # TODO(machenbach): Replace some of those checks, e.g. owners and copyright, # with the canned PanProjectChecks. Need to make sure that the checks all # pass on all existing files. - results.extend(input_api.canned_checks.CheckOwnersFormat( - input_api, output_api)) - results.extend(input_api.canned_checks.CheckOwners( - input_api, output_api)) - results.extend(_CheckCommitMessageBugEntry(input_api, output_api)) - results.extend(input_api.canned_checks.CheckPatchFormatted( - input_api, output_api)) - - # License files are taken as is, even if they include gendered pronouns. - license_filter = lambda path: input_api.FilterSourceFile( - path, black_list=_LICENSE_FILE) - results.extend(input_api.canned_checks.CheckGenderNeutral( - input_api, output_api, source_file_filter=license_filter)) - - results.extend(_V8PresubmitChecks(input_api, output_api)) - results.extend(_CheckUnwantedDependencies(input_api, output_api)) - results.extend( - _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api)) - results.extend(_CheckHeadersHaveIncludeGuards(input_api, output_api)) - results.extend( - _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api)) - results.extend(_CheckJSONFiles(input_api, output_api)) - results.extend(_CheckMacroUndefs(input_api, output_api)) - results.extend(_CheckNoexceptAnnotations(input_api, output_api)) - results.extend(input_api.RunTests( - input_api.canned_checks.CheckVPythonSpec(input_api, output_api))) - return results + checks = [ + input_api.canned_checks.CheckOwnersFormat, + input_api.canned_checks.CheckOwners, + _CheckCommitMessageBugEntry, + input_api.canned_checks.CheckPatchFormatted, + _CheckGenderNeutralInLicenses, + _V8PresubmitChecks, + _CheckUnwantedDependencies, + _CheckNoProductionCodeUsingTestOnlyFunctions, + _CheckHeadersHaveIncludeGuards, + _CheckNoInlineHeaderIncludesInNormalHeaders, + _CheckJSONFiles, + _CheckMacroUndefs, + _CheckNoexceptAnnotations, + _RunTestsWithVPythonSpec, + ] + + return sum([check(input_api, output_api) for check in checks], []) def _SkipTreeCheck(input_api, output_api): @@ -395,7 +460,7 @@ def _CheckMacroUndefs(input_api, output_api): """ Checks that each #define in a .cc file is eventually followed by an #undef. - TODO(clemensh): This check should eventually be enabled for all cc files via + TODO(clemensb): This check should eventually be enabled for all cc files via tools/presubmit.py (https://crbug.com/v8/6811). 
""" def FilterFile(affected_file): @@ -404,13 +469,29 @@ def FilterFile(affected_file): white_list = (r'.+\.cc',r'.+\.cpp',r'.+\.c') return input_api.FilterSourceFile(affected_file, white_list=white_list) + def Touches(line): + return line.startswith('+') or line.startswith('-') + + def InvolvesMacros(text): + return define_pattern.match(text) or undef_pattern.match(text) + def TouchesMacros(f): - for line in f.GenerateScmDiff().splitlines(): - if not line.startswith('+') and not line.startswith('-'): - continue - if define_pattern.match(line[1:]) or undef_pattern.match(line[1:]): - return True - return False + return any(Touches(line) and InvolvesMacros(line[1:]) + for line in f.GenerateScmDiff().splitlines()) + + def CollectUndefsWithNoDef(defined_macros, errors, f, line, line_nr): + define_match = define_pattern.match(line) + if define_match: + name = define_match.group(1) + defined_macros[name] = line_nr + undef_match = undef_pattern.match(line) + if undef_match and not "// NOLINT" in line: + name = undef_match.group(1) + if name in defined_macros: + del defined_macros[name] + else: + errors.append('{}:{}: Macro named \'{}\' was not defined before.' + .format(f.LocalPath(), line_nr, name)) define_pattern = input_api.re.compile(r'#define (\w+)') undef_pattern = input_api.re.compile(r'#undef (\w+)') @@ -422,25 +503,9 @@ def TouchesMacros(f): defined_macros = dict() with open(f.LocalPath()) as fh: - line_nr = 0 - for line in fh: - line_nr += 1 - - define_match = define_pattern.match(line) - if define_match: - name = define_match.group(1) - defined_macros[name] = line_nr - - undef_match = undef_pattern.match(line) - if undef_match: - if "// NOLINT" in line: - continue - name = undef_match.group(1) - if not name in defined_macros: - errors.append('{}:{}: Macro named \'{}\' was not defined before.' - .format(f.LocalPath(), line_nr, name)) - else: - del defined_macros[name] + for line_nr, line in enumerate(fh, start=1): + CollectUndefsWithNoDef(defined_macros, errors, f, line, line_nr) + for name, line_nr in sorted(defined_macros.items(), key=lambda e: e[1]): errors.append('{}:{}: Macro missing #undef: {}' .format(f.LocalPath(), line_nr, name)) @@ -463,7 +528,7 @@ def _CheckNoexceptAnnotations(input_api, output_api): Omitting it at some places can result in weird compiler errors if this is mixed with other classes that have the annotation. - TODO(clemensh): This check should eventually be enabled for all files via + TODO(clemensb): This check should eventually be enabled for all files via tools/presubmit.py (https://crbug.com/v8/8616). """ diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index e55c4cf3468460..2644dea36bffda 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -35,15 +35,6 @@ declare_args() { # on platform and embedder level. v8_enable_raw_heap_snapshots = false - # Enable the snapshot feature, for fast context creation. - # https://v8.dev/blog/custom-startup-snapshots - # TODO(thakis): Make snapshots work in 64-bit win/cross builds, - # https://803591 - # On Mac hosts, 32-bit builds targeting Windows can't use snapshots, see - # https://crbug.com/794838 - v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64") && - !(is_win && host_os == "mac" && target_cpu == "x86") - # Enable several snapshots side-by-side (e.g. default and for trusted code). 
v8_use_multi_snapshots = false @@ -71,8 +62,8 @@ declare_args() { if (v8_use_external_startup_data == "") { # If not specified as a gn arg, use external startup data by default if - # a snapshot is used and if we're not on ios. - v8_use_external_startup_data = v8_use_snapshot && !is_ios + # we're not on ios. + v8_use_external_startup_data = !is_ios } if (v8_use_multi_snapshots) { @@ -213,9 +204,17 @@ template("v8_executable") { template("v8_component") { component(target_name) { - forward_variables_from(invoker, "*", [ "configs" ]) + forward_variables_from(invoker, + "*", + [ + "configs", + "remove_configs", + ]) configs -= v8_remove_configs configs += v8_add_configs + if (defined(invoker.remove_configs)) { + configs -= invoker.remove_configs + } configs += invoker.configs } } diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index b64069847bc1cc..1e0794df7a2796 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -1,6 +1,7 @@ adamk@chromium.org danno@chromium.org ulan@chromium.org +verwaest@chromium.org yangguo@chromium.org per-file *DEPS=file:../COMMON_OWNERS diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index c4ff51b06078bf..51f3c6f68a113c 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -227,6 +227,15 @@ domain Debugger # Script source. string scriptSource + # Returns bytecode for the WebAssembly script with given id. + command getWasmBytecode + parameters + # Id of the Wasm script to get source for. + Runtime.ScriptId scriptId + returns + # Script source. + binary bytecode + # Returns stack trace with given `stackTraceId`. experimental command getStackTrace parameters @@ -237,7 +246,7 @@ domain Debugger # Stops on the next JavaScript statement. command pause - experimental command pauseOnAsyncCall + experimental deprecated command pauseOnAsyncCall parameters # Debugger will pause when async call with given stack trace is started. Runtime.StackTraceId parentStackTraceId @@ -435,7 +444,7 @@ domain Debugger # Steps into the function call. command stepInto parameters - # Debugger will issue additional Debugger.paused notification if any async task is scheduled + # Debugger will pause on the execution of the first async task which was scheduled # before next pause. experimental optional boolean breakOnAsyncCall @@ -479,9 +488,8 @@ domain Debugger optional Runtime.StackTrace asyncStackTrace # Async stack trace, if any. experimental optional Runtime.StackTraceId asyncStackTraceId - # Just scheduled async call will have this stack trace as parent stack during async execution. - # This field is available only after `Debugger.stepInto` call with `breakOnAsynCall` flag. - experimental optional Runtime.StackTraceId asyncCallStackTraceId + # Never present, will be removed. + experimental deprecated optional Runtime.StackTraceId asyncCallStackTraceId # Fired when the virtual machine resumed execution. event resumed @@ -1243,9 +1251,12 @@ domain Runtime # resolved. optional boolean awaitPromise # Whether to throw an exception if side effect cannot be ruled out during evaluation. + # This implies `disableBreaks` below. experimental optional boolean throwOnSideEffect # Terminate execution after timing out (number of milliseconds). experimental optional TimeDelta timeout + # Disable breakpoints during execution. + experimental optional boolean disableBreaks returns # Evaluation result. 
RemoteObject result diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index 6908aeaa88a1b0..18d585d6d9e314 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -5,6 +5,8 @@ #ifndef V8_LIBPLATFORM_LIBPLATFORM_H_ #define V8_LIBPLATFORM_LIBPLATFORM_H_ +#include + #include "libplatform/libplatform-export.h" #include "libplatform/v8-tracing.h" #include "v8-platform.h" // NOLINT(build/include) @@ -70,11 +72,10 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform, * The |platform| has to be created using |NewDefaultPlatform|. * */ -V8_PLATFORM_EXPORT V8_DEPRECATE_SOON( - "Access the DefaultPlatform directly", - void SetTracingController( - v8::Platform* platform, - v8::platform::tracing::TracingController* tracing_controller)); +V8_DEPRECATE_SOON("Access the DefaultPlatform directly") +V8_PLATFORM_EXPORT void SetTracingController( + v8::Platform* platform, + v8::platform::tracing::TracingController* tracing_controller); } // namespace platform } // namespace v8 diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index cfa2aaba96d12e..5f53f21d55302d 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -24,6 +24,7 @@ namespace Runtime { namespace API { class RemoteObject; class StackTrace; +class StackTraceId; } } namespace Schema { @@ -229,12 +230,20 @@ class V8_EXPORT V8InspectorClient { struct V8_EXPORT V8StackTraceId { uintptr_t id; std::pair debugger_id; + bool should_pause = false; V8StackTraceId(); + V8StackTraceId(const V8StackTraceId&) = default; V8StackTraceId(uintptr_t id, const std::pair debugger_id); + V8StackTraceId(uintptr_t id, const std::pair debugger_id, + bool should_pause); + explicit V8StackTraceId(const StringView&); + V8StackTraceId& operator=(const V8StackTraceId&) = default; + V8StackTraceId& operator=(V8StackTraceId&&) noexcept = default; ~V8StackTraceId() = default; bool IsInvalid() const; + std::unique_ptr ToString(); }; class V8_EXPORT V8Inspector { diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index 6ecddf45d6ae92..29f391b673a1b8 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -112,6 +112,8 @@ using PlatformSmiTagging = SmiTagging; using PlatformSmiTagging = SmiTagging; #endif +// TODO(ishell): Consinder adding kSmiShiftBits = kSmiShiftSize + kSmiTagSize +// since it's used much more often than the inividual constants. const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; const int kSmiMinValue = static_cast(PlatformSmiTagging::kSmiMinValue); @@ -327,14 +329,11 @@ class Internals { #ifdef V8_COMPRESS_POINTERS // See v8:7703 or src/ptr-compr.* for details about pointer compression. static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32; - static constexpr size_t kPtrComprIsolateRootBias = - kPtrComprHeapReservationSize / 2; static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32; V8_INLINE static internal::Address GetRootFromOnHeapAddress( internal::Address addr) { - return (addr + kPtrComprIsolateRootBias) & - -static_cast(kPtrComprIsolateRootAlignment); + return addr & -static_cast(kPtrComprIsolateRootAlignment); } V8_INLINE static internal::Address DecompressTaggedAnyField( @@ -381,6 +380,10 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); // language mode is strict. 
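A note on the GetRootFromOnHeapAddress() change just above: dropping kPtrComprIsolateRootBias only works if the isolate root coincides with the 4 GiB-aligned start of the pointer-compression reservation, so masking off the low 32 bits of any on-heap address already yields the root. A minimal arithmetic sketch, with a made-up root value:

#include <stdint.h>

// Hypothetical 4 GiB-aligned isolate root.
constexpr uint64_t kRoot = 0x00007F2300000000;
// Any address inside the 4 GiB reservation, e.g. the root plus some offset.
constexpr uint64_t kAddr = kRoot + 0x12345678;
// Masking off the low 32 bits recovers the root directly, so the former
// half-reservation bias is no longer needed.
static_assert((kAddr & ~uint64_t{0xFFFFFFFF}) == kRoot,
              "root recovered by masking the low 32 bits");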
V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate); +// A base class for backing stores, which is needed due to vagaries of +// how static casts work with std::shared_ptr. +class BackingStoreBase {}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index b707fafc49229a..c6e78f238197df 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -367,9 +367,8 @@ class Platform { * |isolate|. Tasks posted for the same isolate should be execute in order of * scheduling. The definition of "foreground" is opaque to V8. */ - V8_DEPRECATE_SOON( - "Use a taskrunner acquired by GetForegroundTaskRunner instead.", - virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0; + V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.") + virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0; /** * Schedules a task to be invoked on a foreground thread wrt a specific @@ -377,10 +376,9 @@ class Platform { * Tasks posted for the same isolate should be execute in order of * scheduling. The definition of "foreground" is opaque to V8. */ - V8_DEPRECATE_SOON( - "Use a taskrunner acquired by GetForegroundTaskRunner instead.", - virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task, - double delay_in_seconds)) = 0; + V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.") + virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task, + double delay_in_seconds) = 0; /** * Schedules a task to be invoked on a foreground thread wrt a specific @@ -390,10 +388,8 @@ class Platform { * starved for an arbitrarily long time if no idle time is available. * The definition of "foreground" is opaque to V8. */ - V8_DEPRECATE_SOON( - "Use a taskrunner acquired by GetForegroundTaskRunner instead.", - virtual void CallIdleOnForegroundThread(Isolate* isolate, - IdleTask* task)) { + V8_DEPRECATED("Use a taskrunner acquired by GetForegroundTaskRunner instead.") + virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) { // This must be overriden if |IdleTasksEnabled()|. abort(); } diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 360850b631c7f9..b58534c89d9ffb 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -6,8 +6,10 @@ #define V8_V8_PROFILER_H_ #include +#include #include #include + #include "v8.h" // NOLINT(build/include) /** @@ -143,9 +145,8 @@ class V8_EXPORT CpuProfileNode { unsigned GetHitCount() const; /** Returns function entry UID. */ - V8_DEPRECATE_SOON( - "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.", - unsigned GetCallUid() const); + V8_DEPRECATED("Use GetScriptId, GetLineNumber, and GetColumnNumber instead.") + unsigned GetCallUid() const; /** Returns id of the node. The id is unique within the tree */ unsigned GetNodeId() const; @@ -375,14 +376,14 @@ class V8_EXPORT CpuProfiler { * Recording the forced sample does not contribute to the aggregated * profile statistics. */ - V8_DEPRECATED("Use static CollectSample(Isolate*) instead.", - void CollectSample()); + V8_DEPRECATED("Use static CollectSample(Isolate*) instead.") + void CollectSample(); /** * Tells the profiler whether the embedder is idle. 
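The deprecation messages on CallOnForegroundThread and its delayed/idle variants point embedders at GetForegroundTaskRunner. A rough sketch of the replacement pattern; MyTask and PostToForeground are placeholder names, not part of the patch:

#include <memory>

#include "v8-platform.h"

// Placeholder task type; a real embedder task does its work in Run().
class MyTask : public v8::Task {
 public:
  void Run() override {}
};

// Instead of platform->CallOnForegroundThread(isolate, task), fetch the
// isolate's foreground task runner and post (possibly delayed) tasks to it.
void PostToForeground(v8::Platform* platform, v8::Isolate* isolate) {
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::make_unique<MyTask>());
  runner->PostDelayedTask(std::make_unique<MyTask>(), /*delay_in_seconds=*/0.5);
}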
*/ - V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.", - void SetIdle(bool is_idle)); + V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.") + void SetIdle(bool is_idle); /** * Generate more detailed source positions to code objects. This results in @@ -989,7 +990,8 @@ struct HeapStatsUpdate { V(LazyCompile) \ V(RegExp) \ V(Script) \ - V(Stub) + V(Stub) \ + V(Relocation) /** * Note that this enum may be extended in the future. Please include a default @@ -1022,10 +1024,12 @@ class V8_EXPORT CodeEvent { const char* GetComment(); static const char* GetCodeEventTypeName(CodeEventType code_event_type); + + uintptr_t GetPreviousCodeStartAddress(); }; /** - * Interface to listen to code creation events. + * Interface to listen to code creation and code relocation events. */ class V8_EXPORT CodeEventHandler { public: @@ -1037,9 +1041,26 @@ class V8_EXPORT CodeEventHandler { explicit CodeEventHandler(Isolate* isolate); virtual ~CodeEventHandler(); + /** + * Handle is called every time a code object is created or moved. Information + * about each code event will be available through the `code_event` + * parameter. + * + * When the CodeEventType is kRelocationType, the code for this CodeEvent has + * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`. + */ virtual void Handle(CodeEvent* code_event) = 0; + /** + * Call `Enable()` to starts listening to code creation and code relocation + * events. These events will be handled by `Handle()`. + */ void Enable(); + + /** + * Call `Disable()` to stop listening to code creation and code relocation + * events. + */ void Disable(); private: diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index ef90963d2540d1..55d44e4bafab67 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 7 -#define V8_MINOR_VERSION 8 -#define V8_BUILD_NUMBER 279 -#define V8_PATCH_LEVEL 17 +#define V8_MINOR_VERSION 9 +#define V8_BUILD_NUMBER 317 +#define V8_PATCH_LEVEL 22 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 9d0b6a6c65479e..dc75012b2e4921 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -97,6 +97,10 @@ template class Global; template class TracedGlobal; +template +class TracedReference; +template +class TracedReferenceBase; template class PersistentValueMap; template class PersistentValueMapBase; @@ -282,7 +286,8 @@ class Local { V8_INLINE static Local New(Isolate* isolate, Local that); V8_INLINE static Local New(Isolate* isolate, const PersistentBase& that); - V8_INLINE static Local New(Isolate* isolate, const TracedGlobal& that); + V8_INLINE static Local New(Isolate* isolate, + const TracedReferenceBase& that); private: friend class Utils; @@ -312,7 +317,13 @@ class Local { template friend class ReturnValue; template + friend class Traced; + template friend class TracedGlobal; + template + friend class TracedReferenceBase; + template + friend class TracedReference; explicit V8_INLINE Local(T* that) : val_(that) {} V8_INLINE static Local New(Isolate* isolate, T* that); @@ -793,22 +804,10 @@ template using UniquePersistent = Global; /** - * Trait specifying behavior of |TracedGlobal|. + * Deprecated. Use |TracedReference| instead. 
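To illustrate the relocation events now documented on CodeEventHandler, a hedged sketch of a listener that reports code moves; MoveLogger and the printf format are illustrative only:

#include <cstdio>

#include "v8-profiler.h"

// Reports every code relocation observed while enabled.
class MoveLogger : public v8::CodeEventHandler {
 public:
  explicit MoveLogger(v8::Isolate* isolate) : v8::CodeEventHandler(isolate) {}

  void Handle(v8::CodeEvent* event) override {
    if (event->GetCodeType() == v8::CodeEventType::kRelocationType) {
      std::printf("code moved %p -> %p\n",
                  reinterpret_cast<void*>(event->GetPreviousCodeStartAddress()),
                  reinterpret_cast<void*>(event->GetCodeStartAddress()));
    }
  }
};

// Usage: MoveLogger logger(isolate); logger.Enable(); ...; logger.Disable();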
*/ template -struct TracedGlobalTrait { - /** - * Specifies whether |TracedGlobal| should clear its handle on destruction. - * - * V8 will *not* clear the embedder-side memory of the handle. The embedder is - * expected to report all |TracedGlobal| handles through - * |EmbedderHeapTracer| upon garabge collection. - * - * See |EmbedderHeapTracer::IsRootForNonTracingGC| for handling with - * non-tracing GCs in V8. - */ - static constexpr bool kRequiresExplicitDestruction = true; -}; +struct TracedGlobalTrait {}; /** * A traced handle with copy and move semantics. The handle is to be used @@ -821,15 +820,131 @@ struct TracedGlobalTrait { * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should * be treated as root or not. * - * For destruction semantics see |TracedGlobalTrait|. + * Note that the base class cannot be instantiated itself. Choose from + * - TracedGlobal + * - TracedReference */ template -class TracedGlobal { +class TracedReferenceBase { public: + /** + * Returns true if this TracedReferenceBase is empty, i.e., has not been + * assigned an object. + */ + bool IsEmpty() const { return val_ == nullptr; } + + /** + * If non-empty, destroy the underlying storage cell. |IsEmpty| will return + * true after this call. + */ + V8_INLINE void Reset(); + + /** + * Construct a Local from this handle. + */ + Local Get(Isolate* isolate) const { return Local::New(isolate, *this); } + + template + V8_INLINE bool operator==(const TracedReferenceBase& that) const { + internal::Address* a = reinterpret_cast(val_); + internal::Address* b = reinterpret_cast(that.val_); + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; + return *a == *b; + } + + template + V8_INLINE bool operator==(const Local& that) const { + internal::Address* a = reinterpret_cast(val_); + internal::Address* b = reinterpret_cast(that.val_); + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; + return *a == *b; + } + + template + V8_INLINE bool operator!=(const TracedReferenceBase& that) const { + return !operator==(that); + } + + template + V8_INLINE bool operator!=(const Local& that) const { + return !operator==(that); + } + + /** + * Assigns a wrapper class ID to the handle. + */ + V8_INLINE void SetWrapperClassId(uint16_t class_id); + + /** + * Returns the class ID previously assigned to this handle or 0 if no class ID + * was previously assigned. + */ + V8_INLINE uint16_t WrapperClassId() const; + + /** + * Adds a finalization callback to the handle. The type of this callback is + * similar to WeakCallbackType::kInternalFields, i.e., it will pass the + * parameter and the first two internal fields of the object. + * + * The callback is then supposed to reset the handle in the callback. No + * further V8 API may be called in this callback. In case additional work + * involving V8 needs to be done, a second callback can be scheduled using + * WeakCallbackInfo::SetSecondPassCallback. + */ + V8_INLINE void SetFinalizationCallback( + void* parameter, WeakCallbackInfo::Callback callback); + + template + V8_INLINE TracedReferenceBase& As() const { + return reinterpret_cast&>( + const_cast&>(*this)); + } + + private: + enum DestructionMode { kWithDestructor, kWithoutDestructor }; + + /** + * An empty TracedReferenceBase without storage cell. 
+ */ + TracedReferenceBase() = default; + + V8_INLINE static T* New(Isolate* isolate, T* that, void* slot, + DestructionMode destruction_mode); + + T* val_ = nullptr; + + friend class EmbedderHeapTracer; + template + friend class Local; + friend class Object; + template + friend class TracedGlobal; + template + friend class TracedReference; + template + friend class ReturnValue; +}; + +/** + * A traced handle with destructor that clears the handle. For more details see + * TracedReferenceBase. + */ +template +class TracedGlobal : public TracedReferenceBase { + public: + using TracedReferenceBase::Reset; + + /** + * Destructor resetting the handle. + */ + ~TracedGlobal() { this->Reset(); } + /** * An empty TracedGlobal without storage cell. */ - TracedGlobal() = default; + TracedGlobal() : TracedReferenceBase() {} /** * Construct a TracedGlobal from a Local. @@ -838,8 +953,9 @@ class TracedGlobal { * pointing to the same object. */ template - TracedGlobal(Isolate* isolate, Local that) - : val_(New(isolate, *that, &val_)) { + TracedGlobal(Isolate* isolate, Local that) : TracedReferenceBase() { + this->val_ = this->New(isolate, that.val_, &this->val_, + TracedReferenceBase::kWithDestructor); TYPE_CHECK(T, S); } @@ -905,18 +1021,6 @@ class TracedGlobal { template V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs); - /** - * Returns true if this TracedGlobal is empty, i.e., has not been assigned an - * object. - */ - bool IsEmpty() const { return val_ == nullptr; } - - /** - * If non-empty, destroy the underlying storage cell. |IsEmpty| will return - * true after this call. - */ - V8_INLINE void Reset(); - /** * If non-empty, destroy the underlying storage cell and create a new one with * the contents of other if other is non empty @@ -924,103 +1028,120 @@ class TracedGlobal { template V8_INLINE void Reset(Isolate* isolate, const Local& other); - /** - * Construct a Local from this handle. - */ - Local Get(Isolate* isolate) const { return Local::New(isolate, *this); } - template V8_INLINE TracedGlobal& As() const { return reinterpret_cast&>( const_cast&>(*this)); } +}; - template - V8_INLINE bool operator==(const TracedGlobal& that) const { - internal::Address* a = reinterpret_cast(**this); - internal::Address* b = reinterpret_cast(*that); - if (a == nullptr) return b == nullptr; - if (b == nullptr) return false; - return *a == *b; - } +/** + * A traced handle without destructor that clears the handle. The embedder needs + * to ensure that the handle is not accessed once the V8 object has been + * reclaimed. This can happen when the handle is not passed through the + * EmbedderHeapTracer. For more details see TracedReferenceBase. + */ +template +class TracedReference : public TracedReferenceBase { + public: + using TracedReferenceBase::Reset; + + /** + * An empty TracedReference without storage cell. + */ + TracedReference() : TracedReferenceBase() {} + /** + * Construct a TracedReference from a Local. + * + * When the Local is non-empty, a new storage cell is created + * pointing to the same object. 
+ */ template - V8_INLINE bool operator==(const Local& that) const { - internal::Address* a = reinterpret_cast(**this); - internal::Address* b = reinterpret_cast(*that); - if (a == nullptr) return b == nullptr; - if (b == nullptr) return false; - return *a == *b; + TracedReference(Isolate* isolate, Local that) : TracedReferenceBase() { + this->val_ = this->New(isolate, that.val_, &this->val_, + TracedReferenceBase::kWithoutDestructor); + TYPE_CHECK(T, S); } - template - V8_INLINE bool operator!=(const TracedGlobal& that) const { - return !operator==(that); + /** + * Move constructor initializing TracedReference from an + * existing one. + */ + V8_INLINE TracedReference(TracedReference&& other) { + // Forward to operator=. + *this = std::move(other); } - template - V8_INLINE bool operator!=(const Local& that) const { - return !operator==(that); + /** + * Move constructor initializing TracedReference from an + * existing one. + */ + template + V8_INLINE TracedReference(TracedReference&& other) { + // Forward to operator=. + *this = std::move(other); } /** - * Assigns a wrapper class ID to the handle. + * Copy constructor initializing TracedReference from an + * existing one. */ - V8_INLINE void SetWrapperClassId(uint16_t class_id); + V8_INLINE TracedReference(const TracedReference& other) { + // Forward to operator=; + *this = other; + } /** - * Returns the class ID previously assigned to this handle or 0 if no class ID - * was previously assigned. + * Copy constructor initializing TracedReference from an + * existing one. */ - V8_INLINE uint16_t WrapperClassId() const; + template + V8_INLINE TracedReference(const TracedReference& other) { + // Forward to operator=; + *this = other; + } /** - * Adds a finalization callback to the handle. The type of this callback is - * similar to WeakCallbackType::kInternalFields, i.e., it will pass the - * parameter and the first two internal fields of the object. - * - * The callback is then supposed to reset the handle in the callback. No - * further V8 API may be called in this callback. In case additional work - * involving V8 needs to be done, a second callback can be scheduled using - * WeakCallbackInfo::SetSecondPassCallback. + * Move assignment operator initializing TracedGlobal from an existing one. */ - V8_INLINE void SetFinalizationCallback( - void* parameter, WeakCallbackInfo::Callback callback); + V8_INLINE TracedReference& operator=(TracedReference&& rhs); - private: - // Wrapping type used when clearing on destruction is required. - struct WrappedForDestruction { - T* value; - - explicit WrappedForDestruction(T* val) : value(val) {} - ~WrappedForDestruction(); - operator T*() const { return value; } - T* operator*() const { return value; } - T* operator->() const { return value; } - WrappedForDestruction& operator=(const WrappedForDestruction& other) { - value = other.value; - return *this; - } - WrappedForDestruction& operator=(T* val) { - value = val; - return *this; - } - }; + /** + * Move assignment operator initializing TracedGlobal from an existing one. + */ + template + V8_INLINE TracedReference& operator=(TracedReference&& rhs); - V8_INLINE static T* New(Isolate* isolate, T* that, void* slot); + /** + * Copy assignment operator initializing TracedGlobal from an existing one. + * + * Note: Prohibited when |other| has a finalization callback set through + * |SetFinalizationCallback|. 
+ */ + V8_INLINE TracedReference& operator=(const TracedReference& rhs); - T* operator*() const { return this->val_; } + /** + * Copy assignment operator initializing TracedGlobal from an existing one. + * + * Note: Prohibited when |other| has a finalization callback set through + * |SetFinalizationCallback|. + */ + template + V8_INLINE TracedReference& operator=(const TracedReference& rhs); - typename std::conditional< - TracedGlobalTrait>::kRequiresExplicitDestruction, - WrappedForDestruction, T*>::type val_{nullptr}; + /** + * If non-empty, destroy the underlying storage cell and create a new one with + * the contents of other if other is non empty + */ + template + V8_INLINE void Reset(Isolate* isolate, const Local& other); - friend class EmbedderHeapTracer; - template - friend class Local; - friend class Object; - template - friend class ReturnValue; + template + V8_INLINE TracedReference& As() const { + return reinterpret_cast&>( + const_cast&>(*this)); + } }; /** @@ -1450,9 +1571,9 @@ class V8_EXPORT Module { "Use the preceding SetSyntheticModuleExport with an Isolate parameter, " "instead of the one that follows. The former will throw a runtime " "error if called for an export that doesn't exist (as per spec); " - "the latter will crash with a failed CHECK().", - void SetSyntheticModuleExport(Local export_name, - Local export_value)); + "the latter will crash with a failed CHECK().") + void SetSyntheticModuleExport(Local export_name, + Local export_value); }; /** @@ -1626,10 +1747,12 @@ class V8_EXPORT ScriptCompiler { public: enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 }; +#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ V8_DEPRECATE_SOON( "This class takes ownership of source_stream, so use the constructor " - "taking a unique_ptr to make these semantics clearer", - StreamedSource(ExternalSourceStream* source_stream, Encoding encoding)); + "taking a unique_ptr to make these semantics clearer") +#endif + StreamedSource(ExternalSourceStream* source_stream, Encoding encoding); StreamedSource(std::unique_ptr source_stream, Encoding encoding); ~StreamedSource(); @@ -3405,7 +3528,7 @@ enum class IndexFilter { kIncludeIndices, kSkipIndices }; * kConvertToString will convert integer indices to strings. * kKeepNumbers will return numbers for integer indices. */ -enum class KeyConversionMode { kConvertToString, kKeepNumbers }; +enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers }; /** * Integrity level for objects. @@ -3649,8 +3772,9 @@ class V8_EXPORT Object : public Value { return object.val_->InternalFieldCount(); } - /** Same as above, but works for TracedGlobal. */ - V8_INLINE static int InternalFieldCount(const TracedGlobal& object) { + /** Same as above, but works for TracedReferenceBase. */ + V8_INLINE static int InternalFieldCount( + const TracedReferenceBase& object) { return object.val_->InternalFieldCount(); } @@ -3675,7 +3799,7 @@ class V8_EXPORT Object : public Value { /** Same as above, but works for TracedGlobal. 
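To make the TracedGlobal/TracedReference split concrete: TracedGlobal keeps the destructor that resets the handle, while TracedReference leaves cleanup entirely to the embedder. A minimal holder sketch under that assumption (ObjectHolder is a made-up name):

#include "v8.h"

// Holds a JS object from C++ without a destructor-based reset; the embedder
// (typically via its EmbedderHeapTracer integration) must clear the reference
// before or when the underlying object is reclaimed.
class ObjectHolder {
 public:
  void Set(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
    ref_.Reset(isolate, obj);
  }
  v8::Local<v8::Object> Get(v8::Isolate* isolate) const {
    return ref_.Get(isolate);
  }
  bool IsEmpty() const { return ref_.IsEmpty(); }

 private:
  v8::TracedReference<v8::Object> ref_;
};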
*/ V8_INLINE static void* GetAlignedPointerFromInternalField( - const TracedGlobal& object, int index) { + const TracedReferenceBase& object, int index) { return object.val_->GetAlignedPointerFromInternalField(index); } @@ -3965,7 +4089,7 @@ class ReturnValue { template V8_INLINE void Set(const Global& handle); template - V8_INLINE void Set(const TracedGlobal& handle); + V8_INLINE void Set(const TracedReferenceBase& handle); template V8_INLINE void Set(const Local handle); // Fast primitive setters @@ -4720,6 +4844,45 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final { enum class ArrayBufferCreationMode { kInternalized, kExternalized }; +/** + * A wrapper around the backing store (i.e. the raw memory) of an array buffer. + * + * The allocation and destruction of backing stores is generally managed by + * V8. Clients should always use standard C++ memory ownership types (i.e. + * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores + * properly, since V8 internal objects may alias backing stores. + * + * This object does not keep the underlying |ArrayBuffer::Allocator| alive by + * default. Use Isolate::CreateParams::array_buffer_allocator_shared when + * creating the Isolate to make it hold a reference to the allocator itself. + */ +class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase { + public: + ~BackingStore(); + + /** + * Return a pointer to the beginning of the memory block for this backing + * store. The pointer is only valid as long as this backing store object + * lives. + */ + void* Data() const; + + /** + * The length (in bytes) of this backing store. + */ + size_t ByteLength() const; + + private: + BackingStore(); +}; + +/** + * This callback is used only if the memory block for this backing store cannot + * be allocated with an ArrayBuffer::Allocator. In such cases the destructor + * of this backing store object invokes the callback to free the memory block. + */ +using BackingStoreDeleterCallback = void (*)(void* data, size_t length, + void* deleter_data); /** * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5). @@ -4856,6 +5019,44 @@ class V8_EXPORT ArrayBuffer : public Object { Isolate* isolate, void* data, size_t byte_length, ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized); + /** + * Create a new ArrayBuffer with an existing backing store. + * The created array keeps a reference to the backing store until the array + * is garbage collected. Note that the IsExternal bit does not affect this + * reference from the array to the backing store. + * + * In future IsExternal bit will be removed. Until then the bit is set as + * follows. If the backing store does not own the underlying buffer, then + * the array is created in externalized state. Otherwise, the array is created + * in internalized state. In the latter case the array can be transitioned + * to the externalized state using Externalize(backing_store). + */ + static Local New(Isolate* isolate, + std::shared_ptr backing_store); + + /** + * Returns a new standalone BackingStore that is allocated using the array + * buffer allocator of the isolate. The result can be later passed to + * ArrayBuffer::New. + * + * If the allocator returns nullptr, then the function may cause GCs in the + * given isolate and re-try the allocation. If GCs do not help, then the + * function will crash with an out-of-memory error. 
+ */ + static std::unique_ptr NewBackingStore(Isolate* isolate, + size_t byte_length); + /** + * Returns a new standalone BackingStore that takes over the ownership of + * the given buffer. The destructor of the BackingStore invokes the given + * deleter callback. + * + * The result can be later passed to ArrayBuffer::New. The raw pointer + * to the buffer must not be passed again to any V8 API function. + */ + static std::unique_ptr NewBackingStore( + void* data, size_t byte_length, BackingStoreDeleterCallback deleter, + void* deleter_data); + /** * Returns true if ArrayBuffer is externalized, that is, does not * own its memory block. @@ -4868,8 +5069,8 @@ class V8_EXPORT ArrayBuffer : public Object { bool IsDetachable() const; // TODO(913887): fix the use of 'neuter' in the API. - V8_DEPRECATED("Use IsDetachable() instead.", - inline bool IsNeuterable() const) { + V8_DEPRECATED("Use IsDetachable() instead.") + inline bool IsNeuterable() const { return IsDetachable(); } @@ -4882,7 +5083,8 @@ class V8_EXPORT ArrayBuffer : public Object { void Detach(); // TODO(913887): fix the use of 'neuter' in the API. - V8_DEPRECATED("Use Detach() instead.", inline void Neuter()) { Detach(); } + V8_DEPRECATED("Use Detach() instead.") + inline void Neuter() { Detach(); } /** * Make this ArrayBuffer external. The pointer to underlying memory block @@ -4892,10 +5094,19 @@ class V8_EXPORT ArrayBuffer : public Object { * * The Data pointer of ArrayBuffer::Contents must be freed using the provided * deleter, which will call ArrayBuffer::Allocator::Free if the buffer - * was allocated with ArraryBuffer::Allocator::Allocate. + * was allocated with ArrayBuffer::Allocator::Allocate. */ Contents Externalize(); + /** + * Marks this ArrayBuffer external given a witness that the embedder + * has fetched the backing store using the new GetBackingStore() function. + * + * With the new lifetime management of backing stores there is no need for + * externalizing, so this function exists only to make the transition easier. + */ + void Externalize(const std::shared_ptr& backing_store); + /** * Get a pointer to the ArrayBuffer's underlying memory block without * externalizing it. If the ArrayBuffer is not externalized, this pointer @@ -4906,6 +5117,16 @@ class V8_EXPORT ArrayBuffer : public Object { */ Contents GetContents(); + /** + * Get a shared pointer to the backing store of this array buffer. This + * pointer coordinates the lifetime management of the internal storage + * with any live ArrayBuffers on the heap, even across isolates. The embedder + * should not attempt to manage lifetime of the storage through other means. + * + * This function replaces both Externalize() and GetContents(). + */ + std::shared_ptr GetBackingStore(); + V8_INLINE static ArrayBuffer* Cast(Value* obj); static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT; @@ -4914,6 +5135,7 @@ class V8_EXPORT ArrayBuffer : public Object { private: ArrayBuffer(); static void CheckCast(Value* obj); + Contents GetContents(bool externalize); }; @@ -5280,15 +5502,52 @@ class V8_EXPORT SharedArrayBuffer : public Object { Isolate* isolate, void* data, size_t byte_length, ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized); + /** + * Create a new SharedArrayBuffer with an existing backing store. + * The created array keeps a reference to the backing store until the array + * is garbage collected. Note that the IsExternal bit does not affect this + * reference from the array to the backing store. 
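Taken together, the BackingStore additions above are meant to replace the Externalize()/GetContents() pattern. A hedged usage sketch, assuming a live isolate with an entered context:

#include <cstring>
#include <memory>

#include "v8.h"

void MakeZeroedBuffer(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  // Allocate 1 KiB through the isolate's ArrayBuffer::Allocator.
  std::unique_ptr<v8::BackingStore> store =
      v8::ArrayBuffer::NewBackingStore(isolate, 1024);
  // The ArrayBuffer shares ownership of the backing store from here on.
  v8::Local<v8::ArrayBuffer> buffer =
      v8::ArrayBuffer::New(isolate, std::move(store));
  // The raw memory stays valid while any BackingStore reference is alive.
  std::shared_ptr<v8::BackingStore> shared = buffer->GetBackingStore();
  std::memset(shared->Data(), 0, shared->ByteLength());
}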
+ * + * In future IsExternal bit will be removed. Until then the bit is set as + * follows. If the backing store does not own the underlying buffer, then + * the array is created in externalized state. Otherwise, the array is created + * in internalized state. In the latter case the array can be transitioned + * to the externalized state using Externalize(backing_store). + */ + static Local New( + Isolate* isolate, std::shared_ptr backing_store); + + /** + * Returns a new standalone BackingStore that is allocated using the array + * buffer allocator of the isolate. The result can be later passed to + * SharedArrayBuffer::New. + * + * If the allocator returns nullptr, then the function may cause GCs in the + * given isolate and re-try the allocation. If GCs do not help, then the + * function will crash with an out-of-memory error. + */ + static std::unique_ptr NewBackingStore(Isolate* isolate, + size_t byte_length); + /** + * Returns a new standalone BackingStore that takes over the ownership of + * the given buffer. The destructor of the BackingStore invokes the given + * deleter callback. + * + * The result can be later passed to SharedArrayBuffer::New. The raw pointer + * to the buffer must not be passed again to any V8 functions. + */ + static std::unique_ptr NewBackingStore( + void* data, size_t byte_length, BackingStoreDeleterCallback deleter, + void* deleter_data); + /** * Create a new SharedArrayBuffer over an existing memory block. Propagate * flags to indicate whether the underlying buffer can be grown. */ - V8_DEPRECATED("Use New method with data, and byte_length instead.", - static Local New( - Isolate* isolate, const SharedArrayBuffer::Contents&, - ArrayBufferCreationMode mode = - ArrayBufferCreationMode::kExternalized)); + V8_DEPRECATED("Use New method with data, and byte_length instead.") + static Local New( + Isolate* isolate, const SharedArrayBuffer::Contents&, + ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized); /** * Returns true if SharedArrayBuffer is externalized, that is, does not @@ -5310,6 +5569,15 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ Contents Externalize(); + /** + * Marks this SharedArrayBuffer external given a witness that the embedder + * has fetched the backing store using the new GetBackingStore() function. + * + * With the new lifetime management of backing stores there is no need for + * externalizing, so this function exists only to make the transition easier. + */ + void Externalize(const std::shared_ptr& backing_store); + /** * Get a pointer to the ArrayBuffer's underlying memory block without * externalizing it. If the ArrayBuffer is not externalized, this pointer @@ -5324,6 +5592,16 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ Contents GetContents(); + /** + * Get a shared pointer to the backing store of this array buffer. This + * pointer coordinates the lifetime management of the internal storage + * with any live ArrayBuffers on the heap, even across isolates. The embedder + * should not attempt to manage lifetime of the storage through other means. + * + * This function replaces both Externalize() and GetContents(). 
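The deleter-based NewBackingStore overload is the path for memory that V8 did not allocate. A sketch wrapping a malloc'ed block in a SharedArrayBuffer; names and error handling are illustrative, and the caller is assumed to hold a HandleScope:

#include <cstdlib>

#include "v8.h"

// Matches BackingStoreDeleterCallback; runs once the last reference dies.
static void FreeMallocedBlock(void* data, size_t /*length*/,
                              void* /*deleter_data*/) {
  std::free(data);
}

v8::Local<v8::SharedArrayBuffer> WrapMallocedBlock(v8::Isolate* isolate,
                                                   size_t byte_length) {
  void* data = std::malloc(byte_length);
  // The BackingStore takes ownership of |data|; FreeMallocedBlock is invoked
  // when neither the JS object nor any embedder shared_ptr refers to it.
  std::unique_ptr<v8::BackingStore> store =
      v8::SharedArrayBuffer::NewBackingStore(data, byte_length,
                                             FreeMallocedBlock,
                                             /*deleter_data=*/nullptr);
  return v8::SharedArrayBuffer::New(isolate, std::move(store));
}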
+ */ + std::shared_ptr GetBackingStore(); + V8_INLINE static SharedArrayBuffer* Cast(Value* obj); static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT; @@ -5331,6 +5609,7 @@ class V8_EXPORT SharedArrayBuffer : public Object { private: SharedArrayBuffer(); static void CheckCast(Value* obj); + Contents GetContents(bool externalize); }; @@ -6655,34 +6934,26 @@ class V8_EXPORT ResourceConstraints { /** * Deprecated functions. Do not use in new code. */ - V8_DEPRECATE_SOON("Use code_range_size_in_bytes.", - size_t code_range_size() const) { - return code_range_size_ / kMB; - } - V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.", - void set_code_range_size(size_t limit_in_mb)) { + V8_DEPRECATE_SOON("Use code_range_size_in_bytes.") + size_t code_range_size() const { return code_range_size_ / kMB; } + V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.") + void set_code_range_size(size_t limit_in_mb) { code_range_size_ = limit_in_mb * kMB; } - V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.", - size_t max_semi_space_size_in_kb() const); - V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.", - void set_max_semi_space_size_in_kb(size_t limit_in_kb)); - V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.", - size_t max_old_space_size() const) { - return max_old_generation_size_ / kMB; - } - V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.", - void set_max_old_space_size(size_t limit_in_mb)) { + V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.") + size_t max_semi_space_size_in_kb() const; + V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.") + void set_max_semi_space_size_in_kb(size_t limit_in_kb); + V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.") + size_t max_old_space_size() const { return max_old_generation_size_ / kMB; } + V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.") + void set_max_old_space_size(size_t limit_in_mb) { max_old_generation_size_ = limit_in_mb * kMB; } - V8_DEPRECATE_SOON("Zone does not pool memory any more.", - size_t max_zone_pool_size() const) { - return max_zone_pool_size_; - } - V8_DEPRECATE_SOON("Zone does not pool memory any more.", - void set_max_zone_pool_size(size_t bytes)) { - max_zone_pool_size_ = bytes; - } + V8_DEPRECATE_SOON("Zone does not pool memory any more.") + size_t max_zone_pool_size() const { return max_zone_pool_size_; } + V8_DEPRECATE_SOON("Zone does not pool memory any more.") + void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; } private: static constexpr size_t kMB = 1048576u; @@ -6756,6 +7027,7 @@ enum class CrashKeyId { kReadonlySpaceFirstPageAddress, kMapSpaceFirstPageAddress, kCodeSpaceFirstPageAddress, + kDumpType, }; typedef void (*AddCrashKeyCallback)(CrashKeyId id, const std::string& value); @@ -7384,7 +7656,8 @@ class V8_EXPORT EmbedderHeapTracer { class V8_EXPORT TracedGlobalHandleVisitor { public: virtual ~TracedGlobalHandleVisitor() = default; - virtual void VisitTracedGlobalHandle(const TracedGlobal& value) = 0; + virtual void VisitTracedGlobalHandle(const TracedGlobal& handle) {} + virtual void VisitTracedReference(const TracedReference& handle) {} }; /** @@ -7422,13 +7695,12 @@ class V8_EXPORT EmbedderHeapTracer { virtual void RegisterV8References( const std::vector >& embedder_fields) = 0; - void RegisterEmbedderReference(const TracedGlobal& ref); + void RegisterEmbedderReference(const TracedReferenceBase& ref); /** * Called at the beginning of a GC cycle. 
*/ - V8_DEPRECATED("Use version with flags.", virtual void TracePrologue()) {} - virtual void TracePrologue(TraceFlags flags); + virtual void TracePrologue(TraceFlags flags) {} /** * Called to advance tracing in the embedder. @@ -7455,8 +7727,7 @@ class V8_EXPORT EmbedderHeapTracer { * overriden to fill a |TraceSummary| that is used by V8 to schedule future * garbage collections. */ - V8_DEPRECATE_SOON("Use version with parameter.", - virtual void TraceEpilogue()) {} + V8_DEPRECATED("Use version with parameter.") virtual void TraceEpilogue() {} virtual void TraceEpilogue(TraceSummary* trace_summary); /** @@ -7483,32 +7754,35 @@ class V8_EXPORT EmbedderHeapTracer { * * If this returns false, then V8 may decide that the object referred to by * such a handle is reclaimed. In that case: - * - No action is required if handles are used with destructors. - * - When run without destructors (by specializing - * |TracedGlobalTrait::kRequiresExplicitDestruction|) V8 calls - * |ResetHandleInNonTracingGC|. + * - No action is required if handles are used with destructors, i.e., by just + * using |TracedGlobal|. + * - When run without destructors, i.e., by using + * |TracedReference|, V8 calls |ResetHandleInNonTracingGC|. * - * Note that the |handle| is different from the |TracedGlobal| handle that - * the embedder holds for retaining the object. The embedder may use - * |TracedGlobal::WrapperClassId()| to distinguish cases where it wants - * handles to be treated as roots from not being treated as roots. + * Note that the |handle| is different from the handle that the embedder holds + * for retaining the object. The embedder may use |WrapperClassId()| to + * distinguish cases where it wants handles to be treated as roots from not + * being treated as roots. */ virtual bool IsRootForNonTracingGC( - const v8::TracedGlobal& handle) { - return true; - } + const v8::TracedReference& handle); + virtual bool IsRootForNonTracingGC(const v8::TracedGlobal& handle); /** * Used in combination with |IsRootForNonTracingGC|. Called by V8 when an * object that is backed by a handle is reclaimed by a non-tracing garbage * collection. It is up to the embedder to reset the original handle. * - * Note that the |handle| is different from the |TracedGlobal| handle that - * the embedder holds for retaining the object. It is up to the embedder to - * find the orignal |TracedGlobal| handle via the object or class id. + * Note that the |handle| is different from the handle that the embedder holds + * for retaining the object. It is up to the embedder to find the original + * handle via the object or class id. */ virtual void ResetHandleInNonTracingGC( - const v8::TracedGlobal& handle) {} + const v8::TracedReference& handle); + V8_DEPRECATE_SOON( + "Use TracedReference version when not requiring destructors.") + virtual void ResetHandleInNonTracingGC( + const v8::TracedGlobal& handle); /* * Called by the embedder to immediately perform a full garbage collection. @@ -7575,6 +7849,8 @@ struct DeserializeInternalFieldsCallback { }; typedef DeserializeInternalFieldsCallback DeserializeEmbedderFieldsCallback; +enum class MeasureMemoryMode { kSummary, kDetailed }; + /** * Isolate represents an isolated instance of the V8 engine. V8 isolates have * completely separate states. Objects from one isolate must not be used in @@ -7635,6 +7911,11 @@ class V8_EXPORT Isolate { /** * The ArrayBuffer::Allocator to use for allocating and freeing the backing * store of ArrayBuffers. 
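For the TracedReference overloads of IsRootForNonTracingGC and ResetHandleInNonTracingGC above, a heavily simplified tracer sketch; the other EmbedderHeapTracer hooks are stubbed out and the class-id convention (1 marks a root) is invented for illustration:

#include <utility>
#include <vector>

#include "v8.h"

class NoopTracer : public v8::EmbedderHeapTracer {
 public:
  // Tracing hooks stubbed out; a real tracer walks the embedder graph here.
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& embedder_fields) override {}
  bool AdvanceTracing(double deadline_in_ms) override { return true; }
  bool IsTracingDone() override { return true; }
  void EnterFinalPause(EmbedderStackState stack_state) override {}

  // Non-tracing GCs: only handles tagged with class id 1 are kept alive.
  bool IsRootForNonTracingGC(
      const v8::TracedReference<v8::Value>& handle) override {
    return handle.WrapperClassId() == 1;
  }
  void ResetHandleInNonTracingGC(
      const v8::TracedReference<v8::Value>& handle) override {
    // Locate the owning embedder object (e.g. via class id or wrapper fields)
    // and clear its TracedReference there.
  }
};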
+ * + * If the shared_ptr version is used, the Isolate instance and every + * |BackingStore| allocated using this allocator hold a std::shared_ptr + * to the allocator, in order to facilitate lifetime + * management for the allocator instance. */ ArrayBuffer::Allocator* array_buffer_allocator; @@ -7658,6 +7939,9 @@ class V8_EXPORT Isolate { bool only_terminate_in_safe_scope; }; + void SetArrayBufferAllocatorShared( + std::shared_ptr allocator); + /** * Stack-allocated class which sets the isolate for all operations @@ -8095,6 +8379,17 @@ class V8_EXPORT Isolate { */ bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics); + /** + * Enqueues a memory measurement request for the given context and mode. + * This API is experimental and may change significantly. + * + * \param mode Indicates whether the result should include per-context + * memory usage or just the total memory usage. + * \returns a promise that will be resolved with memory usage estimate. + */ + v8::MaybeLocal MeasureMemory(v8::Local context, + MeasureMemoryMode mode); + /** * Get a call stack sample from the isolate. * \param state Execution state. @@ -8156,8 +8451,8 @@ class V8_EXPORT Isolate { Local GetCurrentContext(); /** Returns the last context entered through V8's C++ API. */ - V8_DEPRECATED("Use GetEnteredOrMicrotaskContext().", - Local GetEnteredContext()); + V8_DEPRECATED("Use GetEnteredOrMicrotaskContext().") + Local GetEnteredContext(); /** * Returns either the last context entered through V8's C++ API, or the @@ -8469,18 +8764,16 @@ class V8_EXPORT Isolate { * Executing scripts inside the callback will not re-trigger microtasks and * the callback. */ - V8_DEPRECATE_SOON("Use *WithData version.", - void AddMicrotasksCompletedCallback( - MicrotasksCompletedCallback callback)); + V8_DEPRECATE_SOON("Use *WithData version.") + void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback); void AddMicrotasksCompletedCallback( MicrotasksCompletedCallbackWithData callback, void* data = nullptr); /** * Removes callback that was installed by AddMicrotasksCompletedCallback. */ - V8_DEPRECATE_SOON("Use *WithData version.", - void RemoveMicrotasksCompletedCallback( - MicrotasksCompletedCallback callback)); + V8_DEPRECATE_SOON("Use *WithData version.") + void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback); void RemoveMicrotasksCompletedCallback( MicrotasksCompletedCallbackWithData callback, void* data = nullptr); @@ -8910,6 +9203,7 @@ class V8_EXPORT V8 { * handled entirely on the embedders' side. * - The call will abort if the data is invalid. */ + V8_DEPRECATED("The natives blob is deprecated (https://crbug.com/v8/7624).") static void SetNativesDataBlob(StartupData* startup_blob); static void SetSnapshotDataBlob(StartupData* startup_blob); @@ -8922,8 +9216,8 @@ class V8_EXPORT V8 { */ static void SetFlagsFromString(const char* str); static void SetFlagsFromString(const char* str, size_t length); - V8_DEPRECATED("use size_t version", - static void SetFlagsFromString(const char* str, int length)); + V8_DEPRECATED("use size_t version") + static void SetFlagsFromString(const char* str, int length); /** * Sets V8 flags from the command line. @@ -9006,8 +9300,11 @@ class V8_EXPORT V8 { * not perform any file IO. 
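MeasureMemory returns a promise rather than a synchronous number. A hedged sketch of requesting a summary measurement; consuming the estimate would go through the usual Promise::Then machinery:

#include "v8.h"

void RequestMemoryEstimate(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  v8::Local<v8::Promise> promise;
  if (isolate->MeasureMemory(context, v8::MeasureMemoryMode::kSummary)
          .ToLocal(&promise)) {
    // Attach a fulfillment handler via promise->Then(context, callback) to
    // read the usage estimate once the measurement completes.
  }
}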
*/ static void InitializeExternalStartupData(const char* directory_path); + V8_DEPRECATED("The natives blob is deprecated (https://crbug.com/v8/7624).") static void InitializeExternalStartupData(const char* natives_blob, const char* snapshot_blob); + static void InitializeExternalStartupDataFromFile(const char* snapshot_blob); + /** * Sets the v8::Platform to use. This should be invoked before V8 is * initialized. @@ -9040,9 +9337,8 @@ class V8_EXPORT V8 { * \param context The third argument passed to the Linux signal handler, which * points to a ucontext_t structure. */ - V8_DEPRECATE_SOON("Use TryHandleWebAssemblyTrapPosix", - static bool TryHandleSignal(int signal_number, void* info, - void* context)); + V8_DEPRECATE_SOON("Use TryHandleWebAssemblyTrapPosix") + static bool TryHandleSignal(int signal_number, void* info, void* context); #endif // V8_OS_POSIX /** @@ -9111,8 +9407,12 @@ class V8_EXPORT V8 { template friend class Maybe; template + friend class TracedReferenceBase; + template friend class TracedGlobal; template + friend class TracedReference; + template friend class WeakCallbackInfo; template friend class Eternal; template friend class PersistentBase; @@ -9983,7 +10283,7 @@ Local Local::New(Isolate* isolate, const PersistentBase& that) { } template -Local Local::New(Isolate* isolate, const TracedGlobal& that) { +Local Local::New(Isolate* isolate, const TracedReferenceBase& that) { return New(isolate, that.val_); } @@ -10164,26 +10464,20 @@ Global& Global::operator=(Global&& rhs) { } template -TracedGlobal::WrappedForDestruction::~WrappedForDestruction() { - if (value == nullptr) return; - V8::DisposeTracedGlobal(reinterpret_cast(value)); - value = nullptr; -} - -template -T* TracedGlobal::New(Isolate* isolate, T* that, void* slot) { +T* TracedReferenceBase::New(Isolate* isolate, T* that, void* slot, + DestructionMode destruction_mode) { if (that == nullptr) return nullptr; internal::Address* p = reinterpret_cast(that); return reinterpret_cast(V8::GlobalizeTracedReference( reinterpret_cast(isolate), p, reinterpret_cast(slot), - TracedGlobalTrait>::kRequiresExplicitDestruction)); + destruction_mode == kWithDestructor)); } template -void TracedGlobal::Reset() { +void TracedReferenceBase::Reset() { if (IsEmpty()) return; - V8::DisposeTracedGlobal(reinterpret_cast(**this)); + V8::DisposeTracedGlobal(reinterpret_cast(val_)); val_ = nullptr; } @@ -10193,7 +10487,8 @@ void TracedGlobal::Reset(Isolate* isolate, const Local& other) { TYPE_CHECK(T, S); Reset(); if (other.IsEmpty()) return; - this->val_ = New(isolate, other.val_, &val_); + this->val_ = this->New(isolate, other.val_, &this->val_, + TracedReferenceBase::kWithDestructor); } template @@ -10241,28 +10536,83 @@ TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { } template -void TracedGlobal::SetWrapperClassId(uint16_t class_id) { +template +void TracedReference::Reset(Isolate* isolate, const Local& other) { + TYPE_CHECK(T, S); + Reset(); + if (other.IsEmpty()) return; + this->val_ = this->New(isolate, other.val_, &this->val_, + TracedReferenceBase::kWithoutDestructor); +} + +template +template +TracedReference& TracedReference::operator=(TracedReference&& rhs) { + TYPE_CHECK(T, S); + *this = std::move(rhs.template As()); + return *this; +} + +template +template +TracedReference& TracedReference::operator=( + const TracedReference& rhs) { + TYPE_CHECK(T, S); + *this = rhs.template As(); + return *this; +} + +template +TracedReference& TracedReference::operator=(TracedReference&& rhs) { + if (this != &rhs) { + 
this->Reset(); + if (rhs.val_ != nullptr) { + this->val_ = rhs.val_; + V8::MoveTracedGlobalReference( + reinterpret_cast(&rhs.val_), + reinterpret_cast(&this->val_)); + rhs.val_ = nullptr; + } + } + return *this; +} + +template +TracedReference& TracedReference::operator=(const TracedReference& rhs) { + if (this != &rhs) { + this->Reset(); + if (rhs.val_ != nullptr) { + V8::CopyTracedGlobalReference( + reinterpret_cast(&rhs.val_), + reinterpret_cast(&this->val_)); + } + } + return *this; +} + +template +void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) { typedef internal::Internals I; if (IsEmpty()) return; - internal::Address* obj = reinterpret_cast(**this); + internal::Address* obj = reinterpret_cast(val_); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; *reinterpret_cast(addr) = class_id; } template -uint16_t TracedGlobal::WrapperClassId() const { +uint16_t TracedReferenceBase::WrapperClassId() const { typedef internal::Internals I; if (IsEmpty()) return 0; - internal::Address* obj = reinterpret_cast(**this); + internal::Address* obj = reinterpret_cast(val_); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; return *reinterpret_cast(addr); } template -void TracedGlobal::SetFinalizationCallback( +void TracedReferenceBase::SetFinalizationCallback( void* parameter, typename WeakCallbackInfo::Callback callback) { - V8::SetFinalizationCallbackTraced( - reinterpret_cast(**this), parameter, callback); + V8::SetFinalizationCallbackTraced(reinterpret_cast(val_), + parameter, callback); } template @@ -10281,12 +10631,12 @@ void ReturnValue::Set(const Global& handle) { template template -void ReturnValue::Set(const TracedGlobal& handle) { +void ReturnValue::Set(const TracedReferenceBase& handle) { TYPE_CHECK(T, S); if (V8_UNLIKELY(handle.IsEmpty())) { *value_ = GetDefaultValue(); } else { - *value_ = *reinterpret_cast(*handle); + *value_ = *reinterpret_cast(handle.val_); } } diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 7670c0e449c7fd..882dc8a23c60af 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -54,7 +54,7 @@ // ----------------------------------------------------------------------------- -// Operating system detection +// Operating system detection (host) // // V8_OS_ANDROID - Android // V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD) @@ -122,6 +122,67 @@ # define V8_OS_WIN 1 #endif +// ----------------------------------------------------------------------------- +// Operating system detection (target) +// +// V8_TARGET_OS_ANDROID +// V8_TARGET_OS_FUCHSIA +// V8_TARGET_OS_IOS +// V8_TARGET_OS_LINUX +// V8_TARGET_OS_MACOSX +// V8_TARGET_OS_WIN +// +// If not set explicitly, these fall back to corresponding V8_OS_ values. + +#ifdef V8_HAVE_TARGET_OS + +// The target OS is provided, just check that at least one known value is set. +# if !defined(V8_TARGET_OS_ANDROID) \ + && !defined(V8_TARGET_OS_FUCHSIA) \ + && !defined(V8_TARGET_OS_IOS) \ + && !defined(V8_TARGET_OS_LINUX) \ + && !defined(V8_TARGET_OS_MACOSX) \ + && !defined(V8_TARGET_OS_WIN) +# error No known target OS defined. +# endif + +#else // V8_HAVE_TARGET_OS + +# if defined(V8_TARGET_OS_ANDROID) \ + || defined(V8_TARGET_OS_FUCHSIA) \ + || defined(V8_TARGET_OS_IOS) \ + || defined(V8_TARGET_OS_LINUX) \ + || defined(V8_TARGET_OS_MACOSX) \ + || defined(V8_TARGET_OS_WIN) +# error A target OS is defined but V8_HAVE_TARGET_OS is unset. +# endif + +// Fall back to the detected host OS. 
+#ifdef V8_OS_ANDROID +# define V8_TARGET_OS_ANDROID +#endif + +#ifdef V8_OS_FUCHSIA +# define V8_TARGET_OS_FUCHSIA +#endif + +#ifdef V8_OS_IOS +# define V8_TARGET_OS_IOS +#endif + +#ifdef V8_OS_LINUX +# define V8_TARGET_OS_LINUX +#endif + +#ifdef V8_OS_MACOSX +# define V8_TARGET_OS_MACOSX +#endif + +#ifdef V8_OS_WIN +# define V8_TARGET_OS_WIN +#endif + +#endif // V8_HAVE_TARGET_OS // ----------------------------------------------------------------------------- // C library detection @@ -169,7 +230,7 @@ // // V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline)) // supported -// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported +// V8_HAS_ATTRIBUTE_NONNULL - __attribute__((nonnull)) supported // V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported // V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported // V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported @@ -188,10 +249,8 @@ // V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported // V8_HAS_COMPUTED_GOTO - computed goto/labels as values // supported -// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported // V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported -// V8_HAS_DECLSPEC_NORETURN - __declspec(noreturn) supported // V8_HAS___FORCEINLINE - __forceinline supported // // Note that testing for compilers and/or features must be done using #if @@ -207,9 +266,7 @@ #endif # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) -# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated)) -# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE \ - (__has_extension(attribute_deprecated_with_message)) +# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused)) # define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility)) @@ -255,8 +312,6 @@ // Works around "sorry, unimplemented: inlining failed" build errors with // older compilers. # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0)) -# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0)) -# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0)) # define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0)) # define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0)) # define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0)) @@ -278,10 +333,8 @@ #if defined(_MSC_VER) # define V8_CC_MSVC 1 -# define V8_HAS_DECLSPEC_DEPRECATED 1 # define V8_HAS_DECLSPEC_NOINLINE 1 # define V8_HAS_DECLSPEC_SELECTANY 1 -# define V8_HAS_DECLSPEC_NORETURN 1 # define V8_HAS___FORCEINLINE 1 @@ -306,9 +359,20 @@ # define V8_ASSUME_ALIGNED(ptr, alignment) \ __builtin_assume_aligned((ptr), (alignment)) #else -# define V8_ASSUME_ALIGNED(ptr) (ptr) +# define V8_ASSUME_ALIGNED(ptr, alignment) (ptr) #endif + +// A macro to mark specific arguments as non-null. +// Use like: +// int add(int* x, int y, int* z) V8_NONNULL(1, 3) { return *x + y + *z; } +#if V8_HAS_ATTRIBUTE_NONNULL +# define V8_NONNULL(...) __attribute__((nonnull(__VA_ARGS__))) +#else +# define V8_NONNULL(...) /* NOT SUPPORTED */ +#endif + + // A macro used to tell the compiler to never inline a particular function. // Don't bother for debug builds. // Use like: @@ -323,31 +387,18 @@ // A macro (V8_DEPRECATED) to mark classes or functions as deprecated. 
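// Editor's illustrative sketch (not part of this patch): with the
// message-only forms defined just below, the macro is written as an
// attribute in front of the declaration it marks rather than wrapping the
// declarator. OldApi/NewApi are hypothetical names used only for this
// example:
//
//   class ExampleApi {
//    public:
//     V8_DEPRECATED("Use NewApi() instead.")
//     void OldApi();
//     V8_DEPRECATE_SOON("Will be deprecated soon; prefer NewApi().")
//     void AlmostDeprecated();
//     void NewApi();
//   };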
-#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE -#define V8_DEPRECATED(message, declarator) \ - declarator __attribute__((deprecated(message))) -#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED -#define V8_DEPRECATED(message, declarator) \ - declarator __attribute__((deprecated)) -#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED -#define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator +#if defined(V8_DEPRECATION_WARNINGS) +# define V8_DEPRECATED(message) [[deprecated(message)]] #else -#define V8_DEPRECATED(message, declarator) declarator +# define V8_DEPRECATED(message) #endif // A macro (V8_DEPRECATE_SOON) to make it easier to see what will be deprecated. -#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) && \ - V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE -#define V8_DEPRECATE_SOON(message, declarator) \ - declarator __attribute__((deprecated(message))) -#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED -#define V8_DEPRECATE_SOON(message, declarator) \ - declarator __attribute__((deprecated)) -#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED -#define V8_DEPRECATE_SOON(message, declarator) __declspec(deprecated) declarator +#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) +# define V8_DEPRECATE_SOON(message) [[deprecated(message)]] #else -#define V8_DEPRECATE_SOON(message, declarator) declarator +# define V8_DEPRECATE_SOON(message) #endif diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index d5d192fb20ca94..fed7fa24bfc098 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -99,8 +99,6 @@ 'release_x64_pointer_compression_without_dchecks', 'V8 Linux64 - arm64 - sim - pointer compression - builder': 'release_simulate_arm64_pointer_compression', - 'V8 Linux - noembed': 'release_x86_noembed', - 'V8 Linux - noembed - debug': 'debug_x86_noembed', 'V8 Fuchsia': 'release_x64_fuchsia', 'V8 Fuchsia - debug': 'debug_x64_fuchsia', 'V8 Linux64 - cfi': 'release_x64_cfi', @@ -162,7 +160,6 @@ 'debug_simulate_arm64_no_snap', 'V8 Linux - arm64 - sim - gc stress': 'debug_simulate_arm64', # Mips. - 'V8 Mips - builder': 'release_mips_no_snap_no_i18n', 'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel', 'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el', # IBM. @@ -197,7 +194,6 @@ 'v8_android_arm64_n5x_rel_ng': 'release_android_arm64', 'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot', 'v8_ios_simulator': 'release_x64_ios_simulator', - 'v8_linux_noembed_rel_ng': 'release_x86_noembed_trybot', 'v8_linux_rel_ng': 'release_x86_gcmole_trybot', 'v8_linux_optional_rel_ng': 'release_x86_trybot', 'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa', @@ -253,7 +249,6 @@ 'v8_mac64_dbg': 'debug_x64', 'v8_mac64_dbg_ng': 'debug_x64', 'v8_mac64_asan_rel': 'release_x64_asan_no_lsan', - 'v8_mips_compile_rel': 'release_mips_no_snap_no_i18n', 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', 'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot', 'v8_linux_arm_dbg': 'debug_simulate_arm', @@ -434,7 +429,7 @@ 'release_x64_cfi_clusterfuzz': [ 'release_bot', 'x64', 'cfi_clusterfuzz'], 'release_x64_msvc': [ - 'release_bot', 'x64', 'msvc'], + 'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'], 'release_x64_correctness_fuzzer' : [ 'release_bot', 'x64', 'v8_correctness_fuzzer'], 'release_x64_fuchsia': [ @@ -515,8 +510,6 @@ # Debug configs for x86. 
'debug_x86': [ 'debug_bot', 'x86'], - 'debug_x86_noembed': [ - 'debug_bot', 'x86', 'v8_no_enable_embedded_builtins'], 'debug_x86_minimal_symbols': [ 'debug_bot', 'x86', 'minimal_symbols'], 'debug_x86_no_i18n': [ @@ -535,10 +528,6 @@ # Release configs for x86. 'release_x86': [ 'release_bot', 'x86'], - 'release_x86_noembed': [ - 'release_bot', 'x86', 'v8_no_enable_embedded_builtins'], - 'release_x86_noembed_trybot': [ - 'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'], 'release_x86_gcc': [ 'release_bot', 'x86', 'gcc', 'v8_check_header_includes'], 'release_x86_gcc_minimal_symbols': [ @@ -568,11 +557,6 @@ 'release_x86_verify_csa': [ 'release_bot', 'x86', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_csa'], - - # Release configs for mips. - 'release_mips_no_snap_no_i18n': [ - 'release', 'mips', 'no_sysroot', 'static', 'v8_no_i18n', - 'v8_snapshot_none'], }, 'mixins': { @@ -648,8 +632,7 @@ }, 'gcc': { - # TODO(machenbach): Remove cxx11 restriction when updating gcc version. - 'gn_args': 'is_clang=false use_cxx11=true', + 'gn_args': 'is_clang=false', }, 'gcmole': { @@ -685,28 +668,14 @@ 'gn_args': 'symbol_level=1', }, - 'mips': { - 'mixins': ['mips_bundled_toolchain'], - 'gn_args': 'target_cpu="mips"', - }, - - 'mips_bundled_toolchain': { - 'gn_args': 'custom_toolchain="tools/toolchain:mips-bundled" ' - 'ldso_path="tools/mips_toolchain/sysroot/usr/lib/ld.so.1" ' - 'gcc_target_rpath="tools/mips_toolchain/sysroot/usr/lib:' - 'tools/mips_toolchain/mips-mti-linux-gnu/lib:\$ORIGIN/."', - }, - 'msan': { 'mixins': ['v8_enable_test_features'], - 'gn_args': ('is_msan=true msan_track_origins=2 ' - 'use_prebuilt_instrumented_libraries=true'), + 'gn_args': 'is_msan=true msan_track_origins=2', }, 'msan_no_origins': { 'mixins': ['v8_enable_test_features'], - 'gn_args': ('is_msan=true msan_track_origins=0 ' - 'use_prebuilt_instrumented_libraries=true'), + 'gn_args': 'is_msan=true msan_track_origins=0', }, 'msvc': { @@ -717,6 +686,10 @@ 'gn_args': 'use_custom_libcxx=false', }, + 'no_goma': { + 'gn_args': 'use_goma=false', + }, + 'no_sysroot': { 'gn_args': 'use_sysroot=false', }, @@ -733,6 +706,10 @@ 'mixins': ['release', 'static', 'goma'], }, + 'release_bot_no_goma': { + 'mixins': ['release', 'static', 'no_goma'], + }, + 'release_trybot': { 'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'], }, @@ -803,10 +780,6 @@ 'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true', }, - 'v8_no_enable_embedded_builtins': { - 'gn_args': 'v8_enable_embedded_builtins=false', - }, - 'v8_enable_lite_mode': { 'gn_args': 'v8_enable_lite_mode=true', }, @@ -852,9 +825,9 @@ 'gn_args': 'v8_use_external_startup_data=false', }, - 'v8_snapshot_none': { - 'gn_args': 'v8_use_snapshot=false', - }, + # TODO(https://crbug.com/v8/8531): Remove this config and all bots, since + # no-snapshot was deprecated and removed. 
+ 'v8_snapshot_none': {}, 'v8_verify_heap': { 'gn_args': 'v8_enable_verify_heap=true', diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 0d340db00e70de..1b805df9599d12 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -43,7 +43,7 @@ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, {'name': 'v8testing', 'variant': 'trusted', 'shards': 4}, {'name': 'mozilla', 'variant': 'default'}, - {'name': 'test262', 'variant': 'default', 'shards': 7}, + {'name': 'test262', 'variant': 'default', 'shards': 9}, ], }, ############################################################################## @@ -60,7 +60,7 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'test262', 'variant': 'default', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'shards': 3}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], @@ -92,20 +92,12 @@ {'name': 'benchmarks', 'variant': 'extra'}, {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, - {'name': 'test262_variants', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, + {'name': 'test262', 'shards': 2}, + {'name': 'test262', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, ], }, - 'v8_linux_noembed_rel_ng_triggered': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 2}, - ], - }, 'v8_linux_noi18n_rel_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', @@ -145,8 +137,8 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262_variants', 'shards': 4}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'shards': 4}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, @@ -162,7 +154,7 @@ {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1}, {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1}, {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, - {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1}, + {'name': 'test262', 'variant': 'code_serializer', 'shards': 1}, # No SSE3. 
{ 'name': 'mozilla', @@ -229,7 +221,7 @@ {'name': 'mozilla'}, {'name': 'test262', 'variant': 'default'}, {'name': 'v8testing', 'shards': 7}, - {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 6}, {'name': 'v8testing', 'variant': 'trusted', 'shards': 2}, ], }, @@ -261,7 +253,7 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'test262_variants', 'shards': 7}, + {'name': 'test262', 'shards': 7}, {'name': 'v8testing', 'shards': 3}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'variant': 'slow_path'}, @@ -291,7 +283,7 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'test262', 'variant': 'default'}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'shards': 3}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, @@ -318,7 +310,7 @@ {'name': 'mjsunit', 'variant': 'stress_sampling'}, {'name': 'webkit', 'variant': 'stress_sampling'}, # Infra staging. - {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2}, + {'name': 'test262', 'variant': 'infra_staging', 'shards': 2}, {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, ], }, @@ -345,9 +337,9 @@ {'name': 'mozilla', 'variant': 'assert_types'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'perf_integration'}, - {'name': 'test262_variants', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'assert_types', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, + {'name': 'test262', 'shards': 2}, + {'name': 'test262', 'variant': 'assert_types', 'shards': 2}, + {'name': 'test262', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, @@ -379,8 +371,8 @@ # TODO(machenbach): Add mozilla tests. 
{'name': 'mjsunit_sp_frame_access'}, {'name': 'optimize_for_size'}, - {'name': 'test262_variants', 'shards': 4}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'shards': 4}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8initializers'}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, @@ -658,8 +650,8 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262_variants', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'extra'}, + {'name': 'test262', 'shards': 2}, + {'name': 'test262', 'variant': 'extra'}, {'name': 'v8initializers'}, {'name': 'v8testing'}, {'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates']}, @@ -712,9 +704,9 @@ {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262_variants', 'shards': 6}, - {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'shards': 6}, + {'name': 'test262', 'variant': 'code_serializer', 'shards': 2}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'shards': 3}, { 'name': 'v8testing', @@ -761,22 +753,6 @@ }, ], }, - 'V8 Linux - noembed': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing'}, - ], - }, - 'V8 Linux - noembed - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 3}, - ], - }, 'V8 Linux - full debug': { 'swarming_dimensions': { 'os': 'Ubuntu-16.04', @@ -898,9 +874,9 @@ {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, {'name': 'perf_integration'}, - {'name': 'test262_variants', 'shards': 2}, - {'name': 'test262_variants', 'variant': 'assert_types'}, - {'name': 'test262_variants', 'variant': 'extra'}, + {'name': 'test262', 'shards': 2}, + {'name': 'test262', 'variant': 'assert_types'}, + {'name': 'test262', 'variant': 'extra'}, {'name': 'v8initializers'}, {'name': 'v8testing'}, {'name': 'v8testing', 'variant': 'assert_types'}, @@ -957,8 +933,8 @@ {'name': 'mozilla'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, - {'name': 'test262_variants', 'shards': 5}, - {'name': 'test262_variants', 'variant': 'extra', 'shards': 3}, + {'name': 'test262', 'shards': 5}, + {'name': 'test262', 'variant': 'extra', 'shards': 3}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, @@ -1067,7 +1043,7 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'test262_variants', 'shards': 5}, + {'name': 'test262', 'shards': 5}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, @@ -1271,7 +1247,7 @@ }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, - {'name': 'test262', 'variant': 'default', 'shards': 6}, + {'name': 'test262', 'variant': 'default', 'shards': 8}, {'name': 'v8testing', 'variant': 'default', 'shards': 3}, {'name': 'v8testing', 'variant': 'trusted', 'shards': 3}, ], @@ -1411,7 +1387,7 @@ {'name': 'mozilla', 'shards': 4}, {'name': 'test262', 'variant': 'default'}, {'name': 'v8testing', 'shards': 8}, - {'name': 'v8testing', 'variant': 'extra', 'shards': 4}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 6}, {'name': 
'v8testing', 'variant': 'trusted', 'shards': 2}, # Armv8-a. { @@ -1585,19 +1561,6 @@ {'name': 'v8testing', 'shards': 7}, ], }, - 'V8 Mips - big endian - nosnap': { - 'swarming_dimensions': { - 'cpu': 'mips-32', - 'os': 'Debian-8.7', - }, - 'swarming_task_attrs': { - 'expiration': 18000, - 'hard_timeout': 18000, - }, - 'tests': [ - {'name': 'v8testing', 'variant': 'default', 'shards': 2}, - ], - }, ############################################################################## # Clusterfuzz. 'V8 NumFuzz': { @@ -1817,9 +1780,9 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 8}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'v8testing', 'shards': 10}, ], }, 'V8 arm - sim - stable branch': { @@ -1837,9 +1800,9 @@ 'os': 'Ubuntu-16.04', }, 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 8}, + {'name': 'mozilla', 'shards': 2}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, + {'name': 'v8testing', 'shards': 10}, ], }, 'V8 mips64el - sim - beta branch': { diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS index ef5a56dbfcecf3..f51e220309656a 100644 --- a/deps/v8/src/api/OWNERS +++ b/deps/v8/src/api/OWNERS @@ -1,5 +1,5 @@ file:../../include/OWNERS -clemensh@chromium.org +clemensb@chromium.org ishell@chromium.org jkummerow@chromium.org leszeks@chromium.org diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 8be7f8558c8dde..c6fdeec9028fa9 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -127,6 +127,11 @@ #endif // V8_OS_WIN64 #endif // V8_OS_WIN +#define TRACE_BS(...) \ + do { \ + if (i::FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ + } while (false) + namespace v8 { /* @@ -2631,7 +2636,7 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript( i::Isolate* isolate = reinterpret_cast(v8_isolate); i::ScriptStreamingData* data = source->impl(); std::unique_ptr task = - base::make_unique(data, isolate); + std::make_unique(data, isolate); data->task = std::move(task); return new ScriptCompiler::ScriptStreamingTask(data); } @@ -3743,6 +3748,42 @@ void v8::WasmModuleObject::CheckCast(Value* that) { "Could not convert to wasm module object"); } +v8::BackingStore::~BackingStore() { + auto i_this = reinterpret_cast(this); + i_this->~BackingStore(); // manually call internal destructor +} + +void* v8::BackingStore::Data() const { + return reinterpret_cast(this)->buffer_start(); +} + +size_t v8::BackingStore::ByteLength() const { + return reinterpret_cast(this)->byte_length(); +} + +std::shared_ptr v8::ArrayBuffer::GetBackingStore() { + i::Handle self = Utils::OpenHandle(this); + std::shared_ptr backing_store = self->GetBackingStore(); + if (!backing_store) { + backing_store = + i::BackingStore::EmptyBackingStore(i::SharedFlag::kNotShared); + } + i::GlobalBackingStoreRegistry::Register(backing_store); + std::shared_ptr bs_base = backing_store; + return std::static_pointer_cast(bs_base); +} + +std::shared_ptr v8::SharedArrayBuffer::GetBackingStore() { + i::Handle self = Utils::OpenHandle(this); + std::shared_ptr backing_store = self->GetBackingStore(); + if (!backing_store) { + backing_store = i::BackingStore::EmptyBackingStore(i::SharedFlag::kShared); + } + i::GlobalBackingStoreRegistry::Register(backing_store); + std::shared_ptr bs_base = backing_store; + return std::static_pointer_cast(bs_base); +} + void 
v8::ArrayBuffer::CheckCast(Value* that) { i::Handle obj = Utils::OpenHandle(that); Utils::ApiCheck( @@ -5307,7 +5348,7 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string, int end = start + length; if ((length == -1) || (length > str->length() - start)) end = str->length(); if (end < 0) return 0; - i::String::WriteToFlat(*str, buffer, start, end); + if (start < end) i::String::WriteToFlat(*str, buffer, start, end); if (!(options & String::NO_NULL_TERMINATION) && (length == -1 || end - start < length)) { buffer[end - start] = '\0'; @@ -5704,6 +5745,11 @@ void v8::V8::InitializeExternalStartupData(const char* natives_blob, i::InitializeExternalStartupData(natives_blob, snapshot_blob); } +// static +void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) { + i::InitializeExternalStartupDataFromFile(snapshot_blob); +} + const char* v8::V8::GetVersion() { return i::Version::GetVersion(); } template @@ -7219,20 +7265,78 @@ bool v8::ArrayBuffer::IsDetachable() const { return Utils::OpenHandle(this)->is_detachable(); } -v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { - i::Handle self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", - "ArrayBuffer already externalized"); - self->set_is_external(true); +namespace { +// The backing store deleter just deletes the indirection, which downrefs +// the shared pointer. It will get collected normally. +void BackingStoreDeleter(void* buffer, size_t length, void* info) { + std::shared_ptr* bs_indirection = + reinterpret_cast*>(info); + if (bs_indirection) { + i::BackingStore* backing_store = bs_indirection->get(); + TRACE_BS("API:delete bs=%p mem=%p (length=%zu)\n", backing_store, + backing_store->buffer_start(), backing_store->byte_length()); + USE(backing_store); + } + delete bs_indirection; +} - const v8::ArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); +void* MakeDeleterData(std::shared_ptr backing_store) { + if (!backing_store) return nullptr; + TRACE_BS("API:extern bs=%p mem=%p (length=%zu)\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length()); + return new std::shared_ptr(backing_store); +} - // A regular copy is good enough. No move semantics needed. - return contents; +std::shared_ptr LookupOrCreateBackingStore( + i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared, + ArrayBufferCreationMode mode) { + // "internalized" means that the storage was allocated by the + // ArrayBufferAllocator and thus should be freed upon destruction. + bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized; + + // Try to lookup a previously-registered backing store in the global + // registry. If found, use that instead of wrapping an embedder allocation. + std::shared_ptr backing_store = + i::GlobalBackingStoreRegistry::Lookup(data, byte_length); + + if (backing_store) { + // Check invariants for a previously-found backing store. + + // 1. We cannot allow an embedder to first allocate a backing store that + // should not be freed upon destruct, and then allocate an alias that should + // destruct it. The other order is fine. + bool changing_destruct_mode = + free_on_destruct && !backing_store->free_on_destruct(); + Utils::ApiCheck( + !changing_destruct_mode, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that should not be freed on destruct"); + + // 2. 
We cannot allow embedders to use the same backing store for both + // SharedArrayBuffers and regular ArrayBuffers. + bool changing_shared_flag = + (shared == i::SharedFlag::kShared) != backing_store->is_shared(); + Utils::ApiCheck( + !changing_shared_flag, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that does not match shared flag"); + } else { + // No previous backing store found. + backing_store = i::BackingStore::WrapAllocation( + i_isolate, data, byte_length, shared, free_on_destruct); + + // The embedder already has a direct pointer to the buffer start, so + // globally register the backing store in case they come back with the + // same buffer start and the backing store is marked as free_on_destruct. + i::GlobalBackingStoreRegistry::Register(backing_store); + } + return backing_store; } +std::shared_ptr ToInternal( + std::shared_ptr backing_store) { + return std::static_pointer_cast(backing_store); +} +} // namespace + v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, void* allocation_base, size_t allocation_length, @@ -7249,29 +7353,70 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, DCHECK_LE(byte_length_, allocation_length_); } -void WasmMemoryDeleter(void* buffer, size_t lenght, void* info) { - internal::wasm::WasmEngine* engine = - reinterpret_cast(info); - CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer)); +v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { + return GetContents(true); } -void ArrayBufferDeleter(void* buffer, size_t length, void* info) { - v8::ArrayBuffer::Allocator* allocator = - reinterpret_cast(info); - allocator->Free(buffer, length); +void v8::ArrayBuffer::Externalize( + const std::shared_ptr& backing_store) { + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", + "ArrayBuffer already externalized"); + self->set_is_external(true); + DCHECK_EQ(self->backing_store(), backing_store->Data()); } v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::ArrayBuffer; + i::Handle self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() ? Allocator::AllocationMode::kReservation - : Allocator::AllocationMode::kNormal, - self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter, - self->is_wasm_memory() - ? static_cast(self->GetIsolate()->wasm_engine()) - : static_cast(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", + "ArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. 
+ DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7296,33 +7441,85 @@ Local v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) { i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) { + i::MaybeHandle result = + i_isolate->factory()->NewJSArrayBufferAndBackingStore( + byte_length, i::InitializedFlag::kZeroInitialized); + + i::Handle array_buffer; + if (!result.ToHandle(&array_buffer)) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New"); } - return Utils::ToLocal(obj); + + return Utils::ToLocal(array_buffer); } Local v8::ArrayBuffer::New(Isolate* isolate, void* data, size_t byte_length, ArrayBufferCreationMode mode) { // Embedders must guarantee that the external backing store is valid. 
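// Editor's illustrative sketch (not part of this patch): instead of handing
// V8 a raw (data, byte_length) pair as in the overload here, an embedder can
// use the BackingStore-based overloads added in this patch. The function
// name and buffer size are hypothetical; assumes a live Isolate* and an
// active HandleScope.
//
//   v8::Local<v8::ArrayBuffer> MakeBuffer(v8::Isolate* isolate) {
//     std::unique_ptr<v8::BackingStore> store =
//         v8::ArrayBuffer::NewBackingStore(isolate, /*byte_length=*/1024);
//     std::shared_ptr<v8::BackingStore> shared(std::move(store));
//     v8::Local<v8::ArrayBuffer> buffer =
//         v8::ArrayBuffer::New(isolate, shared);
//     // The same store can be recovered later via buffer->GetBackingStore(),
//     // so no raw-pointer registry lookup is needed on the embedder side.
//     return buffer;
//   }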
- CHECK(byte_length == 0 || data != nullptr); + CHECK_IMPLIES(byte_length != 0, data != nullptr); CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength); i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + + std::shared_ptr backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode); + + i::Handle obj = + i_isolate->factory()->NewJSArrayBuffer(std::move(backing_store)); + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } + return Utils::ToLocal(obj); +} + +Local v8::ArrayBuffer::New( + Isolate* isolate, std::shared_ptr backing_store) { + CHECK_IMPLIES(backing_store->ByteLength() != 0, + backing_store->Data() != nullptr); + CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength); + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, ArrayBuffer, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::shared_ptr i_backing_store( + ToInternal(std::move(backing_store))); + Utils::ApiCheck( + !i_backing_store->is_shared(), "v8_ArrayBuffer_New", + "Cannot construct ArrayBuffer with a BackingStore of SharedArrayBuffer"); i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length); + i_isolate->factory()->NewJSArrayBuffer(std::move(i_backing_store)); return Utils::ToLocal(obj); } +std::unique_ptr v8::ArrayBuffer::NewBackingStore( + Isolate* isolate, size_t byte_length) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, ArrayBuffer, NewBackingStore); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::unique_ptr backing_store = + i::BackingStore::Allocate(i_isolate, byte_length, + i::SharedFlag::kNotShared, + i::InitializedFlag::kZeroInitialized); + if (!backing_store) { + i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::NewBackingStore"); + } + return std::unique_ptr( + static_cast(backing_store.release())); +} + +std::unique_ptr v8::ArrayBuffer::NewBackingStore( + void* data, size_t byte_length, BackingStoreDeleterCallback deleter, + void* deleter_data) { + std::unique_ptr backing_store = + i::BackingStore::WrapAllocation(data, byte_length, deleter, deleter_data, + i::SharedFlag::kNotShared); + return std::unique_ptr( + static_cast(backing_store.release())); +} + Local v8::ArrayBufferView::Buffer() { i::Handle obj = Utils::OpenHandle(this); i::Handle buffer; @@ -7362,9 +7559,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) { bool v8::ArrayBufferView::HasBuffer() const { i::Handle self = Utils::OpenHandle(this); - i::Handle buffer(i::JSArrayBuffer::cast(self->buffer()), - self->GetIsolate()); - return buffer->backing_store() != nullptr; + if (!self->IsJSTypedArray()) return true; + auto typed_array = i::Handle::cast(self); + return !typed_array->is_on_heap(); } size_t v8::ArrayBufferView::ByteOffset() { @@ -7460,13 +7657,16 @@ i::Handle SetupSharedArrayBuffer( i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + + std::shared_ptr backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kShared, mode); + i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - bool is_wasm_memory = - i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data); - 
i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length, i::SharedFlag::kShared, is_wasm_memory); + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); + + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } return obj; } @@ -7476,20 +7676,6 @@ bool v8::SharedArrayBuffer::IsExternal() const { return Utils::OpenHandle(this)->is_external(); } -v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { - i::Handle self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", - "SharedArrayBuffer already externalized"); - self->set_is_external(true); - - const v8::SharedArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); - - // A regular copy is good enough. No move semantics needed. - return contents; -} - v8::SharedArrayBuffer::Contents::Contents( void* data, size_t byte_length, void* allocation_base, size_t allocation_length, Allocator::AllocationMode allocation_mode, @@ -7505,20 +7691,72 @@ v8::SharedArrayBuffer::Contents::Contents( DCHECK_LE(byte_length_, allocation_length_); } +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { + return GetContents(true); +} + +void v8::SharedArrayBuffer::Externalize( + const std::shared_ptr& backing_store) { + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", + "SharedArrayBuffer already externalized"); + self->set_is_external(true); + + DCHECK_EQ(self->backing_store(), backing_store->Data()); +} + v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents( + bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::SharedArrayBuffer; + i::Handle self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() - ? ArrayBuffer::Allocator::AllocationMode::kReservation - : ArrayBuffer::Allocator::AllocationMode::kNormal, - self->is_wasm_memory() - ? reinterpret_cast(WasmMemoryDeleter) - : reinterpret_cast(ArrayBufferDeleter), - self->is_wasm_memory() - ? static_cast(self->GetIsolate()->wasm_engine()) - : static_cast(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", + "SharedArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. + DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. 
We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7533,14 +7771,19 @@ Local v8::SharedArrayBuffer::New(Isolate* isolate, i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true, - i::SharedFlag::kShared)) { + + std::unique_ptr backing_store = + i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared, + i::InitializedFlag::kZeroInitialized); + + if (!backing_store) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New"); } + + i::Handle obj = + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); return Utils::ToLocalShared(obj); } @@ -7552,6 +7795,24 @@ Local v8::SharedArrayBuffer::New( return Utils::ToLocalShared(buffer); } +Local v8::SharedArrayBuffer::New( + Isolate* isolate, std::shared_ptr backing_store) { + CHECK(i::FLAG_harmony_sharedarraybuffer); + CHECK_IMPLIES(backing_store->ByteLength() != 0, + backing_store->Data() != nullptr); + CHECK_LE(backing_store->ByteLength(), i::JSArrayBuffer::kMaxByteLength); + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, SharedArrayBuffer, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::shared_ptr i_backing_store(ToInternal(backing_store)); + Utils::ApiCheck( + i_backing_store->is_shared(), "v8_SharedArrayBuffer_New", + "Cannot construct SharedArrayBuffer with BackingStore of ArrayBuffer"); + i::Handle obj = + i_isolate->factory()->NewJSSharedArrayBuffer(std::move(i_backing_store)); + return Utils::ToLocalShared(obj); +} + Local v8::SharedArrayBuffer::New( Isolate* isolate, const SharedArrayBuffer::Contents& contents, ArrayBufferCreationMode mode) { @@ -7560,6 +7821,32 @@ Local v8::SharedArrayBuffer::New( return Utils::ToLocalShared(buffer); } +std::unique_ptr v8::SharedArrayBuffer::NewBackingStore( + Isolate* isolate, size_t byte_length) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, SharedArrayBuffer, NewBackingStore); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + std::unique_ptr backing_store = + i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared, + i::InitializedFlag::kZeroInitialized); + if (!backing_store) { + i::FatalProcessOutOfMemory(i_isolate, + "v8::SharedArrayBuffer::NewBackingStore"); + } + return std::unique_ptr( + static_cast(backing_store.release())); +} + +std::unique_ptr v8::SharedArrayBuffer::NewBackingStore( + void* data, size_t byte_length, BackingStoreDeleterCallback deleter, + void* deleter_data) { + std::unique_ptr 
backing_store = + i::BackingStore::WrapAllocation(data, byte_length, deleter, deleter_data, + i::SharedFlag::kShared); + return std::unique_ptr( + static_cast(backing_store.release())); +} + Local v8::Symbol::New(Isolate* isolate, Local name) { i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, Symbol, New); @@ -7899,6 +8186,13 @@ Isolate* Isolate::Allocate() { return reinterpret_cast(i::Isolate::New()); } +void Isolate::SetArrayBufferAllocatorShared( + std::shared_ptr allocator) { + i::Isolate* isolate = reinterpret_cast(this); + CHECK_EQ(allocator.get(), isolate->array_buffer_allocator()); + isolate->set_array_buffer_allocator_shared(std::move(allocator)); +} + // static // This is separate so that tests can provide a different |isolate|. void Isolate::Initialize(Isolate* isolate, @@ -8235,6 +8529,15 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( return true; } +v8::MaybeLocal Isolate::MeasureMemory( + v8::Local context, MeasureMemoryMode mode) { + i::Isolate* isolate = reinterpret_cast(this); + i::Handle native_context = + handle(Utils::OpenHandle(*context)->native_context(), isolate); + return v8::Utils::PromiseToLocal( + isolate->heap()->MeasureMemory(native_context, mode)); +} + void Isolate::GetStackSample(const RegisterState& state, void** frames, size_t frames_limit, SampleInfo* sample_info) { RegisterState regs = state; @@ -9062,9 +9365,9 @@ bool debug::Script::GetPossibleBreakpoints( i::Handle script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM && this->SourceMappingURL().IsEmpty()) { - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - return module_object.GetPossibleBreakpoints(start, end, locations); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + return i::WasmModuleObject::GetPossibleBreakpoints(native_module, start, + end, locations); } i::Script::InitLineEnds(script); @@ -9113,8 +9416,9 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const { i::Handle script = Utils::OpenHandle(this); if (script->type() == i::Script::TYPE_WASM) { if (this->SourceMappingURL().IsEmpty()) { - return i::WasmModuleObject::cast(script->wasm_module_object()) - .GetFunctionOffset(location.GetLineNumber()) + + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); + return i::wasm::GetWasmFunctionOffset(module, location.GetLineNumber()) + location.GetColumnNumber(); } DCHECK_EQ(0, location.GetLineNumber()); @@ -9202,9 +9506,8 @@ int debug::WasmScript::NumFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_GE(i::kMaxInt, module->functions.size()); return static_cast(module->functions.size()); } @@ -9213,21 +9516,26 @@ int debug::WasmScript::NumImportedFunctions() const { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = 
script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_GE(i::kMaxInt, module->num_imported_functions); return static_cast(module->num_imported_functions); } +MemorySpan debug::WasmScript::Bytecode() const { + i::Handle script = Utils::OpenHandle(this); + i::Vector wire_bytes = + script->wasm_native_module()->wire_bytes(); + return {wire_bytes.begin(), wire_bytes.size()}; +} + std::pair debug::WasmScript::GetFunctionRange( int function_index) const { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_LE(0, function_index); DCHECK_GT(module->functions.size(), function_index); const i::wasm::WasmFunction& func = module->functions[function_index]; @@ -9241,14 +9549,12 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) { i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - const i::wasm::WasmModule* module = module_object.module(); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); DCHECK_LE(0, function_index); DCHECK_GT(module->functions.size(), function_index); const i::wasm::WasmFunction& func = module->functions[function_index]; - i::wasm::ModuleWireBytes wire_bytes( - module_object.native_module()->wire_bytes()); + i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes()); i::Vector function_bytes = wire_bytes.GetFunctionBytes(&func); // TODO(herhut): Maybe also take module, name and signature into account. return i::StringHasher::HashSequentialString(function_bytes.begin(), @@ -9260,9 +9566,10 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction( i::DisallowHeapAllocation no_gc; i::Handle script = Utils::OpenHandle(this); DCHECK_EQ(i::Script::TYPE_WASM, script->type()); - i::WasmModuleObject module_object = - i::WasmModuleObject::cast(script->wasm_module_object()); - return module_object.DisassembleFunction(function_index); + i::wasm::NativeModule* native_module = script->wasm_native_module(); + const i::wasm::WasmModule* module = native_module->module(); + i::wasm::ModuleWireBytes wire_bytes(native_module->wire_bytes()); + return DisassembleWasmFunction(module, wire_bytes, function_index); } debug::Location::Location(int line_number, int column_number) @@ -9438,7 +9745,7 @@ debug::ConsoleCallArguments::ConsoleCallArguments( } debug::ConsoleCallArguments::ConsoleCallArguments( - internal::BuiltinArguments& args) + const internal::BuiltinArguments& args) : v8::FunctionCallbackInfo( nullptr, // Drop the first argument (receiver, i.e. the "console" object). 
@@ -9501,14 +9808,14 @@ v8::Local debug::GeneratorObject::Cast( MaybeLocal debug::EvaluateGlobal(v8::Isolate* isolate, v8::Local source, - bool throw_on_side_effect) { + EvaluateGlobalMode mode) { i::Isolate* internal_isolate = reinterpret_cast(isolate); PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value); Local result; - has_pending_exception = !ToLocal( - i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source), - throw_on_side_effect), - &result); + has_pending_exception = + !ToLocal(i::DebugEvaluate::Global( + internal_isolate, Utils::OpenHandle(*source), mode), + &result); RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -10009,6 +10316,10 @@ const char* CodeEvent::GetComment() { return reinterpret_cast(this)->comment; } +uintptr_t CodeEvent::GetPreviousCodeStartAddress() { + return reinterpret_cast(this)->previous_code_start_address; +} + const char* CodeEvent::GetCodeEventTypeName(CodeEventType code_event_type) { switch (code_event_type) { case kUnknownType: @@ -10303,17 +10614,6 @@ void Testing::DeoptimizeAll(Isolate* isolate) { i::Deoptimizer::DeoptimizeAll(i_isolate); } -void EmbedderHeapTracer::TracePrologue(TraceFlags flags) { -#if __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated" -#endif - TracePrologue(); -#if __clang__ -#pragma clang diagnostic pop -#endif -} - void EmbedderHeapTracer::TraceEpilogue(TraceSummary* trace_summary) { #if __clang__ #pragma clang diagnostic push @@ -10369,11 +10669,12 @@ void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) { } void EmbedderHeapTracer::RegisterEmbedderReference( - const TracedGlobal& ref) { + const TracedReferenceBase& ref) { if (ref.IsEmpty()) return; i::Heap* const heap = reinterpret_cast(isolate_)->heap(); - heap->RegisterExternallyReferencedObject(reinterpret_cast(*ref)); + heap->RegisterExternallyReferencedObject( + reinterpret_cast(ref.val_)); } void EmbedderHeapTracer::IterateTracedGlobalHandles( @@ -10383,6 +10684,26 @@ void EmbedderHeapTracer::IterateTracedGlobalHandles( isolate->global_handles()->IterateTracedNodes(visitor); } +bool EmbedderHeapTracer::IsRootForNonTracingGC( + const v8::TracedReference& handle) { + return true; +} + +bool EmbedderHeapTracer::IsRootForNonTracingGC( + const v8::TracedGlobal& handle) { + return true; +} + +void EmbedderHeapTracer::ResetHandleInNonTracingGC( + const v8::TracedReference& handle) { + UNREACHABLE(); +} + +void EmbedderHeapTracer::ResetHandleInNonTracingGC( + const v8::TracedGlobal& handle) { + UNREACHABLE(); +} + namespace internal { const size_t HandleScopeImplementer::kEnteredContextsOffset = @@ -10473,9 +10794,10 @@ char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) { return storage + ArchiveSpacePerThread(); } -DeferredHandles* HandleScopeImplementer::Detach(Address* prev_limit) { - DeferredHandles* deferred = - new DeferredHandles(isolate()->handle_scope_data()->next, isolate()); +std::unique_ptr HandleScopeImplementer::Detach( + Address* prev_limit) { + std::unique_ptr deferred( + new DeferredHandles(isolate()->handle_scope_data()->next, isolate())); while (!blocks_.empty()) { Address* block_start = blocks_.back(); @@ -10584,3 +10906,5 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, } // namespace internal } // namespace v8 + +#undef TRACE_BS diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 21bbb3a101549d..907a68c4c26538 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -5,6 +5,8 @@ #ifndef V8_API_API_H_ 
#define V8_API_API_H_ +#include + #include "include/v8-testing.h" #include "src/execution/isolate.h" #include "src/heap/factory.h" @@ -431,7 +433,7 @@ class HandleScopeImplementer { } void BeginDeferredScope(); - DeferredHandles* Detach(Address* prev_limit); + std::unique_ptr Detach(Address* prev_limit); Isolate* isolate_; DetachableVector blocks_; diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS index 08f39f8d6a2df3..072ba582113ac0 100644 --- a/deps/v8/src/asmjs/OWNERS +++ b/deps/v8/src/asmjs/OWNERS @@ -1,5 +1,5 @@ ahaas@chromium.org -clemensh@chromium.org +clemensb@chromium.org mstarzinger@chromium.org titzer@chromium.org diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 7433b6a12cbb72..22714ac16bd41a 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -322,7 +322,7 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) { std::unique_ptr AsmJs::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator) { - return base::make_unique(parse_info, literal, allocator); + return std::make_unique(parse_info, literal, allocator); } namespace { @@ -387,7 +387,18 @@ MaybeHandle AsmJs::InstantiateAsmWasm(Isolate* isolate, ReportInstantiationFailure(script, position, "Requires heap buffer"); return MaybeHandle(); } - wasm_engine->memory_tracker()->MarkWasmMemoryNotGrowable(memory); + // AsmJs memory must be an ArrayBuffer. + if (memory->is_shared()) { + ReportInstantiationFailure(script, position, + "Invalid heap type: SharedArrayBuffer"); + return MaybeHandle(); + } + // Mark the buffer as being used as an asm.js memory. This implies two + // things: 1) if the buffer is from a Wasm memory, that memory can no longer + // be grown, since that would detach this buffer, and 2) the buffer cannot + // be postMessage()'d, as that also detaches the buffer. + memory->set_is_asmjs_memory(true); + memory->set_is_detachable(false); size_t size = memory->byte_length(); // Check the asm.js heap size against the valid limits. if (!IsValidAsmjsMemorySize(size)) { diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h index 3e714cba7a67ed..80a75d0372900c 100644 --- a/deps/v8/src/asmjs/asm-js.h +++ b/deps/v8/src/asmjs/asm-js.h @@ -7,6 +7,8 @@ // Clients of this interface shouldn't depend on lots of asmjs internals. // Do not include anything from src/asmjs here! 
+#include + #include "src/common/globals.h" namespace v8 { diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 6ac39dc89ccf31..33872399262e2a 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -387,8 +387,8 @@ void AsmJsParser::ValidateModule() { uint32_t import_index = module_builder_->AddGlobalImport( global_import.import_name, global_import.value_type, false /* mutability */); - start->EmitWithI32V(kExprGetGlobal, import_index); - start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info)); + start->EmitWithI32V(kExprGlobalGet, import_index); + start->EmitWithI32V(kExprGlobalSet, VarIndex(global_import.var_info)); } start->Emit(kExprEnd); FunctionSig::Builder b(zone(), 0, 0); @@ -952,8 +952,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count, } else { FAIL("Bad local variable definition"); } - current_function_builder_->EmitWithI32V(kExprGetGlobal, - VarIndex(sinfo)); + current_function_builder_->EmitWithI32V(kExprGlobalGet, + VarIndex(sinfo)); current_function_builder_->EmitSetLocal(info->index); } else if (sinfo->type->IsA(stdlib_fround_)) { EXPECT_TOKEN('('); @@ -1447,7 +1447,7 @@ AsmType* AsmJsParser::Identifier() { if (info->kind != VarKind::kGlobal) { FAILn("Undefined global variable"); } - current_function_builder_->EmitWithI32V(kExprGetGlobal, VarIndex(info)); + current_function_builder_->EmitWithI32V(kExprGlobalGet, VarIndex(info)); return info->type; } UNREACHABLE(); @@ -1558,8 +1558,8 @@ AsmType* AsmJsParser::AssignmentExpression() { if (info->kind == VarKind::kLocal) { current_function_builder_->EmitTeeLocal(info->index); } else if (info->kind == VarKind::kGlobal) { - current_function_builder_->EmitWithU32V(kExprSetGlobal, VarIndex(info)); - current_function_builder_->EmitWithU32V(kExprGetGlobal, VarIndex(info)); + current_function_builder_->EmitWithU32V(kExprGlobalSet, VarIndex(info)); + current_function_builder_->EmitWithU32V(kExprGlobalGet, VarIndex(info)); } else { UNREACHABLE(); } @@ -2489,7 +2489,7 @@ void AsmJsParser::ValidateFloatCoercion() { // because imported functions are not allowed to have float return type. call_coercion_position_ = scanner_.Position(); AsmType* ret; - RECURSE(ret = ValidateExpression()); + RECURSE(ret = AssignmentExpression()); if (ret->IsA(AsmType::Floatish())) { // Do nothing, as already a float. } else if (ret->IsA(AsmType::DoubleQ())) { diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc index 95bd94d8d4d874..b583b5e4214ad4 100644 --- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -46,15 +46,28 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { if (expr->instance_members_initializer_function() != nullptr) { Visit(expr->instance_members_initializer_function()); } - ZonePtrList* props = expr->properties(); + ZonePtrList* private_members = + expr->private_members(); + for (int i = 0; i < private_members->length(); ++i) { + ClassLiteralProperty* prop = private_members->at(i); + + // Private fields have their key and value present in + // instance_members_initializer_function, so they will + // already have been visited. 
+ if (prop->value()->IsFunctionLiteral()) { + Visit(prop->value()); + } else { + CheckVisited(prop->value()); + } + } + ZonePtrList* props = expr->public_members(); for (int i = 0; i < props->length(); ++i) { ClassLiteralProperty* prop = props->at(i); - // Private fields and public fields with computed names have both their key + // Public fields with computed names have their key // and value present in instance_members_initializer_function, so they will // already have been visited. - if ((prop->is_computed_name() || prop->is_private()) && - !prop->value()->IsFunctionLiteral()) { + if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) { if (!prop->key()->IsLiteral()) { CheckVisited(prop->key()); } diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h index 2796e59a8dbb90..a52c5f946e0980 100644 --- a/deps/v8/src/ast/ast-traversal-visitor.h +++ b/deps/v8/src/ast/ast-traversal-visitor.h @@ -490,7 +490,13 @@ void AstTraversalVisitor::VisitClassLiteral(ClassLiteral* expr) { if (expr->instance_members_initializer_function() != nullptr) { RECURSE_EXPRESSION(Visit(expr->instance_members_initializer_function())); } - ZonePtrList* props = expr->properties(); + ZonePtrList* private_members = + expr->private_members(); + for (int i = 0; i < private_members->length(); ++i) { + ClassLiteralProperty* prop = private_members->at(i); + RECURSE_EXPRESSION(Visit(prop->value())); + } + ZonePtrList* props = expr->public_members(); for (int i = 0; i < props->length(); ++i) { ClassLiteralProperty* prop = props->at(i); if (!prop->key()->IsLiteral()) { diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 4b6c4805dedc16..130d34dffa518c 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -293,6 +293,10 @@ bool FunctionLiteral::requires_brand_initialization() const { return outer->AsClassScope()->brand() != nullptr; } +bool FunctionLiteral::private_name_lookup_skips_outer_class() const { + return scope()->private_name_lookup_skips_outer_class(); +} + ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value, Kind kind, bool is_computed_name) : LiteralProperty(key, value, is_computed_name), @@ -886,7 +890,7 @@ Handle Literal::BuildValue(Isolate* isolate) const { case kSmi: return handle(Smi::FromInt(smi_), isolate); case kHeapNumber: - return isolate->factory()->NewNumber(number_, AllocationType::kOld); + return isolate->factory()->NewNumber(number_); case kString: return string_->string(); case kSymbol: diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index ced9f775dd57bc..d706dbc37fbc7b 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -724,11 +724,14 @@ class BreakStatement final : public JumpStatement { class ReturnStatement final : public JumpStatement { public: - enum Type { kNormal, kAsyncReturn }; + enum Type { kNormal, kAsyncReturn, kSyntheticAsyncReturn }; Expression* expression() const { return expression_; } Type type() const { return TypeField::decode(bit_field_); } - bool is_async_return() const { return type() == kAsyncReturn; } + bool is_async_return() const { return type() != kNormal; } + bool is_synthetic_async_return() const { + return type() == kSyntheticAsyncReturn; + } int end_position() const { return end_position_; } @@ -745,7 +748,7 @@ class ReturnStatement final : public JumpStatement { Expression* expression_; int end_position_; - using TypeField = JumpStatement::NextBitField; + using TypeField = JumpStatement::NextBitField; }; @@ -917,6 +920,10 @@ class 
TryCatchStatement final : public TryStatement { outer_catch_prediction != HandlerTable::UNCAUGHT; } + bool is_try_catch_for_async() { + return catch_prediction_ == HandlerTable::ASYNC_AWAIT; + } + private: friend class AstNodeFactory; @@ -2343,6 +2350,8 @@ class FunctionLiteral final : public Expression { bool requires_brand_initialization() const; + bool private_name_lookup_skips_outer_class() const; + ProducedPreparseData* produced_preparse_data() const { return produced_preparse_data_; } @@ -2481,10 +2490,10 @@ class ClassLiteral final : public Expression { using Property = ClassLiteralProperty; ClassScope* scope() const { return scope_; } - Variable* class_variable() const { return class_variable_; } Expression* extends() const { return extends_; } FunctionLiteral* constructor() const { return constructor_; } - ZonePtrList* properties() const { return properties_; } + ZonePtrList* public_members() const { return public_members_; } + ZonePtrList* private_members() const { return private_members_; } int start_position() const { return position(); } int end_position() const { return end_position_; } bool has_name_static_property() const { @@ -2497,6 +2506,9 @@ class ClassLiteral final : public Expression { bool is_anonymous_expression() const { return IsAnonymousExpression::decode(bit_field_); } + bool has_private_methods() const { + return HasPrivateMethods::decode(bit_field_); + } bool IsAnonymousFunctionDefinition() const { return is_anonymous_expression(); } @@ -2512,39 +2524,43 @@ class ClassLiteral final : public Expression { private: friend class AstNodeFactory; - ClassLiteral(ClassScope* scope, Variable* class_variable, Expression* extends, - FunctionLiteral* constructor, ZonePtrList* properties, + ClassLiteral(ClassScope* scope, Expression* extends, + FunctionLiteral* constructor, + ZonePtrList* public_members, + ZonePtrList* private_members, FunctionLiteral* static_fields_initializer, FunctionLiteral* instance_members_initializer_function, int start_position, int end_position, bool has_name_static_property, bool has_static_computed_names, - bool is_anonymous) + bool is_anonymous, bool has_private_methods) : Expression(start_position, kClassLiteral), end_position_(end_position), scope_(scope), - class_variable_(class_variable), extends_(extends), constructor_(constructor), - properties_(properties), + public_members_(public_members), + private_members_(private_members), static_fields_initializer_(static_fields_initializer), instance_members_initializer_function_( instance_members_initializer_function) { bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) | HasStaticComputedNames::encode(has_static_computed_names) | - IsAnonymousExpression::encode(is_anonymous); + IsAnonymousExpression::encode(is_anonymous) | + HasPrivateMethods::encode(has_private_methods); } int end_position_; ClassScope* scope_; - Variable* class_variable_; Expression* extends_; FunctionLiteral* constructor_; - ZonePtrList* properties_; + ZonePtrList* public_members_; + ZonePtrList* private_members_; FunctionLiteral* static_fields_initializer_; FunctionLiteral* instance_members_initializer_function_; using HasNameStaticProperty = Expression::NextBitField; using HasStaticComputedNames = HasNameStaticProperty::Next; using IsAnonymousExpression = HasStaticComputedNames::Next; + using HasPrivateMethods = IsAnonymousExpression::Next; }; @@ -2885,6 +2901,12 @@ class AstNodeFactory final { expression, ReturnStatement::kAsyncReturn, pos, end_position); } + ReturnStatement* 
NewSyntheticAsyncReturnStatement( + Expression* expression, int pos, int end_position = kNoSourcePosition) { + return new (zone_) ReturnStatement( + expression, ReturnStatement::kSyntheticAsyncReturn, pos, end_position); + } + WithStatement* NewWithStatement(Scope* scope, Expression* expression, Statement* statement, @@ -3244,18 +3266,19 @@ class AstNodeFactory final { } ClassLiteral* NewClassLiteral( - ClassScope* scope, Variable* variable, Expression* extends, - FunctionLiteral* constructor, - ZonePtrList* properties, + ClassScope* scope, Expression* extends, FunctionLiteral* constructor, + ZonePtrList* public_members, + ZonePtrList* private_members, FunctionLiteral* static_fields_initializer, FunctionLiteral* instance_members_initializer_function, int start_position, int end_position, bool has_name_static_property, - bool has_static_computed_names, bool is_anonymous) { + bool has_static_computed_names, bool is_anonymous, + bool has_private_methods) { return new (zone_) ClassLiteral( - scope, variable, extends, constructor, properties, + scope, extends, constructor, public_members, private_members, static_fields_initializer, instance_members_initializer_function, start_position, end_position, has_name_static_property, - has_static_computed_names, is_anonymous); + has_static_computed_names, is_anonymous, has_private_methods); } NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name, diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index dbd20f50a80869..9c122fca869c2c 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -84,11 +84,11 @@ void SourceTextModuleDescriptor::AddStarExport( } namespace { -Handle ToStringOrUndefined(Isolate* isolate, - const AstRawString* s) { - return (s == nullptr) - ? Handle::cast(isolate->factory()->undefined_value()) - : Handle::cast(s->string()); +Handle ToStringOrUndefined(Isolate* isolate, + const AstRawString* s) { + return (s == nullptr) ? 
Handle::cast( + isolate->factory()->undefined_value()) + : Handle::cast(s->string()); } } // namespace diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index 581517ee4ec34a..5bf9362fb8c5b7 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -217,8 +217,11 @@ void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) { void CallPrinter::VisitClassLiteral(ClassLiteral* node) { if (node->extends()) Find(node->extends()); - for (int i = 0; i < node->properties()->length(); i++) { - Find(node->properties()->at(i)->value()); + for (int i = 0; i < node->public_members()->length(); i++) { + Find(node->public_members()->at(i)->value()); + } + for (int i = 0; i < node->private_members()->length(); i++) { + Find(node->private_members()->at(i)->value()); } } @@ -1106,7 +1109,8 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) { PrintIndentedVisit("INSTANCE MEMBERS INITIALIZER", node->instance_members_initializer_function()); } - PrintClassProperties(node->properties()); + PrintClassProperties(node->private_members()); + PrintClassProperties(node->public_members()); } void AstPrinter::VisitInitializeClassMembersStatement( diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h index 322fd9fb1437bf..795436d4222fc8 100644 --- a/deps/v8/src/ast/prettyprinter.h +++ b/deps/v8/src/ast/prettyprinter.h @@ -5,10 +5,12 @@ #ifndef V8_AST_PRETTYPRINTER_H_ #define V8_AST_PRETTYPRINTER_H_ +#include + #include "src/ast/ast.h" #include "src/base/compiler-specific.h" -#include "src/utils/allocation.h" #include "src/objects/function-kind.h" +#include "src/utils/allocation.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index c8002dd088c9c7..3e1f8f53ae2111 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -40,7 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - bool* was_added) { + IsStaticFlag is_static_flag, bool* was_added) { // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. // FIXME(marja): fix the type of Lookup. @@ -51,8 +51,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, if (*was_added) { // The variable has not been declared yet -> insert it. 
DCHECK_EQ(name, p->key); - Variable* variable = new (zone) Variable( - scope, name, mode, kind, initialization_flag, maybe_assigned_flag); + Variable* variable = + new (zone) Variable(scope, name, mode, kind, initialization_flag, + maybe_assigned_flag, is_static_flag); p->value = variable; } return reinterpret_cast(p->value); @@ -102,6 +103,9 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type) DCHECK_NE(SCRIPT_SCOPE, scope_type); SetDefaults(); set_language_mode(outer_scope->language_mode()); + private_name_lookup_skips_outer_class_ = + outer_scope->is_class_scope() && + outer_scope->AsClassScope()->IsParsingHeritage(); outer_scope_->AddInnerScope(this); } @@ -140,14 +144,18 @@ ModuleScope::ModuleScope(Isolate* isolate, Handle scope_info, set_language_mode(LanguageMode::kStrict); } -ClassScope::ClassScope(Zone* zone, Scope* outer_scope) - : Scope(zone, outer_scope, CLASS_SCOPE) { +ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous) + : Scope(zone, outer_scope, CLASS_SCOPE), + rare_data_and_is_parsing_heritage_(nullptr), + is_anonymous_class_(is_anonymous) { set_language_mode(LanguageMode::kStrict); } -ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory, +ClassScope::ClassScope(Isolate* isolate, Zone* zone, + AstValueFactory* ast_value_factory, Handle scope_info) - : Scope(zone, CLASS_SCOPE, scope_info) { + : Scope(zone, CLASS_SCOPE, scope_info), + rare_data_and_is_parsing_heritage_(nullptr) { set_language_mode(LanguageMode::kStrict); if (scope_info->HasClassBrand()) { Variable* brand = @@ -155,6 +163,25 @@ ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory, DCHECK_NOT_NULL(brand); EnsureRareData()->brand = brand; } + + // If the class variable is context-allocated and its index is + // saved for deserialization, deserialize it. + if (scope_info->HasSavedClassVariableIndex()) { + int index = scope_info->SavedClassVariableContextLocalIndex(); + DCHECK_GE(index, 0); + DCHECK_LT(index, scope_info->ContextLocalCount()); + String name = scope_info->ContextLocalName(index); + DCHECK_EQ(scope_info->ContextLocalMode(index), VariableMode::kConst); + DCHECK_EQ(scope_info->ContextLocalInitFlag(index), + InitializationFlag::kNeedsInitialization); + DCHECK_EQ(scope_info->ContextLocalMaybeAssignedFlag(index), + MaybeAssignedFlag::kMaybeAssigned); + Variable* var = DeclareClassVariable( + ast_value_factory, ast_value_factory->GetString(handle(name, isolate)), + kNoSourcePosition); + var->AllocateTo(VariableLocation::CONTEXT, + Context::MIN_CONTEXT_SLOTS + index); + } } Scope::Scope(Zone* zone, ScopeType scope_type, Handle scope_info) @@ -171,6 +198,8 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle scope_info) set_language_mode(scope_info->language_mode()); num_heap_slots_ = scope_info->ContextLength(); DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_); + private_name_lookup_skips_outer_class_ = + scope_info->PrivateNameLookupSkipsOuterClass(); // We don't really need to use the preparsed scope data; this is just to // shorten the recursion in SetMustUsePreparseData. 
must_use_preparsed_scope_data_ = true; @@ -222,6 +251,7 @@ void DeclarationScope::SetDefaults() { has_this_reference_ = false; has_this_declaration_ = (is_function_scope() && !is_arrow_scope()) || is_module_scope(); + needs_private_name_context_chain_recalc_ = false; has_rest_ = false; receiver_ = nullptr; new_target_ = nullptr; @@ -270,6 +300,8 @@ void Scope::SetDefaults() { is_declaration_scope_ = false; + private_name_lookup_skips_outer_class_ = false; + must_use_preparsed_scope_data_ = false; } @@ -343,8 +375,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, outer_scope = new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate)); } else if (scope_info.scope_type() == CLASS_SCOPE) { - outer_scope = new (zone) - ClassScope(zone, ast_value_factory, handle(scope_info, isolate)); + outer_scope = new (zone) ClassScope(isolate, zone, ast_value_factory, + handle(scope_info, isolate)); } else if (scope_info.scope_type() == BLOCK_SCOPE) { if (scope_info.is_declaration_scope()) { outer_scope = new (zone) @@ -546,7 +578,8 @@ bool DeclarationScope::Analyze(ParseInfo* info) { if (scope->must_use_preparsed_scope_data_) { DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE); allow_deref.emplace(); - info->consumed_preparse_data()->RestoreScopeAllocationData(scope); + info->consumed_preparse_data()->RestoreScopeAllocationData( + scope, info->ast_value_factory()); } if (!scope->AllocateVariables(info)) return false; @@ -787,11 +820,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; + IsStaticFlag is_static_flag; { location = VariableLocation::CONTEXT; index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + &init_flag, &maybe_assigned_flag, + &is_static_flag); found = index >= 0; } @@ -816,9 +851,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { } bool was_added; - Variable* var = - cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - init_flag, maybe_assigned_flag, &was_added); + Variable* var = cache->variables_.Declare( + zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag, + IsStaticFlag::kNotStatic, &was_added); DCHECK(was_added); var->AllocateTo(location, index); return var; @@ -1047,7 +1082,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, bool was_added; return cache->variables_.Declare( zone(), this, name, VariableMode::kDynamicGlobal, kind, - kCreatedInitialized, kNotAssigned, &was_added); + kCreatedInitialized, kNotAssigned, IsStaticFlag::kNotStatic, &was_added); // TODO(neis): Mark variable as maybe-assigned? } @@ -1165,9 +1200,9 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) { // to ensure that UpdateNeedsHoleCheck() can detect import variables. if (is_module_scope()) AsModuleScope()->AllocateModuleVariables(); - ClassScope* closest_class_scope = GetClassScope(); - if (closest_class_scope != nullptr && - !closest_class_scope->ResolvePrivateNames(info)) { + PrivateNameScopeIterator private_name_scope_iter(this); + if (!private_name_scope_iter.Done() && + !private_name_scope_iter.GetScope()->ResolvePrivateNames(info)) { DCHECK(info->pending_error_handler()->has_pending_error()); return false; } @@ -1177,7 +1212,7 @@ bool DeclarationScope::AllocateVariables(ParseInfo* info) { return false; } - // // Don't allocate variables of preparsed scopes. 
+ // Don't allocate variables of preparsed scopes. if (!was_lazily_parsed()) AllocateVariablesRecursively(); return true; @@ -1254,17 +1289,6 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const { return result; } -ClassScope* Scope::GetClassScope() { - Scope* scope = this; - while (scope != nullptr && !scope->is_class_scope()) { - scope = scope->outer_scope(); - } - if (scope != nullptr && scope->is_class_scope()) { - return scope->AsClassScope(); - } - return nullptr; -} - DeclarationScope* Scope::GetDeclarationScope() { Scope* scope = this; while (!scope->is_declaration_scope()) { @@ -1688,11 +1712,17 @@ void Scope::Print(int n) { if (is_declaration_scope() && AsDeclarationScope()->NeedsHomeObject()) { Indent(n1, "// scope needs home object\n"); } + if (private_name_lookup_skips_outer_class()) { + Indent(n1, "// scope skips outer class for #-names\n"); + } if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n"); if (is_declaration_scope()) { DeclarationScope* scope = AsDeclarationScope(); if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n"); if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n"); + if (scope->needs_private_name_context_chain_recalc()) { + Indent(n1, "// needs #-name context chain recalc\n"); + } } if (num_stack_slots_ > 0) { Indent(n1, "// "); @@ -1729,15 +1759,24 @@ void Scope::Print(int n) { if (is_class_scope()) { ClassScope* class_scope = AsClassScope(); - if (class_scope->rare_data_ != nullptr) { + if (class_scope->GetRareData() != nullptr) { PrintMap(n1, "// private name vars:\n", - &(class_scope->rare_data_->private_name_map), true, function); + &(class_scope->GetRareData()->private_name_map), true, function); Variable* brand = class_scope->brand(); if (brand != nullptr) { Indent(n1, "// brand var:\n"); PrintVar(n1, brand); } } + if (class_scope->class_variable() != nullptr) { + Indent(n1, "// class var"); + PrintF("%s%s:\n", + class_scope->class_variable()->is_used() ? ", used" : ", unused", + class_scope->should_save_class_variable_index() + ? ", index saved" + : ", index not saved"); + PrintVar(n1, class_scope->class_variable()); + } } // Print inner scopes (disable by providing negative n). @@ -1780,9 +1819,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { // Declare a new non-local. DCHECK(IsDynamicVariableMode(mode)); bool was_added; - Variable* var = - variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - kCreatedInitialized, kNotAssigned, &was_added); + Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + kCreatedInitialized, kNotAssigned, + IsStaticFlag::kNotStatic, &was_added); // Allocate it by giving it a dynamic lookup. var->AllocateTo(VariableLocation::LOOKUP, -1); return var; @@ -2103,8 +2142,7 @@ bool Scope::MustAllocateInContext(Variable* var) { if (mode == VariableMode::kTemporary) return false; if (is_catch_scope()) return true; if (is_script_scope() || is_eval_scope()) { - if (IsLexicalVariableMode(mode) || - IsPrivateMethodOrAccessorVariableMode(mode)) { + if (IsLexicalVariableMode(mode)) { return true; } } @@ -2308,6 +2346,47 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate, } } +void DeclarationScope::RecalcPrivateNameContextChain() { + // The outermost scope in a class heritage expression is marked to skip the + // class scope during private name resolution. 
It is possible, however, that + // either the class scope won't require a Context and ScopeInfo, or the + // outermost scope in the heritage position won't. Simply copying the bit from + // full parse into the ScopeInfo will break lazy compilation. In the former + // case the scope that is marked to skip its outer scope will incorrectly skip + // a different class scope than the one we intended to skip. In the latter + // case variables resolved through an inner scope will incorrectly check the + // class scope since we lost the skip bit from the outermost heritage scope. + // + // This method fixes both cases by, in outermost to innermost order, copying + // the value of the skip bit from outer scopes that don't require a Context. + DCHECK(needs_private_name_context_chain_recalc_); + this->ForEach([](Scope* scope) { + Scope* outer = scope->outer_scope(); + if (!outer) return Iteration::kDescend; + if (!outer->NeedsContext()) { + scope->private_name_lookup_skips_outer_class_ = + outer->private_name_lookup_skips_outer_class(); + } + if (!scope->is_function_scope() || + scope->AsDeclarationScope()->ShouldEagerCompile()) { + return Iteration::kDescend; + } + return Iteration::kContinue; + }); +} + +void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() { + DCHECK_EQ(GetClosureScope(), this); + DeclarationScope* scope; + for (scope = this; scope != nullptr; + scope = scope->outer_scope() != nullptr + ? scope->outer_scope()->GetClosureScope() + : nullptr) { + if (scope->needs_private_name_context_chain_recalc_) return; + scope->needs_private_name_context_chain_recalc_ = true; + } +} + // static void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) { DeclarationScope* scope = info->literal()->scope(); @@ -2318,6 +2397,9 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) { outer_scope = scope->outer_scope_->scope_info_; } + if (scope->needs_private_name_context_chain_recalc()) { + scope->RecalcPrivateNameContextChain(); + } scope->AllocateScopeInfosRecursively(isolate, outer_scope); // The debugger expects all shared function infos to contain a scope info. 
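As an illustrative aside, a condensed sketch of how a lookup honours the skip bit described above: it is roughly what ClassScope::LookupPrivateName further down in this diff does with the PrivateNameScopeIterator the patch introduces; the fallback to a deserialized ScopeInfo is elided and the free-function framing is hypothetical, not code from the patch.

Variable* LookupPrivateNameSketch(ClassScope* start, VariableProxy* proxy) {
  DCHECK(!proxy->is_resolved());
  // Walk from the innermost private name scope outwards. Scopes whose
  // private_name_lookup_skips_outer_class() bit is set (heritage position)
  // cause Next() to hop over the immediately enclosing class scope.
  for (PrivateNameScopeIterator scope_iter(start); !scope_iter.Done();
       scope_iter.Next()) {
    ClassScope* scope = scope_iter.GetScope();
    if (Variable* var = scope->LookupLocalPrivateName(proxy->raw_name())) {
      return var;
    }
  }
  // Unresolvable from here; the caller reports the private-name error.
  return nullptr;
}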
@@ -2359,14 +2441,20 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { } Variable* ClassScope::DeclarePrivateName(const AstRawString* name, - VariableMode mode, bool* was_added) { + VariableMode mode, + IsStaticFlag is_static_flag, + bool* was_added) { Variable* result = EnsureRareData()->private_name_map.Declare( zone(), this, name, mode, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, - MaybeAssignedFlag::kMaybeAssigned, was_added); + MaybeAssignedFlag::kMaybeAssigned, is_static_flag, was_added); if (*was_added) { locals_.Add(result); - } else if (IsComplementaryAccessorPair(result->mode(), mode)) { + has_static_private_methods_ |= + (result->is_static() && + IsPrivateMethodOrAccessorVariableMode(result->mode())); + } else if (IsComplementaryAccessorPair(result->mode(), mode) && + result->is_static_flag() == is_static_flag) { *was_added = true; result->set_mode(VariableMode::kPrivateGetterAndSetter); } @@ -2375,38 +2463,42 @@ Variable* ClassScope::DeclarePrivateName(const AstRawString* name, } Variable* ClassScope::LookupLocalPrivateName(const AstRawString* name) { - if (rare_data_ == nullptr) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr) { return nullptr; } - return rare_data_->private_name_map.Lookup(name); + return rare_data->private_name_map.Lookup(name); } UnresolvedList::Iterator ClassScope::GetUnresolvedPrivateNameTail() { - if (rare_data_ == nullptr) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr) { return UnresolvedList::Iterator(); } - return rare_data_->unresolved_private_names.end(); + return rare_data->unresolved_private_names.end(); } void ClassScope::ResetUnresolvedPrivateNameTail(UnresolvedList::Iterator tail) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.end() == tail) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || + rare_data->unresolved_private_names.end() == tail) { return; } bool tail_is_empty = tail == UnresolvedList::Iterator(); if (tail_is_empty) { // If the saved tail is empty, the list used to be empty, so clear it. - rare_data_->unresolved_private_names.Clear(); + rare_data->unresolved_private_names.Clear(); } else { - rare_data_->unresolved_private_names.Rewind(tail); + rare_data->unresolved_private_names.Rewind(tail); } } void ClassScope::MigrateUnresolvedPrivateNameTail( AstNodeFactory* ast_node_factory, UnresolvedList::Iterator tail) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.end() == tail) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || + rare_data->unresolved_private_names.end() == tail) { return; } UnresolvedList migrated_names; @@ -2415,9 +2507,9 @@ void ClassScope::MigrateUnresolvedPrivateNameTail( // migrate everything after the head. bool tail_is_empty = tail == UnresolvedList::Iterator(); UnresolvedList::Iterator it = - tail_is_empty ? rare_data_->unresolved_private_names.begin() : tail; + tail_is_empty ? rare_data->unresolved_private_names.begin() : tail; - for (; it != rare_data_->unresolved_private_names.end(); ++it) { + for (; it != rare_data->unresolved_private_names.end(); ++it) { VariableProxy* proxy = *it; VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); migrated_names.Add(copy); @@ -2425,20 +2517,11 @@ void ClassScope::MigrateUnresolvedPrivateNameTail( // Replace with the migrated copies. 
if (tail_is_empty) { - rare_data_->unresolved_private_names.Clear(); + rare_data->unresolved_private_names.Clear(); } else { - rare_data_->unresolved_private_names.Rewind(tail); + rare_data->unresolved_private_names.Rewind(tail); } - rare_data_->unresolved_private_names.Append(std::move(migrated_names)); -} - -void ClassScope::AddUnresolvedPrivateName(VariableProxy* proxy) { - // During a reparse, already_resolved_ may be true here, because - // the class scope is deserialized while the function scope inside may - // be new. - DCHECK(!proxy->is_resolved()); - DCHECK(proxy->IsPrivateName()); - EnsureRareData()->unresolved_private_names.Add(proxy); + rare_data->unresolved_private_names.Append(std::move(migrated_names)); } Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { @@ -2450,8 +2533,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + IsStaticFlag is_static_flag; + int index = + ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag, + &maybe_assigned_flag, &is_static_flag); if (index < 0) { return nullptr; } @@ -2463,7 +2548,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { // Add the found private name to the map to speed up subsequent // lookups for the same name. bool was_added; - Variable* var = DeclarePrivateName(name, mode, &was_added); + Variable* var = DeclarePrivateName(name, mode, is_static_flag, &was_added); DCHECK(was_added); var->AllocateTo(VariableLocation::CONTEXT, index); return var; @@ -2472,15 +2557,14 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) { DCHECK(!proxy->is_resolved()); - for (Scope* scope = this; !scope->is_script_scope(); - scope = scope->outer_scope_) { - if (!scope->is_class_scope()) continue; // Only search in class scopes - ClassScope* class_scope = scope->AsClassScope(); + for (PrivateNameScopeIterator scope_iter(this); !scope_iter.Done(); + scope_iter.Next()) { + ClassScope* scope = scope_iter.GetScope(); // Try finding it in the private name map first, if it can't be found, // try the deseralized scope info. 
- Variable* var = class_scope->LookupLocalPrivateName(proxy->raw_name()); - if (var == nullptr && !class_scope->scope_info_.is_null()) { - var = class_scope->LookupPrivateNameInScopeInfo(proxy->raw_name()); + Variable* var = scope->LookupLocalPrivateName(proxy->raw_name()); + if (var == nullptr && !scope->scope_info_.is_null()) { + var = scope->LookupPrivateNameInScopeInfo(proxy->raw_name()); } if (var != nullptr) { return var; @@ -2490,22 +2574,24 @@ Variable* ClassScope::LookupPrivateName(VariableProxy* proxy) { } bool ClassScope::ResolvePrivateNames(ParseInfo* info) { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.is_empty()) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) { return true; } - UnresolvedList& list = rare_data_->unresolved_private_names; + UnresolvedList& list = rare_data->unresolved_private_names; for (VariableProxy* proxy : list) { Variable* var = LookupPrivateName(proxy); if (var == nullptr) { + // It's only possible to fail to resolve private names here if + // this is at the top level or the private name is accessed through eval. + DCHECK(info->is_eval() || outer_scope_->is_script_scope()); Scanner::Location loc = proxy->location(); info->pending_error_handler()->ReportMessageAt( loc.beg_pos, loc.end_pos, MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name()); return false; } else { - var->set_is_used(); proxy->BindTo(var); } } @@ -2517,20 +2603,20 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) { } VariableProxy* ClassScope::ResolvePrivateNamesPartially() { - if (rare_data_ == nullptr || - rare_data_->unresolved_private_names.is_empty()) { + RareData* rare_data = GetRareData(); + if (rare_data == nullptr || rare_data->unresolved_private_names.is_empty()) { return nullptr; } - ClassScope* outer_class_scope = - outer_scope_ == nullptr ? nullptr : outer_scope_->GetClassScope(); - UnresolvedList& unresolved = rare_data_->unresolved_private_names; - bool has_private_names = rare_data_->private_name_map.capacity() > 0; + PrivateNameScopeIterator private_name_scope_iter(this); + private_name_scope_iter.Next(); + UnresolvedList& unresolved = rare_data->unresolved_private_names; + bool has_private_names = rare_data->private_name_map.capacity() > 0; // If the class itself does not have private names, nor does it have - // an outer class scope, then we are certain any private name access + // an outer private name scope, then we are certain any private name access // inside cannot be resolved. - if (!has_private_names && outer_class_scope == nullptr && + if (!has_private_names && private_name_scope_iter.Done() && !unresolved.is_empty()) { return unresolved.first(); } @@ -2548,21 +2634,27 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() { if (var != nullptr) { var->set_is_used(); proxy->BindTo(var); + // If the variable being accessed is a static private method, we need to + // save the class variable in the context to check that the receiver is + // the class during runtime. + has_explicit_static_private_methods_access_ |= + (var->is_static() && + IsPrivateMethodOrAccessorVariableMode(var->mode())); } } // If the current scope does not have declared private names, // try looking from the outer class scope later. if (var == nullptr) { - // There's no outer class scope so we are certain that the variable + // There's no outer private name scope so we are certain that the variable // cannot be resolved later. 
- if (outer_class_scope == nullptr) { + if (private_name_scope_iter.Done()) { return proxy; } - // The private name may be found later in the outer class scope, - // so push it to the outer sopce. - outer_class_scope->AddUnresolvedPrivateName(proxy); + // The private name may be found later in the outer private name scope, so + // push it to the outer scope. + private_name_scope_iter.AddUnresolvedPrivateName(proxy); } proxy = next; @@ -2573,14 +2665,16 @@ } Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory, + IsStaticFlag is_static_flag, int class_token_pos) { - DCHECK_IMPLIES(rare_data_ != nullptr, rare_data_->brand == nullptr); + DCHECK_IMPLIES(GetRareData() != nullptr, GetRareData()->brand == nullptr); bool was_added; Variable* brand = Declare(zone(), ast_value_factory->dot_brand_string(), VariableMode::kConst, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, MaybeAssignedFlag::kMaybeAssigned, &was_added); DCHECK(was_added); + brand->set_is_static_flag(is_static_flag); brand->ForceContextAllocation(); brand->set_is_used(); EnsureRareData()->brand = brand; @@ -2588,5 +2682,61 @@ return brand; } +Variable* ClassScope::DeclareClassVariable(AstValueFactory* ast_value_factory, + const AstRawString* name, + int class_token_pos) { + DCHECK_NULL(class_variable_); + bool was_added; + class_variable_ = + Declare(zone(), name == nullptr ? ast_value_factory->dot_string() : name, + VariableMode::kConst, NORMAL_VARIABLE, + InitializationFlag::kNeedsInitialization, + MaybeAssignedFlag::kMaybeAssigned, &was_added); + DCHECK(was_added); + class_variable_->set_initializer_position(class_token_pos); + return class_variable_; +} + +PrivateNameScopeIterator::PrivateNameScopeIterator(Scope* start) + : start_scope_(start), current_scope_(start) { + if (!start->is_class_scope() || start->AsClassScope()->IsParsingHeritage()) { + Next(); + } +} + +void PrivateNameScopeIterator::Next() { + DCHECK(!Done()); + Scope* inner = current_scope_; + Scope* scope = inner->outer_scope(); + while (scope != nullptr) { + if (scope->is_class_scope()) { + if (!inner->private_name_lookup_skips_outer_class()) { + current_scope_ = scope; + return; + } + skipped_any_scopes_ = true; + } + inner = scope; + scope = scope->outer_scope(); + } + current_scope_ = nullptr; +} + +void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) { + // During a reparse, current_scope_->already_resolved_ may be true here, + // because the class scope is deserialized while the function scope inside may + // be new. + DCHECK(!proxy->is_resolved()); + DCHECK(proxy->IsPrivateName()); + GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy); + // Any closure scope that contains uses of private names that skip over a + // class scope due to heritage expressions needs private name context chain + // recalculation, since not all scopes require a Context or ScopeInfo. See + // comment in DeclarationScope::RecalcPrivateNameContextChain.
+ if (V8_UNLIKELY(skipped_any_scopes_)) { + start_scope_->GetClosureScope()->RecordNeedsPrivateNameContextChainRecalc(); + } +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 73e6e8fd89755f..30838db28b553c 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -44,7 +44,7 @@ class VariableMap : public ZoneHashMap { VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, - bool* was_added); + IsStaticFlag is_static_flag, bool* was_added); V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); void Remove(Variable* var); @@ -360,6 +360,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { bool is_class_scope() const { return scope_type_ == CLASS_SCOPE; } bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; } + bool private_name_lookup_skips_outer_class() const { + return private_name_lookup_skips_outer_class_; + } bool IsAsmModule() const; // Returns true if this scope or any inner scopes that might be eagerly // compiled are asm modules. @@ -464,10 +467,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // sloppy eval call. One if this->sloppy_eval_can_extend_vars(). int ContextChainLengthUntilOutermostSloppyEval() const; - // Find the closest class scope in the current scope and outer scopes. If no - // class scope is found, nullptr will be returned. - ClassScope* GetClassScope(); - // Find the first function, script, eval or (declaration) block scope. This is // the scope where var declarations will be hoisted to in the implementation. DeclarationScope* GetDeclarationScope(); @@ -557,9 +556,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { Variable* Declare(Zone* zone, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, bool* was_added) { - Variable* result = - variables_.Declare(zone, this, name, mode, kind, initialization_flag, - maybe_assigned_flag, was_added); + // Static variables can only be declared using ClassScope methods. + Variable* result = variables_.Declare( + zone, this, name, mode, kind, initialization_flag, maybe_assigned_flag, + IsStaticFlag::kNotStatic, was_added); if (*was_added) locals_.Add(result); return result; } @@ -713,7 +713,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // This scope's declarations might not be executed in order (e.g., switch). bool scope_nonlinear_ : 1; bool is_hidden_ : 1; - // Temporary workaround that allows masking of 'this' in debug-evalute scopes. + // Temporary workaround that allows masking of 'this' in debug-evaluate + // scopes. bool is_debug_evaluate_scope_ : 1; // True if one of the inner scopes or the scope itself calls eval. @@ -723,6 +724,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // True if it holds 'var' declarations. bool is_declaration_scope_ : 1; + // True if the outer scope is a class scope and should be skipped when + // resolving private names, i.e. if the scope is in a class heritage + // expression. 
+ bool private_name_lookup_skips_outer_class_ : 1; + bool must_use_preparsed_scope_data_ : 1; }; @@ -859,6 +865,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { return IsClassMembersInitializerFunction(function_kind()); } + void set_is_async_module() { + DCHECK(IsModule(function_kind_)); + function_kind_ = kAsyncModule; + } + void DeclareThis(AstValueFactory* ast_value_factory); void DeclareArguments(AstValueFactory* ast_value_factory); void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory); @@ -1082,6 +1093,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { GetReceiverScope()->receiver()->ForceContextAllocation(); } + bool needs_private_name_context_chain_recalc() const { + return needs_private_name_context_chain_recalc_; + } + void RecordNeedsPrivateNameContextChainRecalc(); + private: V8_INLINE void AllocateParameter(Variable* var, int index); @@ -1099,6 +1115,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { void SetDefaults(); + // Recalculate the private name context chain from the existing skip bit in + // preparation for AllocateScopeInfos. Because the private name scope is + // implemented with a skip bit for scopes in heritage position, that bit may + // need to be recomputed due scopes that do not need contexts. + void RecalcPrivateNameContextChain(); + bool has_simple_parameters_ : 1; // This scope contains an "use asm" annotation. bool is_asm_module_ : 1; @@ -1120,9 +1142,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { bool has_checked_syntax_ : 1; bool has_this_reference_ : 1; bool has_this_declaration_ : 1; + bool needs_private_name_context_chain_recalc_ : 1; // If the scope is a function scope, this is the function kind. - const FunctionKind function_kind_; + FunctionKind function_kind_; int num_parameters_ = 0; @@ -1220,17 +1243,26 @@ class ModuleScope final : public DeclarationScope { class V8_EXPORT_PRIVATE ClassScope : public Scope { public: - ClassScope(Zone* zone, Scope* outer_scope); + ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous); // Deserialization. - ClassScope(Zone* zone, AstValueFactory* ast_value_factory, + ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory, Handle scope_info); + struct HeritageParsingScope { + explicit HeritageParsingScope(ClassScope* class_scope) + : class_scope_(class_scope) { + class_scope_->SetIsParsingHeritage(true); + } + ~HeritageParsingScope() { class_scope_->SetIsParsingHeritage(false); } + + private: + ClassScope* class_scope_; + }; + // Declare a private name in the private name map and add it to the // local variables of this scope. Variable* DeclarePrivateName(const AstRawString* name, VariableMode mode, - bool* was_added); - - void AddUnresolvedPrivateName(VariableProxy* proxy); + IsStaticFlag is_static_flag, bool* was_added); // Try resolving all unresolved private names found in the current scope. // Called from DeclarationScope::AllocateVariables() when reparsing a @@ -1261,13 +1293,53 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { void MigrateUnresolvedPrivateNameTail(AstNodeFactory* ast_node_factory, UnresolvedList::Iterator tail); Variable* DeclareBrandVariable(AstValueFactory* ast_value_factory, + IsStaticFlag is_static_flag, int class_token_pos); + + Variable* DeclareClassVariable(AstValueFactory* ast_value_factory, + const AstRawString* name, int class_token_pos); + Variable* brand() { - return rare_data_ == nullptr ? nullptr : rare_data_->brand; + return GetRareData() == nullptr ? 
nullptr : GetRareData()->brand; + } + + Variable* class_variable() { return class_variable_; } + + V8_INLINE bool IsParsingHeritage() { + return rare_data_and_is_parsing_heritage_.GetPayload(); + } + + // Only maintained when the scope is parsed, not when the scope is + // deserialized. + bool has_static_private_methods() const { + return has_static_private_methods_; + } + + // Returns whether the index of the class variable of this class scope should be + // recorded in the ScopeInfo. + // If any inner scope accesses static private names directly, the class + // variable will be forced to be context-allocated. + // The inner scope may also call eval, which may result in access to + // static private names. + // Only maintained when the scope is parsed. + bool should_save_class_variable_index() const { + return should_save_class_variable_index_ || + has_explicit_static_private_methods_access_ || + (has_static_private_methods_ && inner_scope_calls_eval_); + } + + // Only maintained when the scope is parsed. + bool is_anonymous_class() const { return is_anonymous_class_; } + + // Overridden during reparsing + void set_should_save_class_variable_index() { + should_save_class_variable_index_ = true; } private: friend class Scope; + friend class PrivateNameScopeIterator; + // Find the private name declared in the private name map first, // if it cannot be found there, try scope info if there is any. // Returns nullptr if it cannot be found. @@ -1285,14 +1357,53 @@ Variable* brand = nullptr; }; + V8_INLINE RareData* GetRareData() { + return rare_data_and_is_parsing_heritage_.GetPointer(); + } V8_INLINE RareData* EnsureRareData() { - if (rare_data_ == nullptr) { - rare_data_ = new (zone_) RareData(zone_); + if (GetRareData() == nullptr) { + rare_data_and_is_parsing_heritage_.SetPointer(new (zone_) + RareData(zone_)); } - return rare_data_; + return GetRareData(); + } + V8_INLINE void SetIsParsingHeritage(bool v) { + rare_data_and_is_parsing_heritage_.SetPayload(v); } - RareData* rare_data_ = nullptr; + PointerWithPayload rare_data_and_is_parsing_heritage_; + Variable* class_variable_ = nullptr; + // These are only maintained when the scope is parsed, not when the + // scope is deserialized. + bool has_static_private_methods_ = false; + bool has_explicit_static_private_methods_access_ = false; + bool is_anonymous_class_ = false; + // This is only maintained during reparsing, restored from the + // preparsed data. + bool should_save_class_variable_index_ = false; +}; + +// Iterate over the private name scope chain. The iteration proceeds from the +// innermost private name scope outwards. +class PrivateNameScopeIterator { + public: + explicit PrivateNameScopeIterator(Scope* start); + + bool Done() const { return current_scope_ == nullptr; } + void Next(); + + // Add an unresolved private name to the current scope.
+ void AddUnresolvedPrivateName(VariableProxy* proxy); + + ClassScope* GetScope() const { + DCHECK(!Done()); + return current_scope_->AsClassScope(); + } + + private: + bool skipped_any_scopes_ = false; + Scope* start_scope_; + Scope* current_scope_; }; } // namespace internal diff --git a/deps/v8/src/ast/source-range-ast-visitor.cc b/deps/v8/src/ast/source-range-ast-visitor.cc index 2fcf151999ace0..74709916159a9c 100644 --- a/deps/v8/src/ast/source-range-ast-visitor.cc +++ b/deps/v8/src/ast/source-range-ast-visitor.cc @@ -39,6 +39,11 @@ void SourceRangeAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) { MaybeRemoveLastContinuationRange(stmts); } +void SourceRangeAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) { + AstTraversalVisitor::VisitTryCatchStatement(stmt); + MaybeRemoveContinuationRangeOfAsyncReturn(stmt); +} + bool SourceRangeAstVisitor::VisitNode(AstNode* node) { AstNodeSourceRanges* range = source_range_map_->Find(node); @@ -59,11 +64,8 @@ bool SourceRangeAstVisitor::VisitNode(AstNode* node) { return true; } -void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( - ZonePtrList* statements) { - if (statements->is_empty()) return; - - Statement* last_statement = statements->last(); +void SourceRangeAstVisitor::MaybeRemoveContinuationRange( + Statement* last_statement) { AstNodeSourceRanges* last_range = nullptr; if (last_statement->IsExpressionStatement() && @@ -83,5 +85,38 @@ void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( } } +void SourceRangeAstVisitor::MaybeRemoveLastContinuationRange( + ZonePtrList* statements) { + if (statements->is_empty()) return; + MaybeRemoveContinuationRange(statements->last()); +} + +namespace { +Statement* FindLastNonSyntheticReturn(ZonePtrList* statements) { + for (int i = statements->length() - 1; i >= 0; --i) { + Statement* stmt = statements->at(i); + if (!stmt->IsReturnStatement()) break; + if (stmt->AsReturnStatement()->is_synthetic_async_return()) continue; + return stmt; + } + return nullptr; +} +} // namespace + +void SourceRangeAstVisitor::MaybeRemoveContinuationRangeOfAsyncReturn( + TryCatchStatement* try_catch_stmt) { + // Detect try-catch inserted by NewTryCatchStatementForAsyncAwait in the + // parser (issued for async functions, including async generators), and + // remove the continuation ranges of return statements corresponding to + // returns at function end in the untransformed source. 
+ if (try_catch_stmt->is_try_catch_for_async()) { + Statement* last_non_synthetic = + FindLastNonSyntheticReturn(try_catch_stmt->try_block()->statements()); + if (last_non_synthetic) { + MaybeRemoveContinuationRange(last_non_synthetic); + } + } +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ast/source-range-ast-visitor.h b/deps/v8/src/ast/source-range-ast-visitor.h index 4ba5feb2d299f9..8b6b99c434634e 100644 --- a/deps/v8/src/ast/source-range-ast-visitor.h +++ b/deps/v8/src/ast/source-range-ast-visitor.h @@ -37,8 +37,11 @@ class SourceRangeAstVisitor final void VisitSwitchStatement(SwitchStatement* stmt); void VisitFunctionLiteral(FunctionLiteral* expr); bool VisitNode(AstNode* node); + void VisitTryCatchStatement(TryCatchStatement* stmt); + void MaybeRemoveContinuationRange(Statement* last_statement); void MaybeRemoveLastContinuationRange(ZonePtrList* stmts); + void MaybeRemoveContinuationRangeOfAsyncReturn(TryCatchStatement* stmt); SourceRangeMap* source_range_map_ = nullptr; std::unordered_set continuation_positions_; diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index 1ff6f9f4228375..7be99adc7c12ff 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -21,7 +21,8 @@ class Variable final : public ZoneObject { public: Variable(Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, + IsStaticFlag is_static_flag = IsStaticFlag::kNotStatic) : scope_(scope), name_(name), local_if_not_shadowed_(nullptr), @@ -35,10 +36,13 @@ class Variable final : public ZoneObject { ForceContextAllocationField::encode(false) | ForceHoleInitializationField::encode(false) | LocationField::encode(VariableLocation::UNALLOCATED) | - VariableKindField::encode(kind)) { + VariableKindField::encode(kind) | + IsStaticFlagField::encode(is_static_flag)) { // Var declared variables never need initialization. 
DCHECK(!(mode == VariableMode::kVar && initialization_flag == kNeedsInitialization)); + DCHECK_IMPLIES(is_static_flag == IsStaticFlag::kStatic, + IsConstVariableMode(mode)); } explicit Variable(Variable* other); @@ -59,6 +63,14 @@ class Variable final : public ZoneObject { void set_mode(VariableMode mode) { bit_field_ = VariableModeField::update(bit_field_, mode); } + void set_is_static_flag(IsStaticFlag is_static_flag) { + bit_field_ = IsStaticFlagField::update(bit_field_, is_static_flag); + } + IsStaticFlag is_static_flag() const { + return IsStaticFlagField::decode(bit_field_); + } + bool is_static() const { return is_static_flag() == IsStaticFlag::kStatic; } + bool has_forced_context_allocation() const { return ForceContextAllocationField::decode(bit_field_); } @@ -72,6 +84,9 @@ class Variable final : public ZoneObject { MaybeAssignedFlag maybe_assigned() const { return MaybeAssignedFlagField::decode(bit_field_); } + void clear_maybe_assigned() { + bit_field_ = MaybeAssignedFlagField::update(bit_field_, kNotAssigned); + } void SetMaybeAssigned() { if (mode() == VariableMode::kConst) return; @@ -249,6 +264,7 @@ class Variable final : public ZoneObject { using ForceHoleInitializationField = InitializationFlagField::Next; using MaybeAssignedFlagField = ForceHoleInitializationField::Next; + using IsStaticFlagField = MaybeAssignedFlagField::Next; Variable** next() { return &next_; } friend List; diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS index 9c6fd3c859ab75..3654b400adad26 100644 --- a/deps/v8/src/base/OWNERS +++ b/deps/v8/src/base/OWNERS @@ -1,4 +1,4 @@ -clemensh@chromium.org +clemensb@chromium.org mlippautz@chromium.org # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h deleted file mode 100644 index f684b52ccb6dc0..00000000000000 --- a/deps/v8/src/base/adapters.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Slightly adapted for inclusion in V8. -// Copyright 2014 the V8 project authors. All rights reserved. - -#ifndef V8_BASE_ADAPTERS_H_ -#define V8_BASE_ADAPTERS_H_ - -#include - -#include "src/base/macros.h" - -namespace v8 { -namespace base { - -// Internal adapter class for implementing base::Reversed. -template -class ReversedAdapter { - public: - using Iterator = - std::reverse_iterator()))>; - - explicit ReversedAdapter(T& t) : t_(t) {} - ReversedAdapter(const ReversedAdapter& ra) V8_NOEXCEPT = default; - - // TODO(clemensh): Use std::rbegin/std::rend once we have C++14 support. - Iterator begin() const { return Iterator(std::end(t_)); } - Iterator end() const { return Iterator(std::begin(t_)); } - - private: - T& t_; - - DISALLOW_ASSIGN(ReversedAdapter); -}; - -// Reversed returns a container adapter usable in a range-based "for" statement -// for iterating a reversible container in reverse order. 
-// -// Example: -// -// std::vector v = ...; -// for (int i : base::Reversed(v)) { -// // iterates through v from back to front -// } -template -ReversedAdapter Reversed(T&& t) { - return ReversedAdapter(t); -} - -} // namespace base -} // namespace v8 - -#endif // V8_BASE_ADAPTERS_H_ diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index 6ab0ffee29ee5a..4f4ac2b3282933 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -9,6 +9,7 @@ #endif #if V8_OS_LINUX #include // AT_HWCAP +extern "C" char** environ; #endif #if V8_GLIBC_PREREQ(2, 16) #include // getauxval() @@ -16,7 +17,7 @@ #if V8_OS_QNX #include // cpuinfo #endif -#if V8_OS_LINUX && V8_HOST_ARCH_PPC +#if (V8_OS_LINUX && V8_HOST_ARCH_PPC) || V8_OS_ANDROID #include #endif #if V8_OS_AIX @@ -109,28 +110,25 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #define HWCAP_LPAE (1 << 20) static uint32_t ReadELFHWCaps() { - uint32_t result = 0; #if V8_GLIBC_PREREQ(2, 16) - result = static_cast(getauxval(AT_HWCAP)); + return static_cast(getauxval(AT_HWCAP)); #else - // Read the ELF HWCAP flags by parsing /proc/self/auxv. - FILE* fp = fopen("/proc/self/auxv", "r"); - if (fp != nullptr) { - struct { uint32_t tag; uint32_t value; } entry; - for (;;) { - size_t n = fread(&entry, sizeof(entry), 1, fp); - if (n == 0 || (entry.tag == 0 && entry.value == 0)) { - break; - } - if (entry.tag == AT_HWCAP) { - result = entry.value; - break; - } + char** head = environ; + while (*head++ != nullptr) { + } +#ifdef __LP64__ + using elf_auxv_t = Elf64_auxv_t; +#else + using elf_auxv_t = Elf32_auxv_t; +#endif + for (elf_auxv_t* entry = reinterpret_cast(head); + entry->a_type != AT_NULL; ++entry) { + if (entry->a_type == AT_HWCAP) { + return entry->a_un.a_val; } - fclose(fp); } + return 0u; #endif - return result; } #endif // V8_HOST_ARCH_ARM @@ -608,33 +606,28 @@ CPU::CPU() #ifndef USE_SIMULATOR #if V8_OS_LINUX - // Read processor info from /proc/self/auxv. 
char* auxv_cpu_type = nullptr; - FILE* fp = fopen("/proc/self/auxv", "r"); - if (fp != nullptr) { + char** head = environ; + while (*head++ != nullptr) { + } #if V8_TARGET_ARCH_PPC64 - Elf64_auxv_t entry; + using elf_auxv_t = Elf64_auxv_t; #else - Elf32_auxv_t entry; + using elf_auxv_t = Elf32_auxv_t; #endif - for (;;) { - size_t n = fread(&entry, sizeof(entry), 1, fp); - if (n == 0 || entry.a_type == AT_NULL) { + for (elf_auxv_t* entry = reinterpret_cast(head); + entry->a_type != AT_NULL; ++entry) { + switch (entry->a_type) { + case AT_PLATFORM: + auxv_cpu_type = reinterpret_cast(entry->a_un.a_val); + break; + case AT_ICACHEBSIZE: + icache_line_size_ = entry->a_un.a_val; + break; + case AT_DCACHEBSIZE: + dcache_line_size_ = entry->a_un.a_val; break; - } - switch (entry.a_type) { - case AT_PLATFORM: - auxv_cpu_type = reinterpret_cast(entry.a_un.a_val); - break; - case AT_ICACHEBSIZE: - icache_line_size_ = entry.a_un.a_val; - break; - case AT_DCACHEBSIZE: - dcache_line_size_ = entry.a_un.a_val; - break; - } } - fclose(fp); } part_ = -1; diff --git a/deps/v8/src/base/file-utils.cc b/deps/v8/src/base/file-utils.cc index 31b1b411908dc9..6e1c4921440f3c 100644 --- a/deps/v8/src/base/file-utils.cc +++ b/deps/v8/src/base/file-utils.cc @@ -12,24 +12,18 @@ namespace v8 { namespace base { -char* RelativePath(char** buffer, const char* exec_path, const char* name) { +std::unique_ptr RelativePath(const char* exec_path, const char* name) { DCHECK(exec_path); - int path_separator = static_cast(strlen(exec_path)) - 1; - while (path_separator >= 0 && - !OS::isDirectorySeparator(exec_path[path_separator])) { - path_separator--; + size_t basename_start = strlen(exec_path); + while (basename_start > 0 && + !OS::isDirectorySeparator(exec_path[basename_start - 1])) { + --basename_start; } - if (path_separator >= 0) { - int name_length = static_cast(strlen(name)); - *buffer = - reinterpret_cast(calloc(path_separator + name_length + 2, 1)); - *buffer[0] = '\0'; - strncat(*buffer, exec_path, path_separator + 1); - strncat(*buffer, name, name_length); - } else { - *buffer = strdup(name); - } - return *buffer; + size_t name_length = strlen(name); + auto buffer = std::make_unique(basename_start + name_length + 1); + if (basename_start > 0) memcpy(buffer.get(), exec_path, basename_start); + memcpy(buffer.get() + basename_start, name, name_length); + return buffer; } } // namespace base diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h index afd9a1fc253103..84b57fb40b34c6 100644 --- a/deps/v8/src/base/file-utils.h +++ b/deps/v8/src/base/file-utils.h @@ -5,6 +5,8 @@ #ifndef V8_BASE_FILE_UTILS_H_ #define V8_BASE_FILE_UTILS_H_ +#include + #include "src/base/base-export.h" namespace v8 { @@ -12,8 +14,8 @@ namespace base { // Helper functions to manipulate file paths. 
-V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path, - const char* name); +V8_BASE_EXPORT +std::unique_ptr RelativePath(const char* exec_path, const char* name); } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/free_deleter.h b/deps/v8/src/base/free_deleter.h index 77e4f0ed14a760..a556926685949d 100644 --- a/deps/v8/src/base/free_deleter.h +++ b/deps/v8/src/base/free_deleter.h @@ -9,6 +9,7 @@ #define V8_BASE_FREE_DELETER_H_ #include +#include namespace v8 { namespace base { diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h index b081af62aeac55..baaf324e2185b5 100644 --- a/deps/v8/src/base/iterator.h +++ b/deps/v8/src/base/iterator.h @@ -59,6 +59,26 @@ class iterator_range { const_iterator const end_; }; +template +auto make_iterator_range(ForwardIterator&& begin, ForwardIterator&& end) { + return iterator_range{std::forward(begin), + std::forward(end)}; +} + +// {Reversed} returns a container adapter usable in a range-based "for" +// statement for iterating a reversible container in reverse order. +// +// Example: +// +// std::vector v = ...; +// for (int i : base::Reversed(v)) { +// // iterates through v from back to front +// } +template +auto Reversed(T& t) { // NOLINT(runtime/references): match {rbegin} and {rend} + return make_iterator_range(std::rbegin(t), std::rend(t)); +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index ad70e9820ddb4a..5f52a9893e6a56 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -6,6 +6,7 @@ #define V8_BASE_MACROS_H_ #include +#include #include "src/base/compiler-specific.h" #include "src/base/logging.h" @@ -232,35 +233,16 @@ struct is_trivially_copyable { // the standard does not, so let's skip this check.) // Trivial non-deleted destructor. std::is_trivially_destructible::value; - -#elif defined(__GNUC__) && __GNUC__ < 5 - // WARNING: - // On older libstdc++ versions, there is no way to correctly implement - // is_trivially_copyable. The workaround below is an approximation (neither - // over- nor underapproximation). E.g. it wrongly returns true if the move - // constructor is non-trivial, and it wrongly returns false if the copy - // constructor is deleted, but copy assignment is trivial. - // TODO(rongjie) Remove this workaround once we require gcc >= 5.0 - static constexpr bool value = - __has_trivial_copy(T) && __has_trivial_destructor(T); - #else static constexpr bool value = std::is_trivially_copyable::value; #endif }; -#if defined(__GNUC__) && __GNUC__ < 5 -// On older libstdc++ versions, base::is_trivially_copyable::value is only an -// approximation (see above), so make ASSERT_{NOT_,}TRIVIALLY_COPYABLE a noop. -#define ASSERT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled") -#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled") -#else #define ASSERT_TRIVIALLY_COPYABLE(T) \ static_assert(::v8::base::is_trivially_copyable::value, \ #T " should be trivially copyable") #define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \ static_assert(!::v8::base::is_trivially_copyable::value, \ #T " should not be trivially copyable") -#endif // The USE(x, ...) template is used to silence C++ compiler warnings // issued for (yet) unused variables (typically parameters). @@ -407,6 +389,9 @@ bool is_inbounds(float_t v) { constexpr bool kUpperBoundIsMax = static_cast(kUpperBound) == static_cast(std::numeric_limits::max()); + // Using USE(var) is only a workaround for a GCC 8.1 bug. 
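The RelativePath rewrite above trades a caller-managed char** plus calloc/strncat for an owning heap buffer returned by value. A hedged sketch of the same pattern outside V8 — the helper name and the '/'-only separator check are simplifications, not the real code:

#include <cstdio>
#include <cstring>
#include <memory>

std::unique_ptr<char[]> JoinWithExecDir(const char* exec_path,
                                        const char* name) {
  std::size_t basename_start = std::strlen(exec_path);
  // Walk back to just past the last separator; 0 means "no directory part".
  while (basename_start > 0 && exec_path[basename_start - 1] != '/') {
    --basename_start;
  }
  std::size_t name_length = std::strlen(name);
  // make_unique<char[]> value-initializes, so the result is already
  // NUL-terminated once the two pieces are copied in.
  auto buffer = std::make_unique<char[]>(basename_start + name_length + 1);
  if (basename_start > 0) std::memcpy(buffer.get(), exec_path, basename_start);
  std::memcpy(buffer.get() + basename_start, name, name_length);
  return buffer;
}

int main() {
  auto path = JoinWithExecDir("/usr/local/bin/node", "snapshot_blob.bin");
  std::printf("%s\n", path.get());  // /usr/local/bin/snapshot_blob.bin
}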
+ USE(kLowerBoundIsMin); + USE(kUpperBoundIsMax); return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) && (kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound)); } diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h index b8df88d8442cab..6610c7ffc33623 100644 --- a/deps/v8/src/base/optional.h +++ b/deps/v8/src/base/optional.h @@ -131,21 +131,8 @@ struct OptionalStorageBase { // the condition of constexpr-ness is satisfied because the base class also has // compiler generated constexpr {copy,move} constructors). Note that // placement-new is prohibited in constexpr. -#if defined(__GNUC__) && __GNUC__ < 5 -// gcc <5 does not implement std::is_trivially_copy_constructible. -// Conservatively assume false for this configuration. -// TODO(clemensh): Remove this once we drop support for gcc <5. -#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) false -#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) false -#else -#define TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ - std::is_trivially_copy_constructible::value -#define TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \ - std::is_trivially_move_constructible::value -#endif -template -#undef TRIVIALLY_COPY_CONSTRUCTIBLE +template ::value, + bool = std::is_trivially_move_constructible::value> struct OptionalStorage : OptionalStorageBase { // This is no trivially {copy,move} constructible case. Other cases are // defined below as specializations. diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index c48cf8d3393c12..5b3b31ec1e5246 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -290,6 +290,7 @@ class LockGuard final { }; using MutexGuard = LockGuard; +using RecursiveMutexGuard = LockGuard; enum MutexSharedType : bool { kShared = true, kExclusive = false }; diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc index c133ffb68d7da6..e4a3cb6f35f0ae 100644 --- a/deps/v8/src/base/platform/platform-openbsd.cc +++ b/deps/v8/src/base/platform/platform-openbsd.cc @@ -107,7 +107,7 @@ void OS::SignalCodeMovingGC() { // it. This injects a GC marker into the stream of events generated // by the kernel and allows us to synchronize V8 code log and the // kernel log. 
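The mutex.h hunk above adds a RecursiveMutexGuard alias alongside the existing MutexGuard. A rough standard-library analogue of that alias pattern (std::mutex / std::recursive_mutex stand in for the v8::base types here):

#include <mutex>

using MutexGuard = std::lock_guard<std::mutex>;
using RecursiveMutexGuard = std::lock_guard<std::recursive_mutex>;

std::recursive_mutex g_mu;

void Recurse(int depth) {
  RecursiveMutexGuard guard(g_mu);  // re-entrant lock: nested calls are fine
  if (depth > 0) Recurse(depth - 1);
}

int main() { Recurse(3); }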
- int size = sysconf(_SC_PAGESIZE); + long size = sysconf(_SC_PAGESIZE); // NOLINT: type more fit than uint64_t FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+"); if (f == nullptr) { OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile()); @@ -116,7 +116,7 @@ void OS::SignalCodeMovingGC() { void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0); DCHECK(addr != MAP_FAILED); - OS::Free(addr, size); + CHECK(OS::Free(addr, size)); fclose(f); } diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index c50cdd7a98eefd..99abcd55686c16 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -48,6 +48,7 @@ #if V8_OS_MACOSX #include +#include #endif #if V8_OS_LINUX diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc index a7e50f5880467b..66464d8258dc9b 100644 --- a/deps/v8/src/base/platform/semaphore.cc +++ b/deps/v8/src/base/platform/semaphore.cc @@ -5,8 +5,7 @@ #include "src/base/platform/semaphore.h" #if V8_OS_MACOSX -#include -#include +#include #endif #include @@ -21,53 +20,23 @@ namespace base { #if V8_OS_MACOSX Semaphore::Semaphore(int count) { - kern_return_t result = semaphore_create( - mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); + native_handle_ = dispatch_semaphore_create(count); + DCHECK(native_handle_); } +Semaphore::~Semaphore() { dispatch_release(native_handle_); } -Semaphore::~Semaphore() { - kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); -} - -void Semaphore::Signal() { - kern_return_t result = semaphore_signal(native_handle_); - DCHECK_EQ(KERN_SUCCESS, result); - USE(result); -} - +void Semaphore::Signal() { dispatch_semaphore_signal(native_handle_); } void Semaphore::Wait() { - while (true) { - kern_return_t result = semaphore_wait(native_handle_); - if (result == KERN_SUCCESS) return; // Semaphore was signalled. - DCHECK_EQ(KERN_ABORTED, result); - } + dispatch_semaphore_wait(native_handle_, DISPATCH_TIME_FOREVER); } bool Semaphore::WaitFor(const TimeDelta& rel_time) { - TimeTicks now = TimeTicks::Now(); - TimeTicks end = now + rel_time; - while (true) { - mach_timespec_t ts; - if (now >= end) { - // Return immediately if semaphore was not signalled. - ts.tv_sec = 0; - ts.tv_nsec = 0; - } else { - ts = (end - now).ToMachTimespec(); - } - kern_return_t result = semaphore_timedwait(native_handle_, ts); - if (result == KERN_SUCCESS) return true; // Semaphore was signalled. - if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout. 
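The semaphore.cc hunk above switches the macOS backend from Mach semaphore_* calls to libdispatch. A minimal sketch of the libdispatch calls involved — macOS-only, and only an illustration of the API shape, not the V8 class:

#include <dispatch/dispatch.h>
#include <cstdint>
#include <cstdio>

int main() {
  dispatch_semaphore_t sem = dispatch_semaphore_create(0);

  // Signal, then a plain wait that should succeed immediately.
  dispatch_semaphore_signal(sem);
  dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);

  // Timed wait: dispatch_semaphore_wait returns non-zero when it times out.
  int64_t timeout_ns = 50 * 1000 * 1000;  // 50 ms
  dispatch_time_t deadline = dispatch_time(DISPATCH_TIME_NOW, timeout_ns);
  bool signalled = dispatch_semaphore_wait(sem, deadline) == 0;
  std::printf("timed wait signalled: %d\n", signalled);

  dispatch_release(sem);
}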
- DCHECK_EQ(KERN_ABORTED, result); - now = TimeTicks::Now(); - } + dispatch_time_t timeout = + dispatch_time(DISPATCH_TIME_NOW, rel_time.InNanoseconds()); + return dispatch_semaphore_wait(native_handle_, timeout) == 0; } #elif V8_OS_POSIX diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h index 11ff0b9199f845..c4937acadd19a7 100644 --- a/deps/v8/src/base/platform/semaphore.h +++ b/deps/v8/src/base/platform/semaphore.h @@ -12,7 +12,7 @@ #endif #if V8_OS_MACOSX -#include // NOLINT +#include // NOLINT #elif V8_OS_POSIX #include // NOLINT #endif @@ -50,7 +50,7 @@ class V8_BASE_EXPORT Semaphore final { bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT; #if V8_OS_MACOSX - using NativeHandle = semaphore_t; + using NativeHandle = dispatch_semaphore_t; #elif V8_OS_POSIX using NativeHandle = sem_t; #elif V8_OS_WIN diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h index 530114a8e2faa5..146f8d6e6ae4a7 100644 --- a/deps/v8/src/base/template-utils.h +++ b/deps/v8/src/base/template-utils.h @@ -6,32 +6,20 @@ #define V8_BASE_TEMPLATE_UTILS_H_ #include -#include +#include +#include namespace v8 { namespace base { namespace detail { -// make_array_helper statically iteratively creates the index list 0 .. Size-1. -// A specialization for the base case (first index is 0) finally constructs the -// array. -// TODO(clemensh): Use std::index_sequence once we have C++14 support. -template -struct make_array_helper; - -template -struct make_array_helper { - constexpr static std::array::type, - sizeof...(Indexes) + 1> - make_array(Function f) { - return {{f(0), f(Indexes)...}}; - } -}; - -template -struct make_array_helper - : make_array_helper {}; +template +constexpr inline auto make_array_helper(Function f, + std::index_sequence) + -> std::array { + return {{f(Indexes)...}}; +} } // namespace detail @@ -42,18 +30,8 @@ struct make_array_helper // [](std::size_t i) { return static_cast(2 * i); }); // The resulting array will be constexpr if the passed function is constexpr. template -constexpr std::array::type, Size> -make_array(Function f) { - static_assert(Size > 0, "Can only create non-empty arrays"); - return detail::make_array_helper::make_array(f); -} - -// base::make_unique: Construct an object of type T and wrap it in a -// std::unique_ptr. -// Replacement for C++14's std::make_unique. -template -std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); +constexpr auto make_array(Function f) { + return detail::make_array_helper(f, std::make_index_sequence{}); } // Helper to determine how to pass values: Pass scalars and arrays by value, @@ -80,38 +58,17 @@ struct has_output_operator() << std::declval()))> : std::true_type {}; -namespace detail { - -template -struct fold_helper { - static_assert(sizeof...(Ts) == 0, "this is the base case"); - using result_t = typename std::remove_reference::type; - static constexpr T&& fold(Func func, T&& first) { - return std::forward(first); - } -}; +// Fold all arguments from left to right with a given function. +template +constexpr auto fold(Func func, T&& t) { + return std::forward(t); +} template -struct fold_helper { - using folded_t = typename std::result_of::type; - using next_fold_helper = fold_helper; - using result_t = typename next_fold_helper::result_t; - static constexpr result_t fold(Func func, T1&& first, T2&& second, - Ts&&... 
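The template-utils.h hunk above rewrites make_array on top of std::index_sequence, replacing the hand-rolled recursive helper. A standalone sketch of that construction (names mirror the patch but this is only an illustration; the constexpr usage below assumes C++17 so the lambda is implicitly constexpr):

#include <array>
#include <cstddef>
#include <utility>

namespace detail {
template <typename Function, std::size_t... Indexes>
constexpr auto make_array_helper(Function f, std::index_sequence<Indexes...>)
    -> std::array<decltype(f(0)), sizeof...(Indexes)> {
  return {{f(Indexes)...}};
}
}  // namespace detail

template <std::size_t Size, typename Function>
constexpr auto make_array(Function f) {
  return detail::make_array_helper(f, std::make_index_sequence<Size>{});
}

// Usage: a compile-time array of the first five even numbers.
constexpr auto kEvens =
    make_array<5>([](std::size_t i) { return static_cast<int>(2 * i); });
static_assert(kEvens[4] == 8, "built at compile time");

int main() { return kEvens[0]; }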
more) { - return next_fold_helper::fold( - func, func(std::forward(first), std::forward(second)), - std::forward(more)...); - } -}; - -} // namespace detail - -// Fold all arguments from left to right with a given function. -template -constexpr auto fold(Func func, Ts&&... more) -> - typename detail::fold_helper::result_t { - return detail::fold_helper::fold(func, - std::forward(more)...); +constexpr auto fold(Func func, T1&& first, T2&& second, Ts&&... more) { + auto&& folded = func(std::forward(first), std::forward(second)); + return fold(std::move(func), std::forward(folded), + std::forward(more)...); } // {is_same::value} is true if all Ts are the same, false otherwise. diff --git a/deps/v8/src/base/ubsan.cc b/deps/v8/src/base/ubsan.cc new file mode 100644 index 00000000000000..fc77156eb1cf6c --- /dev/null +++ b/deps/v8/src/base/ubsan.cc @@ -0,0 +1,50 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include +#include + +#include "src/base/build_config.h" + +#if !defined(UNDEFINED_SANITIZER) || !defined(V8_TARGET_ARCH_32_BIT) +#error "This file is only needed for 32-bit UBSan builds." +#endif + +// Compiling with -fsanitize=undefined on 32-bit platforms requires __mulodi4 +// to be available. Usually it comes from libcompiler_rt, which our build +// doesn't provide, so here is a custom implementation (inspired by digit_mul +// in src/objects/bigint.cc). +extern "C" int64_t __mulodi4(int64_t a, int64_t b, int* overflow) { + // Multiply in 32-bit chunks. + // For inputs [AH AL]*[BH BL], the result is: + // + // [AL*BL] // r_low + // + [AL*BH] // r_mid1 + // + [AH*BL] // r_mid2 + // + [AH*BH] // r_high + // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1] + // + // Where of course we must be careful with carries between the columns. + uint64_t a_low = a & 0xFFFFFFFFu; + uint64_t a_high = static_cast(a) >> 32; + uint64_t b_low = b & 0xFFFFFFFFu; + uint64_t b_high = static_cast(b) >> 32; + + uint64_t r_low = a_low * b_low; + uint64_t r_mid1 = a_low * b_high; + uint64_t r_mid2 = a_high * b_low; + uint64_t r_high = a_high * b_high; + + uint64_t result1 = r_low + (r_mid1 << 32); + if (result1 < r_low) r_high++; + uint64_t result2 = result1 + (r_mid2 << 32); + if (result2 < result1) r_high++; + r_high += (r_mid1 >> 32) + (r_mid2 >> 32); + int64_t result = static_cast(result2); + uint64_t result_sign = (result >> 63); + uint64_t expected_result_sign = (a >> 63) ^ (b >> 63); + + *overflow = (r_high > 0 || result_sign != expected_result_sign) ? 
1 : 0; + return result; +} diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index ea6308622da13b..fa39142cb4cb3c 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -16,6 +16,7 @@ #include "src/objects/contexts.h" #include "src/objects/field-index-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-regexp-inl.h" #include "src/objects/module-inl.h" #include "src/objects/property-details.h" #include "src/objects/prototype.h" @@ -840,5 +841,25 @@ Handle Accessors::MakeErrorStackInfo(Isolate* isolate) { &ErrorStackGetter, &ErrorStackSetter); } +// +// Accessors::RegExpResultIndices +// + +void Accessors::RegExpResultIndicesGetter( + v8::Local key, const v8::PropertyCallbackInfo& info) { + i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); + HandleScope scope(isolate); + Handle regexp_result( + Handle::cast(Utils::OpenHandle(*info.Holder()))); + Handle indices( + JSRegExpResult::GetAndCacheIndices(isolate, regexp_result)); + info.GetReturnValue().Set(Utils::ToLocal(indices)); +} + +Handle Accessors::MakeRegExpResultIndicesInfo(Isolate* isolate) { + return MakeAccessor(isolate, isolate->factory()->indices_string(), + &RegExpResultIndicesGetter, nullptr); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h index 43a65342966e28..b6a8e65446f963 100644 --- a/deps/v8/src/builtins/accessors.h +++ b/deps/v8/src/builtins/accessors.h @@ -43,6 +43,8 @@ class JavaScriptFrame; kHasSideEffectToReceiver) \ V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \ kHasSideEffectToReceiver) \ + V(_, regexp_result_indices, RegExpResultIndices, kHasSideEffectToReceiver, \ + kHasSideEffectToReceiver) \ V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver) #define ACCESSOR_SETTER_LIST(V) \ diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index e9b562620fcee5..164c09db259013 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -885,102 +885,70 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, - Register scratch2) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r3 : new target (preserved for callee if needed, and caller) // -- r1 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch)); Register closure = r1; - Register optimized_code_entry = scratch1; - - __ ldr( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. 
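The new ubsan.cc above implements __mulodi4 by schoolbook multiplication in 32-bit limbs, as the [R4 R3 R2 R1] column comment describes. As a hedged, standalone illustration of that column scheme, here is the unsigned 64x64 -> 128-bit core, cross-checked against unsigned __int128 (a GCC/Clang extension, so the check assumes one of those compilers):

#include <cassert>
#include <cstdint>

static void Mul64To128(uint64_t a, uint64_t b, uint64_t* high, uint64_t* low) {
  uint64_t a_low = a & 0xFFFFFFFFu, a_high = a >> 32;
  uint64_t b_low = b & 0xFFFFFFFFu, b_high = b >> 32;

  uint64_t r_low = a_low * b_low;     // [R2 R1] partial
  uint64_t r_mid1 = a_low * b_high;   // contributes shifted left by 32
  uint64_t r_mid2 = a_high * b_low;   // contributes shifted left by 32
  uint64_t r_high = a_high * b_high;  // [R4 R3] partial

  uint64_t result1 = r_low + (r_mid1 << 32);
  if (result1 < r_low) r_high++;              // carry out of the low word
  uint64_t result2 = result1 + (r_mid2 << 32);
  if (result2 < result1) r_high++;            // carry out of the low word
  r_high += (r_mid1 >> 32) + (r_mid2 >> 32);  // upper halves of the mid terms

  *high = r_high;
  *low = result2;
}

int main() {
  uint64_t h, l;
  uint64_t a = 0xDEADBEEFCAFEBABEull, b = 0x123456789ABCDEF0ull;
  Mul64To128(a, b, &h, &l);
  unsigned __int128 p = static_cast<unsigned __int128>(a) * b;
  assert(h == static_cast<uint64_t>(p >> 64) && l == static_cast<uint64_t>(p));
}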
- __ cmp(optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kNone))); - __ b(eq, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ ldr(scratch, + FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ ldr(scratch, + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit)); + __ b(ne, &found_deoptimized_code); - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ cmp( - optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } - } + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); + static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch"); + __ LoadCodeObjectEntry(r2, optimized_code_entry); + __ Jump(r2); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ ldr(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ ldr( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit)); - __ b(ne, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); - static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch"); - __ LoadCodeObjectEntry(r2, optimized_code_entry); - __ Jump(r2); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. 
- __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r3 : new target (preserved for callee if needed, and caller) + // -- r1 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ cmp(optimization_marker, + Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -999,7 +967,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ExternalReference::bytecode_size_table_address()); // Check if the bytecode is a Wide or ExtraWide prefix bytecode. - Label process_bytecode, extra_wide; + Label process_bytecode; STATIC_ASSERT(0 == static_cast(interpreter::Bytecode::kWide)); STATIC_ASSERT(1 == static_cast(interpreter::Bytecode::kExtraWide)); STATIC_ASSERT(2 == static_cast(interpreter::Bytecode::kDebugBreakWide)); @@ -1008,31 +976,34 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ cmp(bytecode, Operand(0x3)); __ b(hi, &process_bytecode); __ tst(bytecode, Operand(0x1)); - __ b(ne, &extra_wide); - - // Load the next bytecode and update table to the wide scaled table. + // Load the next bytecode. __ add(bytecode_offset, bytecode_offset, Operand(1)); __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + + // Update table to the wide scaled table. __ add(bytecode_size_table, bytecode_size_table, Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount)); - __ jmp(&process_bytecode); - - __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ add(bytecode_offset, bytecode_offset, Operand(1)); - __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + // Conditionally update table to the extra wide scaled table. We are taking + // advantage of the fact that the extra wide follows the wide one. __ add(bytecode_size_table, bytecode_size_table, - Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); + Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC, + ne); __ bind(&process_bytecode); // Bailout to the return label if this is a return bytecode. 
-#define JUMP_IF_EQUAL(NAME) \ - __ cmp(bytecode, Operand(static_cast(interpreter::Bytecode::k##NAME))); \ - __ b(if_return, eq); + + // Create cmp, cmpne, ..., cmpne to check for a return bytecode. + Condition flag = al; +#define JUMP_IF_EQUAL(NAME) \ + __ cmp(bytecode, Operand(static_cast(interpreter::Bytecode::k##NAME)), \ + flag); \ + flag = ne; RETURN_BYTECODE_LIST(JUMP_IF_EQUAL) #undef JUMP_IF_EQUAL + __ b(if_return, eq); + // Otherwise, load the size of the current bytecode and advance the offset. __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2)); __ add(bytecode_offset, bytecode_offset, scratch1); @@ -1084,9 +1055,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE)); __ b(ne, &push_stack_frame); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6); + Register optimized_code_entry = r4; + + // Read off the optimized code slot in the feedback vector. + __ ldr(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ cmp(optimized_code_entry, + Operand(Smi::FromEnum(OptimizationMarker::kNone))); + __ b(ne, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Increment invocation count for the function. __ ldr(r9, FieldMemOperand(feedback_vector, @@ -1121,28 +1104,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, r0); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ sub(r9, sp, Operand(r4)); LoadRealStackLimit(masm, r2); __ cmp(r9, Operand(r2)); - __ b(hs, &ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ b(lo, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ LoadRoot(r9, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ b(&loop_check, al); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. - __ push(r9); + __ push(kInterpreterAccumulatorRegister); // Continue loop if not done. __ bind(&loop_check); __ sub(r4, r4, Operand(kPointerSize), SetCC); @@ -1157,8 +1138,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ cmp(r9, Operand::Zero()); __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1199,8 +1179,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, r2); __ Jump(lr); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code. 
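The AdvanceBytecodeOffsetOrReturn changes above fold the Wide/ExtraWide prefix handling into a single scaled lookup in the bytecode size table. A hedged C++-level model of that flow (not the generated assembly; the size table and bytecode numbering below are made up for illustration):

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr int kBytecodeCount = 3;
constexpr int kSizes[3 * kBytecodeCount] = {
    2, 3, 4,   // operand scale 1 (no prefix)
    3, 5, 7,   // operand scale 2 (Wide)
    5, 9, 13,  // operand scale 4 (ExtraWide)
};

std::size_t Advance(const uint8_t* bytecodes, std::size_t offset) {
  int row = 0;
  uint8_t bytecode = bytecodes[offset];
  if (bytecode <= 3) {             // the four prefix bytecodes are 0..3
    row = (bytecode & 1) ? 2 : 1;  // odd prefixes are the extra-wide variants
    bytecode = bytecodes[++offset];
  }
  return offset + kSizes[row * kBytecodeCount + bytecode];
}

int main() {
  // Stream: [ExtraWide prefix][real bytecode #2].
  const uint8_t stream[] = {1, 2};
  std::printf("next offset: %zu\n", Advance(stream, 0));  // 1 + 13 = 14
}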
+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ bkpt(0); // Should not return. } @@ -1565,14 +1563,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(r0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2182,7 +2174,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- r1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(r1, &non_callable); __ bind(&non_smi); __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE); @@ -2199,12 +2191,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ cmp(r5, Operand(JS_PROXY_TYPE)); - __ b(ne, &non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver the (original) target. __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // Let the "call_as_function_delegate" take care of the rest. 
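The restructured InterpreterEntryTrampoline above splits the old MaybeTailCallOptimizedCodeSlot into an early "slot empty?" check plus MaybeOptimizeCode and TailCallOptimizedCodeSlot. A hedged C++-level model of the resulting control flow (types and names are stand-ins, not V8 internals):

#include <optional>

enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                    kCompileOptimizedConcurrent, kInOptimizationQueue };

struct Code { bool marked_for_deoptimization; };

struct Slot {
  std::optional<Marker> marker;  // "slot holds a Smi marker" case
  Code* weak_code = nullptr;     // "slot holds a weak ref" case (may be cleared)
};

enum class Action { kRunUnoptimized, kCallRuntime, kTailCallOptimized };

Action Dispatch(const Slot& slot) {
  if (slot.marker.has_value()) {  // Smi marker
    if (*slot.marker == Marker::kNone ||
        *slot.marker == Marker::kInOptimizationQueue) {
      return Action::kRunUnoptimized;  // fall through to the interpreter
    }
    return Action::kCallRuntime;       // MaybeOptimizeCode path
  }
  if (slot.weak_code == nullptr) return Action::kRunUnoptimized;  // cleared ref
  if (slot.weak_code->marked_for_deoptimization) return Action::kCallRuntime;
  return Action::kTailCallOptimized;   // TailCallOptimizedCodeSlot path
}

int main() {
  Code code{false};
  Slot slot;
  slot.weak_code = &code;
  return Dispatch(slot) == Action::kTailCallOptimized ? 0 : 1;
}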
@@ -3167,51 +3157,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { __ Ret(); } -void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) { - Register dest = r0; - Register src = r1; - Register chars = r2; - - { - UseScratchRegisterScope temps(masm); - - Register temp1 = r3; - Register temp2 = temps.Acquire(); - Register temp3 = lr; - Register temp4 = r4; - Label loop; - Label not_two; - - __ Push(lr, r4); - __ bic(temp2, chars, Operand(0x3)); - __ add(temp2, dest, Operand(temp2, LSL, 1)); - - __ bind(&loop); - __ ldr(temp1, MemOperand(src, 4, PostIndex)); - __ uxtb16(temp3, temp1); - __ uxtb16(temp4, temp1, 8); - __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16)); - __ str(temp1, MemOperand(dest)); - __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16)); - __ str(temp1, MemOperand(dest, 4)); - __ add(dest, dest, Operand(8)); - __ cmp(dest, temp2); - __ b(&loop, ne); - - __ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs - __ b(¬_two, cc); - __ ldrh(temp1, MemOperand(src, 2, PostIndex)); - __ uxtb(temp3, temp1, 8); - __ mov(temp3, Operand(temp3, LSL, 16)); - __ uxtab(temp3, temp3, temp1); - __ str(temp3, MemOperand(dest, 4, PostIndex)); - __ bind(¬_two); - __ ldrb(temp1, MemOperand(src), ne); - __ strh(temp1, MemOperand(dest), ne); - __ Pop(pc, r4); - } -} - #undef __ } // namespace internal diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 4e159a69b7ede8..9edd074e3d023d 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1001,108 +1001,78 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, OptimizationMarker marker, Runtime::FunctionId function_id) { Label no_match; - __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match); + __ CompareTaggedAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, + &no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, - Register scratch2) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- x3 : new target (preserved for callee if needed, and caller) // -- x1 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch)); Register closure = x1; - Register optimized_code_entry = scratch1; - - __ LoadAnyTaggedField( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret is at a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ CompareAndBranch(optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq, - &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. 
We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ Cmp( - optimized_code_entry, - Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ B(&fallthrough); - } - } - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadTaggedPointerField( + scratch, + FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ Ldr(scratch.W(), + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit, + &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure into + // the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); + static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); + __ LoadCodeObjectEntry(x2, optimized_code_entry); + __ Jump(x2); - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ LoadTaggedPointerField( - scratch2, - FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ Ldr( - scratch2.W(), - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ Tbnz(scratch2.W(), Code::kMarkedForDeoptimizationBit, - &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); - static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); - __ LoadCodeObjectEntry(x2, optimized_code_entry); - __ Jump(x2); + // Optimized code slot contains deoptimized code, evict it and re-enter the + // closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. 
- __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- x3 : new target (preserved for callee if needed, and caller) + // -- x1 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, x1, x3, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpTagged( + optimization_marker, + Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1129,19 +1099,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ Cmp(bytecode, Operand(0x3)); __ B(hi, &process_bytecode); __ Tst(bytecode, Operand(0x1)); - __ B(ne, &extra_wide); - - // Load the next bytecode and update table to the wide scaled table. + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here since they do not modify the flags after Tst. __ Add(bytecode_offset, bytecode_offset, Operand(1)); __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + __ B(ne, &extra_wide); + + // Update table to the wide scaled table. __ Add(bytecode_size_table, bytecode_size_table, Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ B(&process_bytecode); __ Bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ Add(bytecode_offset, bytecode_offset, Operand(1)); - __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); + // Update table to the extra wide scaled table. __ Add(bytecode_size_table, bytecode_size_table, Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -1211,7 +1181,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4); + Register optimized_code_entry = x7; + __ LoadAnyTaggedField( + optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. 
+ Label optimized_code_slot_not_empty; + __ CompareTaggedAndBranch(optimized_code_entry, + Operand(Smi::FromEnum(OptimizationMarker::kNone)), + ne, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Increment invocation count for the function. // MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse @@ -1248,13 +1231,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, x0); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ Sub(x10, sp, Operand(x11)); { UseScratchRegisterScope temps(masm); @@ -1262,21 +1245,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LoadRealStackLimit(masm, scratch); __ Cmp(x10, scratch); } - __ B(hs, &ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ Bind(&ok); + __ B(lo, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. // Note: there should always be at least one stack slot for the return // register in the register file. Label loop_header; - __ LoadRoot(x10, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ Lsr(x11, x11, kSystemPointerSizeLog2); // Round up the number of registers to a multiple of 2, to align the stack // to 16 bytes. __ Add(x11, x11, 1); __ Bic(x11, x11, 1); - __ PushMultipleTimes(x10, x11); + __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11); __ Bind(&loop_header); } @@ -1291,8 +1272,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2)); __ Bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1315,9 +1295,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ Ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ Ldr(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Either return, or advance to the next bytecode and dispatch. Label do_return; @@ -1333,9 +1312,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, x2); __ Ret(); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code as opposed to an optimization marker. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. 
+ __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); __ Unreachable(); // Should not return. + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + __ Unreachable(); // Should not return. } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1543,9 +1541,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { } // Get the target bytecode offset from the frame. - __ Ldr(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Dispatch to the target bytecode. __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister, @@ -1560,9 +1557,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ ldr(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Load the current bytecode. __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister, @@ -1633,7 +1629,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { // Set flags for determining the value of smi-tagged argc. // lt => 1, eq => 2, gt => 3. - __ Cmp(argc, Smi::FromInt(2)); + __ CmpTagged(argc, Smi::FromInt(2)); __ B(gt, &three_args); // One or two arguments. @@ -1769,20 +1765,14 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ PushArgument(x0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } // If the code object is null, just return to the caller. Label skip; - __ CompareAndBranch(x0, Smi::zero(), ne, &skip); + __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip); __ Ret(); __ Bind(&skip); @@ -1878,8 +1868,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ Cmp(arg_array, null_value); - __ Ccmp(arg_array, undefined_value, ZFlag, ne); + __ CmpTagged(arg_array, null_value); + __ CcmpTagged(arg_array, undefined_value, ZFlag, ne); __ B(eq, &no_arguments); // 4a. Apply the receiver to the given argArray. 
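Several arm64 comparisons above move from Cmp/CompareAndBranch to CmpTagged/CompareTaggedAndBranch. A hedged model of why tagged comparisons want their own helper, on the assumption that this relates to the pointer-compression support enabled elsewhere in this diff (the layout below is illustrative only):

#include <cstdint>
#include <cstdio>

using Tagged_t = uint32_t;  // compressed on-heap representation (assumed)
using Address = uint64_t;   // full register-width value

// Compare only the compressed part, mirroring what a CmpTagged-style helper
// has to do when the upper half of the register is not significant.
bool TaggedEqual(Address a, Address b) {
  return static_cast<Tagged_t>(a) == static_cast<Tagged_t>(b);
}

int main() {
  Address a = 0x0000111100ABCDEFULL;
  Address b = 0x0000222200ABCDEFULL;
  std::printf("%d %d\n", TaggedEqual(a, b), static_cast<int>(a == b));  // 1 0
}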
@@ -2261,7 +2251,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Bind(&loop); __ Sub(len, len, 1); __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex)); - __ Cmp(scratch, the_hole_value); + __ CmpTagged(scratch, the_hole_value); __ Csel(scratch, scratch, undefined_value, ne); __ Poke(scratch, Operand(len, LSL, kSystemPointerSizeLog2)); __ Cbnz(len, &loop); @@ -2319,7 +2309,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); __ Ldr(x4, MemOperand(args_fp, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); + __ CmpTagged(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); __ B(eq, &arguments_adaptor); { __ Ldr(scratch, @@ -2626,7 +2616,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- x1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(x1, &non_callable); __ Bind(&non_smi); __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE); @@ -2642,12 +2632,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ Cmp(x5, JS_PROXY_TYPE); - __ B(ne, &non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ Bind(&non_function); // Overwrite the original receiver with the (original) target. __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2)); // Let the "call_as_function_delegate" take care of the rest. @@ -2712,7 +2700,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // Patch new.target to [[BoundTargetFunction]] if new.target equals target. { Label done; - __ Cmp(x1, x3); + __ CmpTagged(x1, x3); __ B(ne, &done); __ LoadTaggedPointerField( x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index aa5d4cc50a731c..065cd08e4c3bda 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -90,11 +90,28 @@ type bool generates 'TNode' constexpr 'bool'; type bint generates 'TNode' constexpr 'BInt'; type string constexpr 'const char*'; -type NameDictionary extends FixedArray; +// The HashTable inheritance hierarchy doesn't actually look like this in C++ +// because it uses some class templates that we can't yet (and may never) +// express in Torque, but this is the expected organization of instance types. 
+@abstract @dirtyInstantiatedAbstractClass +extern class HashTable extends FixedArray generates 'TNode'; +extern class OrderedHashMap extends HashTable; +extern class OrderedHashSet extends HashTable; +extern class OrderedNameDictionary extends HashTable; +extern class NameDictionary extends HashTable; +extern class GlobalDictionary extends HashTable; +extern class SimpleNumberDictionary extends HashTable; +extern class StringTable extends HashTable; +extern class EphemeronHashTable extends HashTable; +type ObjectHashTable extends HashTable + generates 'TNode'; +extern class NumberDictionary extends HashTable; type RawPtr generates 'TNode' constexpr 'void*'; -type Code extends HeapObject generates 'TNode'; +extern class Code extends HeapObject; type BuiltinPtr extends Smi generates 'TNode'; + +@abstract extern class Context extends HeapObject { length: Smi; scope_info: ScopeInfo; @@ -102,10 +119,27 @@ extern class Context extends HeapObject { extension: Object; native_context: Object; } -type NativeContext extends Context generates 'TNode'; +extern class AwaitContext extends Context generates 'TNode'; +extern class BlockContext extends Context generates 'TNode'; +extern class CatchContext extends Context generates 'TNode'; +extern class DebugEvaluateContext extends Context + generates 'TNode'; +extern class EvalContext extends Context generates 'TNode'; +extern class FunctionContext extends Context generates 'TNode'; +extern class ModuleContext extends Context generates 'TNode'; +extern class NativeContext extends Context; +extern class ScriptContext extends Context generates 'TNode'; +extern class WithContext extends Context generates 'TNode'; + +@generateCppClass +@abstract +extern class PrimitiveHeapObject extends HeapObject { +} @generateCppClass -extern class Oddball extends HeapObject { +@apiExposedInstanceTypeValue(0x43) +@highestInstanceTypeWithinParentClassRange +extern class Oddball extends PrimitiveHeapObject { to_number_raw: float64; to_string: String; to_number: Number; @@ -113,13 +147,13 @@ extern class Oddball extends HeapObject { kind: Smi; } -extern class HeapNumber extends HeapObject { value: float64; } +extern class HeapNumber extends PrimitiveHeapObject { value: float64; } type Number = Smi | HeapNumber; type Numeric = Number | BigInt; @abstract @generateCppClass -extern class Name extends HeapObject { +extern class Name extends PrimitiveHeapObject { hash_field: uint32; } // This is the same as Name, but with the information that there are no other @@ -137,6 +171,7 @@ type PrivateSymbol extends Symbol; @abstract @generateCppClass +@reserveBitsInInstanceType(6) extern class String extends Name { length: int32; } @@ -222,20 +257,35 @@ extern class FixedArrayBase extends HeapObject { length: Smi; } -extern class FixedArray extends FixedArrayBase { objects[length]: Object; } +@abstract +@dirtyInstantiatedAbstractClass +extern class FixedArray extends FixedArrayBase { + objects[length]: Object; +} extern class FixedDoubleArray extends FixedArrayBase { floats[length]: float64; } -extern class WeakFixedArray extends HeapObject { length: Smi; } +@abstract +@dirtyInstantiatedAbstractClass +extern class WeakFixedArray extends HeapObject { + length: Smi; +} extern class ByteArray extends FixedArrayBase {} +@hasSameInstanceTypeAsParent +extern class ArrayList extends FixedArray { +} + +extern class ObjectBoilerplateDescription extends FixedArray; +extern class ClosureFeedbackCellArray extends FixedArray; +extern class ScriptContextTable extends FixedArray; + type LayoutDescriptor 
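The comment accompanying the new HashTable declarations above notes that the Torque hierarchy only approximates the C++ side, which uses class templates. A hedged sketch of roughly what that C++ shape looks like — simplified, and the real V8 declarations differ in detail:

class FixedArrayBase {};
class FixedArray : public FixedArrayBase {};
class HashTableBase : public FixedArray {};

template <typename Derived, typename Shape>
class HashTable : public HashTableBase {
  // Shape supplies key hashing/equality and entry size; Derived lets the base
  // call into the concrete table without virtual dispatch (CRTP).
};

struct NameDictionaryShape { static const int kEntrySize = 3; };
class NameDictionary
    : public HashTable<NameDictionary, NameDictionaryShape> {};

int main() {
  NameDictionary dict;
  (void)dict;
}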
extends ByteArray generates 'TNode'; -type TransitionArray extends WeakFixedArray - generates 'TNode'; +extern class TransitionArray extends WeakFixedArray; type InstanceType extends uint16 constexpr 'v8::internal::InstanceType'; @@ -282,6 +332,7 @@ extern class SourcePositionTableWithFrameCache extends Struct { // We make this class abstract because it is missing the variable-sized part, // which is still impossible to express in Torque. @abstract +@dirtyInstantiatedAbstractClass extern class DescriptorArray extends HeapObject { number_of_all_descriptors: uint16; number_of_descriptors: uint16; @@ -327,7 +378,9 @@ intrinsic } } +// JSReceiver corresponds to objects in the JS sense. @abstract +@highestInstanceTypeWithinParentClassRange extern class JSReceiver extends HeapObject { properties_or_hash: FixedArrayBase | PropertyArray | Smi; } @@ -337,6 +390,8 @@ type Constructor extends JSReceiver; @abstract @dirtyInstantiatedAbstractClass @generateCppClass +@apiExposedInstanceTypeValue(0x421) +@highestInstanceTypeWithinParentClassRange extern class JSObject extends JSReceiver { // [elements]: The elements (properties with names that are integers). // @@ -368,6 +423,18 @@ macro NewJSObject(implicit context: Context)(): JSObject { }; } +@abstract +@generateCppClass +@lowestInstanceTypeWithinParentClassRange +extern class JSCustomElementsObject extends JSObject { +} + +@abstract +@generateCppClass +@lowestInstanceTypeWithinParentClassRange +extern class JSSpecialObject extends JSCustomElementsObject { +} + extern macro HasPrototypeSlot(JSFunction): bool; macro GetDerivedMap(implicit context: Context)( @@ -401,7 +468,8 @@ macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map): map, properties, kEmptyFixedArray, kNone, kWithSlackTracking); } -extern class JSFunction extends JSObject { +@highestInstanceTypeWithinParentClassRange +extern class JSFunction extends JSFunctionOrBoundFunction { shared_function_info: SharedFunctionInfo; context: Context; feedback_cell: FeedbackCell; @@ -419,6 +487,7 @@ extern class JSProxy extends JSReceiver { // Just a starting shape for JSObject; properties can move after initialization. @noVerifier +@hasSameInstanceTypeAsParent extern class JSProxyRevocableResult extends JSObject { proxy: JSAny; revoke: JSAny; @@ -436,14 +505,14 @@ macro NewJSProxyRevocableResult(implicit context: Context)( } @generateCppClass -extern class JSGlobalProxy extends JSObject { +extern class JSGlobalProxy extends JSSpecialObject { // [native_context]: the owner native context of this global proxy object. // It is null value if this object is not used by any context. native_context: Object; } @generateCppClass -extern class JSPrimitiveWrapper extends JSObject { +extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; } @@ -531,8 +600,6 @@ extern class CallHandlerInfo extends Struct { data: Object; } -type ObjectHashTable extends FixedArray; - @abstract extern class Module extends HeapObject { exports: ObjectHashTable; @@ -569,9 +636,12 @@ extern class SourceTextModule extends Module { // Lazily initialized on first access. It's the hole before first access and // a JSObject afterwards. 
import_meta: TheHole | JSObject; - + async_parent_modules: ArrayList; + top_level_capability: JSPromise | Undefined; dfs_index: Smi; dfs_ancestor_index: Smi; + pending_async_dependencies: Smi; + flags: Smi; } @generateCppClass @@ -583,7 +653,8 @@ extern class SyntheticModule extends Module { @abstract @generateCppClass -extern class JSModuleNamespace extends JSObject { +@dirtyInstantiatedAbstractClass +extern class JSModuleNamespace extends JSSpecialObject { module: Module; } @@ -606,6 +677,7 @@ extern class JSWeakMap extends JSWeakCollection { } @generateCppClass +@abstract extern class JSCollectionIterator extends JSObject { // The backing hash table mapping keys to values. table: Object; @@ -613,6 +685,20 @@ extern class JSCollectionIterator extends JSObject { index: Object; } +@abstract extern class JSMapIterator extends JSCollectionIterator; +extern class JSMapKeyIterator extends JSMapIterator + generates 'TNode'; +extern class JSMapKeyValueIterator extends JSMapIterator + generates 'TNode'; +extern class JSMapValueIterator extends JSMapIterator + generates 'TNode'; + +@abstract extern class JSSetIterator extends JSCollectionIterator; +extern class JSSetKeyValueIterator extends JSSetIterator + generates 'TNode'; +extern class JSSetValueIterator extends JSSetIterator + generates 'TNode'; + extern class JSMessageObject extends JSObject { // Tagged fields. message_type: Smi; @@ -656,7 +742,7 @@ extern class Script extends Struct { line_ends: Object; id: Smi; eval_from_shared_or_wrapped_arguments: Object; - eval_from_position: Smi; + eval_from_position: Smi | Foreign; // Smi or Managed shared_function_infos: Object; flags: Smi; source_url: Object; @@ -669,12 +755,13 @@ extern class EmbedderDataArray extends HeapObject { length: Smi; } -type ScopeInfo extends HeapObject generates 'TNode'; +extern class ScopeInfo extends FixedArray; +@generateCppClass extern class PreparseData extends HeapObject { // TODO(v8:8983): Add declaration for variable-sized region. data_length: int32; - inner_length: int32; + children_length: int32; } extern class InterpreterData extends Struct { @@ -697,13 +784,36 @@ extern class SharedFunctionInfo extends HeapObject { @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32; } +@abstract +@generateCppClass +extern class UncompiledData extends HeapObject { + inferred_name: String; + start_position: int32; + end_position: int32; +} + +@generateCppClass +extern class UncompiledDataWithoutPreparseData extends UncompiledData { +} + +@generateCppClass +extern class UncompiledDataWithPreparseData extends UncompiledData { + preparse_data: PreparseData; +} + +@abstract +@generateCppClass +@highestInstanceTypeWithinParentClassRange +extern class JSFunctionOrBoundFunction extends JSObject { +} + @generateCppClass -extern class JSBoundFunction extends JSObject { +extern class JSBoundFunction extends JSFunctionOrBoundFunction { // The wrapped function object. bound_target_function: Callable; // The value that is always passed as the this value when calling the wrapped // function. - bound_this: JSAny; + bound_this: JSAny | SourceTextModule; // A list of values whose elements are used as the first arguments to any call // to the wrapped function. 
bound_arguments: FixedArray; @@ -728,8 +838,6 @@ extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength( FixedArrayBase): intptr; type SloppyArgumentsElements extends FixedArray; -type NumberDictionary extends HeapObject - generates 'TNode'; extern class FreeSpace extends HeapObject { size: Smi; @@ -763,6 +871,8 @@ const PROXY_REVOCABLE_RESULT_MAP_INDEX: constexpr NativeContextSlot generates 'Context::PROXY_REVOCABLE_RESULT_MAP_INDEX'; const REFLECT_APPLY_INDEX: constexpr NativeContextSlot generates 'Context::REFLECT_APPLY_INDEX'; +const REGEXP_FUNCTION_INDEX: constexpr NativeContextSlot + generates 'Context::REGEXP_FUNCTION_INDEX'; const REGEXP_LAST_MATCH_INFO_INDEX: constexpr NativeContextSlot generates 'Context::REGEXP_LAST_MATCH_INFO_INDEX'; const INITIAL_STRING_ITERATOR_MAP_INDEX: constexpr NativeContextSlot @@ -834,7 +944,7 @@ extern class JSDate extends JSObject { cache_stamp: Undefined | Smi | NaN; } -extern class JSGlobalObject extends JSObject { +extern class JSGlobalObject extends JSSpecialObject { native_context: NativeContext; global_proxy: JSGlobalProxy; } @@ -847,9 +957,12 @@ extern class JSAsyncFromSyncIterator extends JSObject { next: Object; } +@generateCppClass extern class JSStringIterator extends JSObject { + // The [[IteratedString]] inobject property. string: String; - next_index: Smi; + // The [[StringIteratorNextIndex]] inobject property. + index: Smi; } @abstract @@ -885,7 +998,7 @@ extern class FunctionTemplateRareData extends Struct { @generateCppClass extern class FunctionTemplateInfo extends TemplateInfo { // Handler invoked when calling an instance of this FunctionTemplateInfo. - // Either CallInfoHandler or Undefined. + // Either CallHandlerInfo or Undefined. call_code: Object; class_name: Object; // If the signature is a FunctionTemplateInfo it is used to check whether the @@ -946,7 +1059,10 @@ const UTF16: const UTF32: constexpr UnicodeEncoding generates 'UnicodeEncoding::UTF32'; -extern class Foreign extends HeapObject { foreign_address: RawPtr; } +@apiExposedInstanceTypeValue(0x46) +extern class Foreign extends HeapObject { + foreign_address: RawPtr; +} @generateCppClass extern class InterceptorInfo extends Struct { @@ -985,6 +1101,7 @@ extern class Cell extends HeapObject { value: Object; } +@abstract extern class DataHandler extends Struct { smi_handler: Smi | Code; validity_cell: Smi | Cell; @@ -996,6 +1113,9 @@ extern class DataHandler extends Struct { @noVerifier weak data_3: Object; } +extern class LoadHandler extends DataHandler; +extern class StoreHandler extends DataHandler; + @abstract @dirtyInstantiatedAbstractClass @generateCppClass @@ -1087,7 +1207,7 @@ extern class ClassPositions extends Struct { end: Smi; } -type WasmInstanceObject extends JSObject; +extern class WasmInstanceObject extends JSObject; extern class WasmExportedFunctionData extends Struct { wrapper_code: Code; @@ -1129,6 +1249,7 @@ extern class WasmIndirectFunctionTable extends Struct { extern class WasmDebugInfo extends Struct { instance: WasmInstanceObject; interpreter_handle: Foreign | Undefined; + interpreter_reference_stack: Cell; locals_names: FixedArray | Undefined; c_wasm_entries: FixedArray | Undefined; c_wasm_entry_map: Foreign | Undefined; // Managed @@ -1305,9 +1426,6 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate const kString: constexpr PrimitiveType generates 'PrimitiveType::kString'; -const kExternalPointerForOnHeapArray: constexpr RawPtr - generates 'JSTypedArray::ExternalPointerForOnHeapArray()'; - const 
kNameDictionaryInitialCapacity: constexpr int32 generates 'NameDictionary::kInitialCapacity'; @@ -1332,6 +1450,7 @@ extern macro EmptyStringConstant(): EmptyString; extern macro LengthStringConstant(): String; extern macro NanConstant(): NaN; extern macro IteratorSymbolConstant(): Symbol; +extern macro MatchSymbolConstant(): Symbol; const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); @@ -1443,15 +1562,30 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void; @hasSameInstanceTypeAsParent extern class JSRegExpResult extends JSArray { + // In-object properties: + // The below fields are externally exposed. index: JSAny; input: JSAny; groups: JSAny; + + // The below fields are for internal use only. + cached_indices_or_match_info: JSRegExpResultIndices | RegExpMatchInfo; + names: FixedArray | Undefined; } +@hasSameInstanceTypeAsParent +extern class JSRegExpResultIndices extends JSArray { + // In-object properties: + // The groups field is externally exposed. + groups: JSAny; +} + +transient type FastJSRegExpResult extends JSRegExpResult; + @generateCppClass extern class JSRegExpStringIterator extends JSObject { // The [[IteratingRegExp]] internal property. - iterating_reg_exp: JSAny; + iterating_reg_exp: JSReceiver; // The [[IteratedString]] internal property. iterated_string: String; flags: Smi; @@ -1493,21 +1627,33 @@ extern class AccessorInfo extends Struct { data: Object; } +@generateCppClass extern class AccessorPair extends Struct { getter: Object; setter: Object; } -extern class BreakPoint extends Tuple2 {} -extern class BreakPointInfo extends Tuple2 {} +@hasSameInstanceTypeAsParent +extern class BreakPoint extends Tuple2 { +} +@hasSameInstanceTypeAsParent +extern class BreakPointInfo extends Tuple2 { +} type CoverageInfo extends FixedArray; +@generateCppClass extern class DebugInfo extends Struct { - shared_function_info: SharedFunctionInfo; + shared: SharedFunctionInfo; debugger_hints: Smi; + // Script field from shared function info. script: Undefined | Script; + // The original uninstrumented bytecode array for functions with break + // points - the instrumented bytecode is held in the shared function info. original_bytecode_array: Undefined | BytecodeArray; + // The debug instrumented bytecode array for functions with break points + // - also pointed to by the shared function info. debug_bytecode_array: Undefined | BytecodeArray; + // Fixed array holding status information for each active break point. 
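Note on the new JSRegExpResult field cached_indices_or_match_info above: its union type (JSRegExpResultIndices | RegExpMatchInfo) suggests the result object initially keeps the raw match info and only materializes an indices object on first use, overwriting the same slot so later reads are cheap. The toy below illustrates that single-slot memoization pattern in ordinary C++; every type and the conversion step are stand-ins for illustration, not V8's actual logic. (The DebugInfo field list being edited above continues right after this aside.)

#include <cstddef>
#include <memory>
#include <utility>
#include <variant>
#include <vector>

struct RegExpMatchInfo {             // stand-in: flat [start0, end0, start1, ...]
  std::vector<int> captures;
};
struct JSRegExpResultIndices {       // stand-in for the indices object
  std::vector<std::pair<int, int>> ranges;
};

struct JSRegExpResult {
  // One slot, two states: raw match info before first access, memoized
  // indices afterwards, mirroring the field's union type.
  std::variant<RegExpMatchInfo, std::shared_ptr<JSRegExpResultIndices>> cached;

  const JSRegExpResultIndices& indices() {
    if (auto* raw = std::get_if<RegExpMatchInfo>(&cached)) {
      auto built = std::make_shared<JSRegExpResultIndices>();
      for (std::size_t i = 0; i + 1 < raw->captures.size(); i += 2)
        built->ranges.emplace_back(raw->captures[i], raw->captures[i + 1]);
      cached = built;                // overwrite the slot in place
    }
    return *std::get<std::shared_ptr<JSRegExpResultIndices>>(cached);
  }
};

int main() {
  JSRegExpResult result{RegExpMatchInfo{{0, 3, 1, 2}}};
  return result.indices().ranges.size() == 2 ? 0 : 1;
}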
break_points: FixedArray; flags: Smi; coverage_info: CoverageInfo | Undefined; @@ -1527,12 +1673,15 @@ extern class FeedbackVector extends HeapObject { padding: uint32; } +@generateCppClass extern class FeedbackCell extends Struct { value: Undefined | FeedbackVector | FixedArray; interrupt_budget: int32; } -type AllocationSite extends Struct; +extern class FeedbackMetadata extends HeapObject; + +extern class AllocationSite extends Struct; extern class AllocationMemento extends Struct { allocation_site: AllocationSite; } @@ -1541,9 +1690,7 @@ extern class WasmModuleObject extends JSObject { native_module: Foreign; export_wrappers: FixedArray; script: Script; - weak_instance_list: WeakArrayList; asm_js_offset_table: ByteArray | Undefined; - break_point_infos: FixedArray | Undefined; } extern class WasmTableObject extends JSObject { @@ -1590,22 +1737,35 @@ extern class JSFinalizationGroup extends JSObject { flags: Smi; } +@generateCppClass extern class JSFinalizationGroupCleanupIterator extends JSObject { finalization_group: JSFinalizationGroup; } +@generateCppClass extern class WeakCell extends HeapObject { finalization_group: Undefined | JSFinalizationGroup; target: Undefined | JSReceiver; holdings: Object; + + // For storing doubly linked lists of WeakCells in JSFinalizationGroup's + // "active_cells" and "cleared_cells" lists. prev: Undefined | WeakCell; next: Undefined | WeakCell; + + // For storing doubly linked lists of WeakCells per key in + // JSFinalizationGroup's key-based hashmap. WeakCell also needs to know its + // key, so that we can remove the key from the key_map when we remove the last + // WeakCell associated with it. key: Object; key_list_prev: Undefined | WeakCell; key_list_next: Undefined | WeakCell; } -extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; } +@generateCppClass +extern class JSWeakRef extends JSObject { + target: Undefined | JSReceiver; +} extern class BytecodeArray extends FixedArrayBase { // TODO(v8:8983): bytecode array object sizes vary based on their contents. @@ -1620,6 +1780,29 @@ extern class BytecodeArray extends FixedArrayBase { bytecode_age: int8; } +extern class Filler extends HeapObject generates 'TNode'; +extern class CodeDataContainer extends HeapObject; +@abstract +extern class SmallOrderedHashTable extends HeapObject + generates 'TNode'; +extern class SmallOrderedHashMap extends SmallOrderedHashTable; +extern class SmallOrderedHashSet extends SmallOrderedHashTable; +extern class SmallOrderedNameDictionary extends SmallOrderedHashTable; + +// Various logical subclasses of JSObject, which have their own instance types +// but not their own class definitions: + +// Like JSObject, but created from API function. +@apiExposedInstanceTypeValue(0x420) +extern class JSApiObject extends JSObject generates 'TNode'; +// Like JSApiObject, but requires access checks and/or has interceptors. 
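Note on the WeakCell comments added above: each cell carries two independent sets of link fields, prev/next for the finalization group's active_cells and cleared_cells lists and key_list_prev/key_list_next for the per-key list in the key map, so a cell can be unlinked from either list in O(1), and the key can be dropped from the key map once its list empties. Below is a small self-contained C++ sketch of that intrusive-list bookkeeping, with plain pointers standing in for the Undefined | WeakCell tagged fields. (The JSSpecialApiObject declaration that the preceding comment introduces follows right after the sketch.)

#include <cassert>

struct WeakCell {
  WeakCell* prev = nullptr;           // active_cells / cleared_cells links
  WeakCell* next = nullptr;
  WeakCell* key_list_prev = nullptr;  // per-key list inside the key map
  WeakCell* key_list_next = nullptr;
};

// Unlink `cell` from the per-key list whose head is *head. Returns true when
// the list becomes empty, which is the moment the key could be removed from
// the key map, per the comment quoted above.
bool UnlinkFromKeyList(WeakCell** head, WeakCell* cell) {
  if (cell->key_list_prev != nullptr)
    cell->key_list_prev->key_list_next = cell->key_list_next;
  else
    *head = cell->key_list_next;
  if (cell->key_list_next != nullptr)
    cell->key_list_next->key_list_prev = cell->key_list_prev;
  cell->key_list_prev = cell->key_list_next = nullptr;
  return *head == nullptr;
}

int main() {
  WeakCell a, b;
  a.key_list_next = &b;
  b.key_list_prev = &a;
  WeakCell* head = &a;
  assert(!UnlinkFromKeyList(&head, &b));  // a still keeps the key alive
  assert(UnlinkFromKeyList(&head, &a));   // now the key can be dropped
  return 0;
}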
+@apiExposedInstanceTypeValue(0x410) +extern class JSSpecialApiObject extends JSSpecialObject + generates 'TNode'; +extern class JSContextExtensionObject extends JSObject + generates 'TNode'; +extern class JSError extends JSObject generates 'TNode'; + extern macro Is64(): constexpr bool; extern macro SelectBooleanConstant(bool): Boolean; @@ -1657,6 +1840,8 @@ extern transitioning builtin HasProperty(implicit context: Context)( extern transitioning macro HasProperty_Inline(implicit context: Context)( JSReceiver, JSAny): Boolean; extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny; +extern macro CollectCallFeedback( + JSAny, Context, Undefined | FeedbackVector, uintptr); extern macro ThrowRangeError(implicit context: Context)( constexpr MessageTemplate): never; @@ -1674,6 +1859,10 @@ extern macro ThrowTypeError(implicit context: Context)( constexpr MessageTemplate, Object, Object, Object): never; extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)( Smi, Object, Object): void; +extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)( + JSAny): never; +extern transitioning runtime ThrowSymbolIteratorInvalid( + implicit context: Context)(): never; extern transitioning macro ThrowIfNotJSReceiver(implicit context: Context)( JSAny, constexpr MessageTemplate, constexpr string): void; @@ -2232,6 +2421,14 @@ Cast(o: HeapObject): JSStringIterator return HeapObjectToJSStringIterator(o) otherwise CastError; } +Cast(o: HeapObject): JSRegExpStringIterator + labels CastError { + if (IsJSRegExpStringIterator(o)) { + return %RawDownCast(o); + } + goto CastError; +} + Cast(o: HeapObject): JSTypedArray labels CastError { if (IsJSTypedArray(o)) return %RawDownCast(o); @@ -2354,12 +2551,25 @@ Cast(o: HeapObject): JSRegExp goto CastError; } +Cast(implicit context: Context)(o: HeapObject): + FastJSRegExpResult + labels CastError { + if (regexp::IsFastRegExpResult(o)) return %RawDownCast(o); + goto CastError; +} + Cast(implicit context: Context)(o: HeapObject): Map labels CastError { if (IsMap(o)) return %RawDownCast(o); goto CastError; } +Cast(implicit context: Context)(o: HeapObject): FeedbackVector + labels CastError { + if (IsFeedbackVector(o)) return %RawDownCast(o); + goto CastError; +} + Cast(o: HeapObject): JSPrimitiveWrapper labels CastError { if (IsJSPrimitiveWrapper(o)) return %RawDownCast(o); @@ -2513,6 +2723,7 @@ extern macro Signed(RawPtr): intptr; extern macro TruncateIntPtrToInt32(intptr): int32; extern macro SmiTag(intptr): Smi; extern macro SmiFromInt32(int32): Smi; +extern macro SmiFromUint32(uint32): Smi; extern macro SmiUntag(Smi): intptr; extern macro SmiToInt32(Smi): int32; extern macro RoundIntPtrToFloat64(intptr): float64; @@ -2556,6 +2767,7 @@ extern macro BitcastWordToTaggedSigned(uintptr): Smi; extern macro BitcastWordToTagged(intptr): Object; extern macro BitcastWordToTagged(uintptr): Object; extern macro BitcastTaggedToWord(Tagged): intptr; +extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr; intrinsic %FromConstexpr(b: From): To; macro FromConstexpr(o: From): To; @@ -2674,7 +2886,7 @@ Convert(ui: uint32): Number { return ChangeUint32ToTagged(ui); } Convert(ui: uint32): Smi { - return SmiFromInt32(Signed(ui)); + return SmiFromUint32(ui); } Convert(ui: uint32): uintptr { return ChangeUint32ToWord(ui); @@ -2811,8 +3023,7 @@ extern macro IsMockArrayBufferAllocatorFlag(): bool; extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map): bool; -extern operator '.data_ptr' macro 
LoadJSTypedArrayBackingStore(JSTypedArray): - RawPtr; +extern operator '.data_ptr' macro LoadJSTypedArrayDataPtr(JSTypedArray): RawPtr; extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind; extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray): @@ -2879,8 +3090,6 @@ extern macro LoadConstructorOrBackPointer(Map): Object; extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): JSAny labels NotData, IfHole; -extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, JSAny) - labels NotData, IfHole, ReadOnly; extern macro IsFastElementsKind(ElementsKind): bool; extern macro IsDoubleElementsKind(ElementsKind): bool; @@ -3255,9 +3464,11 @@ extern macro PerformStackCheck(implicit context: Context)(): void; extern macro IsCallable(HeapObject): bool; extern macro IsConstructor(HeapObject): bool; +extern macro IsFeedbackVector(HeapObject): bool; extern macro IsJSArray(HeapObject): bool; extern macro IsJSProxy(HeapObject): bool; extern macro IsJSRegExp(HeapObject): bool; +extern macro IsJSRegExpStringIterator(HeapObject): bool; extern macro IsMap(HeapObject): bool; extern macro IsJSFunction(HeapObject): bool; extern macro IsJSObject(HeapObject): bool; diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq index a1b1cb67809d84..f0409ad23df13f 100644 --- a/deps/v8/src/builtins/bigint.tq +++ b/deps/v8/src/builtins/bigint.tq @@ -7,7 +7,8 @@ // TODO(nicohartmann): Discuss whether types used by multiple builtins should be // in global namespace @noVerifier -extern class BigIntBase extends HeapObject generates 'TNode' { +extern class BigIntBase extends PrimitiveHeapObject + generates 'TNode' { } type BigInt extends BigIntBase; @@ -44,9 +45,6 @@ namespace bigint { MutableBigInt, intptr, uintptr): void; extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr; - @export // Silence unused warning. - // TODO(szuend): Remove @export once macros that are only used in - // asserts are no longer detected as unused. macro IsCanonicalized(bigint: BigIntBase): bool { const length = ReadBigIntLength(bigint); diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc index c4399175e9846d..fb6169adf86b4b 100644 --- a/deps/v8/src/builtins/builtins-arguments-gen.cc +++ b/deps/v8/src/builtins/builtins-arguments-gen.cc @@ -17,38 +17,34 @@ namespace v8 { namespace internal { -using Node = compiler::Node; - -std::tuple -ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, - Node* arguments_count, - Node* parameter_map_count, - ParameterMode mode, - int base_size) { +ArgumentsBuiltinsAssembler::ArgumentsAllocationResult +ArgumentsBuiltinsAssembler::AllocateArgumentsObject( + TNode map, TNode arguments_count, + TNode parameter_map_count, int base_size) { // Allocate the parameter object (either a Rest parameter object, a strict // argument object or a sloppy arguments object) and the elements/mapped // arguments together. int elements_offset = base_size; - Node* element_count = arguments_count; + TNode element_count = arguments_count; if (parameter_map_count != nullptr) { base_size += FixedArray::kHeaderSize; - element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode); + element_count = IntPtrOrSmiAdd(element_count, parameter_map_count); } - bool empty = IsIntPtrOrSmiConstantZero(arguments_count, mode); + bool empty = IsIntPtrOrSmiConstantZero(arguments_count); DCHECK_IMPLIES(empty, parameter_map_count == nullptr); TNode size = empty ? 
IntPtrConstant(base_size) - : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, mode, + : ElementOffsetFromIndex(element_count, PACKED_ELEMENTS, base_size + FixedArray::kHeaderSize); TNode result = Allocate(size); Comment("Initialize arguments object"); StoreMapNoWriteBarrier(result, map); TNode empty_fixed_array = EmptyFixedArrayConstant(); StoreObjectField(result, JSArray::kPropertiesOrHashOffset, empty_fixed_array); - TNode smi_arguments_count = ParameterToTagged(arguments_count, mode); + TNode smi_arguments_count = BIntToSmi(arguments_count); StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, smi_arguments_count); - Node* arguments = nullptr; + TNode arguments; if (!empty) { arguments = InnerAllocate(result, elements_offset); StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset, @@ -56,18 +52,17 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, TNode fixed_array_map = FixedArrayMapConstant(); StoreMapNoWriteBarrier(arguments, fixed_array_map); } - Node* parameter_map = nullptr; - if (parameter_map_count != nullptr) { + TNode parameter_map; + if (!parameter_map_count.is_null()) { TNode parameter_map_offset = ElementOffsetFromIndex( - arguments_count, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize); - parameter_map = InnerAllocate(CAST(arguments), parameter_map_offset); + arguments_count, PACKED_ELEMENTS, FixedArray::kHeaderSize); + parameter_map = InnerAllocate(arguments, parameter_map_offset); StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, parameter_map); TNode sloppy_elements_map = SloppyArgumentsElementsMapConstant(); StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map); - parameter_map_count = ParameterToTagged(parameter_map_count, mode); StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset, - parameter_map_count); + BIntToSmi(parameter_map_count)); } else { if (empty) { StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, @@ -77,80 +72,73 @@ ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map, arguments); } } - return std::tuple(result, arguments, parameter_map); + return {CAST(result), UncheckedCast(arguments), + UncheckedCast(parameter_map)}; } -Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs( - Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg, - Node* rest_count, ParameterMode param_mode, int base_size) { +TNode ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs( + TNode map, TNode frame_ptr, TNode arg_count, + TNode first_arg, TNode rest_count, int base_size) { // Allocate the parameter object (either a Rest parameter object, a strict // argument object or a sloppy arguments object) and the elements together and // fill in the contents with the arguments above |formal_parameter_count|. 
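Note on the AllocateArgumentsObject signature change above: the untyped std::tuple of three Node* values and the ParameterMode argument are replaced by a named ArgumentsAllocationResult of typed handles, so call sites read alloc_result.elements instead of std::get<1>(...), and an absent parameter map becomes a null handle rather than a nullptr Node*. A compilable toy showing the shape of that pattern follows; TNode and the object types here are stand-ins, not V8's. (The ConstructParametersObjectFromArgs body that the preceding comment introduces continues right after the sketch.)

#include <cassert>

template <typename T>
struct TNode {                      // stand-in wrapper, not compiler::TNode<T>
  T* ptr = nullptr;
  bool is_null() const { return ptr == nullptr; }
};

struct JSObject {};
struct FixedArray {};

struct ArgumentsAllocationResult {  // named fields instead of std::get<N>(...)
  TNode<JSObject> arguments_object;
  TNode<FixedArray> elements;
  TNode<FixedArray> parameter_map;  // stays null for rest/strict arguments
};

ArgumentsAllocationResult AllocateArgumentsObject(bool with_parameter_map) {
  static JSObject object;
  static FixedArray elements, parameter_map;
  ArgumentsAllocationResult result{{&object}, {&elements}, {}};
  if (with_parameter_map) result.parameter_map = {&parameter_map};
  return result;
}

int main() {
  // The rest/strict path mirrors DCHECK(alloc_result.parameter_map.is_null()).
  assert(AllocateArgumentsObject(false).parameter_map.is_null());
  assert(!AllocateArgumentsObject(true).parameter_map.is_null());
  return 0;
}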
- Node* result; - Node* elements; - Node* unused; - std::tie(result, elements, unused) = - AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size); - DCHECK_NULL(unused); - CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode); - VARIABLE(offset, MachineType::PointerRepresentation()); - offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag)); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, rest_count, {}, base_size); + DCHECK(alloc_result.parameter_map.is_null()); + CodeStubArguments arguments(this, arg_count, frame_ptr); + TVARIABLE(IntPtrT, offset, + IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag)); VariableList list({&offset}, zone()); arguments.ForEach( list, - [this, elements, &offset](Node* arg) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, - offset.value(), arg); + [&](TNode arg) { + StoreNoWriteBarrier(MachineRepresentation::kTagged, + alloc_result.elements, offset.value(), arg); Increment(&offset, kTaggedSize); }, - first_arg, nullptr, param_mode); - return result; + first_arg); + return alloc_result.arguments_object; } -Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context, - Node* function) { +TNode ArgumentsBuiltinsAssembler::EmitFastNewRestParameter( + TNode context, TNode function) { ParameterMode mode = OptimalParameterMode(); - Node* zero = IntPtrOrSmiConstant(0, mode); + TNode zero = BIntConstant(0); - TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( - CAST(context), UncheckedCast(function)); + TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(JSObject, result); Label no_rest_parameters(this), runtime(this, Label::kDeferred), done(this, &result); - Node* rest_count = - IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count, mode); + TNode rest_count = + IntPtrOrSmiSub(info.argument_count, info.formal_parameter_count); TNode const native_context = LoadNativeContext(context); TNode const array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode), - &no_rest_parameters); + GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero), &no_rest_parameters); GotoIfFixedArraySizeDoesntFitInNewSpace( rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode); // Allocate the Rest JSArray and the elements together and fill in the // contents with the arguments above |formal_parameter_count|. 
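Note on EmitFastNewRestParameter above: the control flow is a three-way split on rest_count = argument_count - formal_parameter_count; no trailing arguments yields an empty array, a rest count too large for new space falls back to Runtime::kNewRestParameter, and everything else takes the fast allocate-and-copy path. A plain-C++ restatement of that dispatch follows, with a made-up size limit standing in for the new-space check. (The fast-path allocation the preceding comment describes continues right after the sketch.)

#include <cstddef>

enum class RestPath { kEmpty, kFastAllocate, kRuntimeFallback };

// Classify the same three outcomes the builtin branches to, and report how
// many trailing arguments would be copied. new_space_limit is an invented
// stand-in for GotoIfFixedArraySizeDoesntFitInNewSpace.
RestPath ClassifyRestParameter(std::size_t argument_count,
                               std::size_t formal_parameter_count,
                               std::size_t new_space_limit,
                               std::size_t* rest_count_out) {
  std::size_t rest_count = argument_count > formal_parameter_count
                               ? argument_count - formal_parameter_count
                               : 0;  // the "rest_count <= 0" branch
  *rest_count_out = rest_count;
  if (rest_count == 0) return RestPath::kEmpty;  // empty JSArray
  if (rest_count > new_space_limit) return RestPath::kRuntimeFallback;
  return RestPath::kFastAllocate;  // copy args[formal_parameter_count, argc)
}

int main() {
  std::size_t rest_count = 0;
  bool ok = ClassifyRestParameter(5, 2, 1024, &rest_count) ==
                    RestPath::kFastAllocate &&
            rest_count == 3 &&
            ClassifyRestParameter(2, 2, 1024, &rest_count) == RestPath::kEmpty;
  return ok ? 0 : 1;
}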
- result.Bind(ConstructParametersObjectFromArgs( + result = ConstructParametersObjectFromArgs( array_map, info.frame, info.argument_count, info.formal_parameter_count, - rest_count, mode, JSArray::kSize)); + rest_count, JSArray::kSize); Goto(&done); BIND(&no_rest_parameters); { - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = - AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize); - result.Bind(arguments); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(array_map, zero, {}, JSArray::kSize); + result = alloc_result.arguments_object; Goto(&done); } BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function)); + result = CAST(CallRuntime(Runtime::kNewRestParameter, context, function)); Goto(&done); } @@ -158,45 +146,41 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context, return result.value(); } -Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, - Node* function) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments( + TNode context, TNode function) { + TVARIABLE(JSObject, result); Label done(this, &result), empty(this), runtime(this, Label::kDeferred); ParameterMode mode = OptimalParameterMode(); TNode zero = BIntConstant(0); - TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( - CAST(context), UncheckedCast(function)); + TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function); GotoIfFixedArraySizeDoesntFitInNewSpace( info.argument_count, &runtime, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode); TNode const native_context = LoadNativeContext(context); - TNode const map = - LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX); + TNode map = CAST( + LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX)); GotoIf(BIntEqual(info.argument_count, zero), &empty); - result.Bind(ConstructParametersObjectFromArgs( - map, info.frame, info.argument_count, zero, info.argument_count, mode, - JSStrictArgumentsObject::kSize)); + result = ConstructParametersObjectFromArgs( + map, info.frame, info.argument_count, zero, info.argument_count, + JSStrictArgumentsObject::kSize); Goto(&done); BIND(&empty); { - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = AllocateArgumentsObject( - map, zero, nullptr, mode, JSStrictArgumentsObject::kSize); - result.Bind(arguments); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, zero, {}, JSStrictArgumentsObject::kSize); + result = alloc_result.arguments_object; Goto(&done); } BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function)); + result = CAST(CallRuntime(Runtime::kNewStrictArguments, context, function)); Goto(&done); } @@ -204,9 +188,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context, return result.value(); } -Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, - Node* function) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments( + TNode context, TNode function) { + TVARIABLE(JSObject, result); ParameterMode mode = OptimalParameterMode(); TNode zero = BIntConstant(0); @@ -214,8 +198,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, Label done(this, &result), empty(this), no_parameters(this), runtime(this, 
Label::kDeferred); - TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount( - CAST(context), UncheckedCast(function)); + TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(context, function); GotoIf(BIntEqual(info.argument_count, zero), &empty); @@ -224,54 +207,55 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, { Comment("Mapped parameter JSSloppyArgumentsObject"); - Node* mapped_count = - IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count, mode); + TNode mapped_count = + IntPtrOrSmiMin(info.argument_count, info.formal_parameter_count); - Node* parameter_map_size = - IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode); + TNode parameter_map_size = + IntPtrOrSmiAdd(mapped_count, BIntConstant(2)); // Verify that the overall allocation will fit in new space. - Node* elements_allocated = - IntPtrOrSmiAdd(info.argument_count, parameter_map_size, mode); + TNode elements_allocated = + IntPtrOrSmiAdd(info.argument_count, parameter_map_size); GotoIfFixedArraySizeDoesntFitInNewSpace( elements_allocated, &runtime, JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode); TNode const native_context = LoadNativeContext(context); - TNode const map = LoadContextElement( - native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); - Node* argument_object; - Node* elements; - Node* map_array; - std::tie(argument_object, elements, map_array) = + TNode const map = CAST(LoadContextElement( + native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)); + ArgumentsAllocationResult alloc_result = AllocateArgumentsObject(map, info.argument_count, parameter_map_size, - mode, JSSloppyArgumentsObject::kSize); - StoreObjectFieldNoWriteBarrier( - argument_object, JSSloppyArgumentsObject::kCalleeOffset, function); - StoreFixedArrayElement(CAST(map_array), 0, context, SKIP_WRITE_BARRIER); - StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER); + JSSloppyArgumentsObject::kSize); + StoreObjectFieldNoWriteBarrier(alloc_result.arguments_object, + JSSloppyArgumentsObject::kCalleeOffset, + function); + StoreFixedArrayElement(alloc_result.parameter_map, 0, context, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(alloc_result.parameter_map, 1, alloc_result.elements, + SKIP_WRITE_BARRIER); Comment("Fill in non-mapped parameters"); TNode argument_offset = - ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, mode, + ElementOffsetFromIndex(info.argument_count, PACKED_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag); TNode mapped_offset = - ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, mode, + ElementOffsetFromIndex(mapped_count, PACKED_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag); - CodeStubArguments arguments(this, info.argument_count, info.frame, mode); - VARIABLE(current_argument, MachineType::PointerRepresentation()); - current_argument.Bind(arguments.AtIndexPtr(info.argument_count, mode)); + CodeStubArguments arguments(this, info.argument_count, info.frame); + TVARIABLE(RawPtrT, current_argument, + arguments.AtIndexPtr(info.argument_count)); VariableList var_list1({¤t_argument}, zone()); - mapped_offset = UncheckedCast(BuildFastLoop( + mapped_offset = BuildFastLoop( var_list1, argument_offset, mapped_offset, - [this, elements, ¤t_argument](Node* offset) { + [&](TNode offset) { Increment(¤t_argument, kSystemPointerSize); TNode arg = LoadBufferObject( - UncheckedCast(current_argument.value()), 0); - StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset, - arg); + 
ReinterpretCast(current_argument.value()), 0); + StoreNoWriteBarrier(MachineRepresentation::kTagged, + alloc_result.elements, offset, arg); + return; }, - -kTaggedSize, INTPTR_PARAMETERS)); + -kTaggedSize); // Copy the parameter slots and the holes in the arguments. // We need to fill in mapped_count slots. They index the context, @@ -282,32 +266,32 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, // MIN_CONTEXT_SLOTS+argument_count-mapped_count // We loop from right to left. Comment("Fill in mapped parameters"); - VARIABLE(context_index, OptimalParameterRepresentation()); - context_index.Bind(IntPtrOrSmiSub( - IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode), - info.formal_parameter_count, mode), - mapped_count, mode)); + TVARIABLE( + BInt, context_index, + IntPtrOrSmiSub(IntPtrOrSmiAdd(BIntConstant(Context::MIN_CONTEXT_SLOTS), + info.formal_parameter_count), + mapped_count)); TNode the_hole = TheHoleConstant(); VariableList var_list2({&context_index}, zone()); const int kParameterMapHeaderSize = FixedArray::OffsetOfElementAt(2); TNode adjusted_map_array = IntPtrAdd( - BitcastTaggedToWord(map_array), + BitcastTaggedToWord(alloc_result.parameter_map), IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize)); TNode zero_offset = ElementOffsetFromIndex( zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag); - BuildFastLoop( + BuildFastLoop( var_list2, mapped_offset, zero_offset, - [=, &context_index](Node* offset) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset, - the_hole); + [&](TNode offset) { + StoreNoWriteBarrier(MachineRepresentation::kTagged, + alloc_result.elements, offset, the_hole); StoreNoWriteBarrier(MachineRepresentation::kTagged, adjusted_map_array, offset, - ParameterToTagged(context_index.value(), mode)); - Increment(&context_index, 1, mode); + BIntToSmi(context_index.value())); + Increment(&context_index); }, - -kTaggedSize, INTPTR_PARAMETERS); + -kTaggedSize); - result.Bind(argument_object); + result = alloc_result.arguments_object; Goto(&done); } @@ -318,11 +302,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, info.argument_count, &runtime, JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode); TNode const native_context = LoadNativeContext(context); - TNode const map = - LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX); - result.Bind(ConstructParametersObjectFromArgs( - map, info.frame, info.argument_count, zero, info.argument_count, mode, - JSSloppyArgumentsObject::kSize)); + TNode map = CAST(LoadContextElement( + native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + result = ConstructParametersObjectFromArgs( + map, info.frame, info.argument_count, zero, info.argument_count, + JSSloppyArgumentsObject::kSize); StoreObjectFieldNoWriteBarrier( result.value(), JSSloppyArgumentsObject::kCalleeOffset, function); Goto(&done); @@ -332,14 +316,11 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, { Comment("Empty JSSloppyArgumentsObject"); TNode const native_context = LoadNativeContext(context); - TNode const map = - LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX); - Node* arguments; - Node* elements; - Node* unused; - std::tie(arguments, elements, unused) = AllocateArgumentsObject( - map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize); - result.Bind(arguments); + TNode const map = CAST(LoadContextElement( + native_context, 
Context::SLOPPY_ARGUMENTS_MAP_INDEX)); + ArgumentsAllocationResult alloc_result = + AllocateArgumentsObject(map, zero, {}, JSSloppyArgumentsObject::kSize); + result = alloc_result.arguments_object; StoreObjectFieldNoWriteBarrier( result.value(), JSSloppyArgumentsObject::kCalleeOffset, function); Goto(&done); @@ -347,7 +328,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, BIND(&runtime); { - result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function)); + result = CAST(CallRuntime(Runtime::kNewSloppyArguments, context, function)); Goto(&done); } diff --git a/deps/v8/src/builtins/builtins-arguments-gen.h b/deps/v8/src/builtins/builtins-arguments-gen.h index 4eeae4bf866482..2565c3e81ff910 100644 --- a/deps/v8/src/builtins/builtins-arguments-gen.h +++ b/deps/v8/src/builtins/builtins-arguments-gen.h @@ -10,7 +10,7 @@ namespace v8 { namespace internal { -using Node = compiler::Node; +// TODO(v8:9396): these declarations pollute the v8::internal scope. using CodeAssemblerState = compiler::CodeAssemblerState; using CodeAssemblerLabel = compiler::CodeAssemblerLabel; @@ -19,19 +19,25 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler { explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* EmitFastNewStrictArguments(Node* context, Node* function); - Node* EmitFastNewSloppyArguments(Node* context, Node* function); - Node* EmitFastNewRestParameter(Node* context, Node* function); + TNode EmitFastNewStrictArguments(TNode context, + TNode function); + TNode EmitFastNewSloppyArguments(TNode context, + TNode function); + TNode EmitFastNewRestParameter(TNode context, + TNode function); private: + struct ArgumentsAllocationResult { + TNode arguments_object; + TNode elements; + TNode parameter_map; + }; // Allocates an an arguments (either rest, strict or sloppy) together with the // FixedArray elements for the arguments and a parameter map (for sloppy - // arguments only). A tuple is returned with pointers to the arguments object, - // the elements and parameter map in the form: - // - std::tuple AllocateArgumentsObject( - Node* map, Node* arguments, Node* mapped_arguments, - ParameterMode param_mode, int base_size); + // arguments only, or empty TNode<> otherwise). + ArgumentsAllocationResult AllocateArgumentsObject( + TNode map, TNode arguments, TNode mapped_arguments, + int base_size); // For Rest parameters and Strict arguments, the copying of parameters from // the stack into the arguments object is straight-forward and shares much of @@ -40,11 +46,9 @@ class ArgumentsBuiltinsAssembler : public CodeStubAssembler { // and then copies |rest_count| arguments from the stack frame pointed to by // |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| + // |rest_count|. 
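Note on the mapped-arguments fill in EmitFastNewSloppyArguments above: the parameter map is laid out as [context, elements array, then one context-slot index per mapped parameter], and every mapped slot in the elements array is written as the hole so reads for those parameters go through the context instead. A toy model in ordinary C++ containers follows; kMinContextSlots and the ascending slot numbering are simplifications (the real builtin hands out slots in reverse order starting at MIN_CONTEXT_SLOTS + formal_parameter_count - mapped_count, as the code above shows), and the integer markers stand in for tagged pointers. (The ConstructParametersObjectFromArgs declaration that the preceding comment documents follows right after the sketch.)

#include <algorithm>
#include <cstdio>
#include <vector>

constexpr int kMinContextSlots = 4;  // stand-in for Context::MIN_CONTEXT_SLOTS
constexpr int kTheHole = -1;         // stand-in for the hole marker

struct SloppyArguments {
  std::vector<int> parameter_map;  // [context, elements, ctx slot per param]
  std::vector<int> elements;       // holes for mapped params, raw args after
};

SloppyArguments BuildMappedArguments(const std::vector<int>& args,
                                     int formal_parameter_count) {
  const int argc = static_cast<int>(args.size());
  const int mapped_count = std::min(argc, formal_parameter_count);
  SloppyArguments out;
  // Markers only: slot 0 is where the context pointer is stored, slot 1 the
  // elements array pointer (the two StoreFixedArrayElement calls above).
  out.parameter_map = {0, 1};
  out.elements.assign(args.begin(), args.end());
  for (int i = 0; i < mapped_count; ++i) {
    out.parameter_map.push_back(kMinContextSlots + i);  // simplified numbering
    out.elements[i] = kTheHole;  // mapped slots read through the context
  }
  return out;
}

int main() {
  SloppyArguments a = BuildMappedArguments({10, 11, 12}, 2);
  for (int v : a.parameter_map) std::printf("%d ", v);  // 0 1 4 5
  std::printf("| ");
  for (int v : a.elements) std::printf("%d ", v);       // -1 -1 12
  std::printf("\n");
  return 0;
}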
- Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr, - Node* arg_count, Node* first_arg, - Node* rest_count, - ParameterMode param_mode, - int base_size); + TNode ConstructParametersObjectFromArgs( + TNode map, TNode frame_ptr, TNode arg_count, + TNode first_arg, TNode rest_count, int base_size); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index c7d8eb009125da..f176924ae5869b 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -25,10 +25,9 @@ using IteratorRecord = TorqueStructIteratorRecord; ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( compiler::CodeAssemblerState* state) : CodeStubAssembler(state), - k_(this, MachineRepresentation::kTagged), - a_(this, MachineRepresentation::kTagged), - to_(this, MachineRepresentation::kTagged, SmiConstant(0)), - fully_spec_compliant_(this, {&k_, &a_, &to_}) {} + k_(this), + a_(this), + fully_spec_compliant_(this, {&k_, &a_}) {} void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { // 6. Let A be ? TypedArraySpeciesCreate(O, len). @@ -44,14 +43,16 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { LoadJSTypedArrayLength(a))); fast_typed_array_target_ = Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a)); - a_.Bind(a); + a_ = a; } // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. -Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { +TNode ArrayBuiltinsAssembler::TypedArrayMapProcessor( + TNode k_value, TNode k) { // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »). - Node* mapped_value = CallJS(CodeFactory::Call(isolate()), context(), - callbackfn(), this_arg(), k_value, k, o()); + TNode mapped_value = + CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(), + k_value, k, o()); Label fast(this), slow(this), done(this), detached(this, Label::kDeferred); // 8. d. Perform ? Set(A, Pk, mapped_value, true). @@ -65,7 +66,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let // numValue be ? ToBigInt(v). // 6. Otherwise, let numValue be ? ToNumber(value). - Node* num_value; + TNode num_value; if (source_elements_kind_ == BIGINT64_ELEMENTS || source_elements_kind_ == BIGUINT64_ELEMENTS) { num_value = ToBigInt(context(), mapped_value); @@ -78,7 +79,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { Goto(&done); BIND(&slow); - SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value)); + SetPropertyStrict(context(), a(), k, mapped_value); Goto(&done); BIND(&detached); @@ -90,32 +91,7 @@ Node* ArrayBuiltinsAssembler::TypedArrayMapProcessor(Node* k_value, Node* k) { return a(); } -void ArrayBuiltinsAssembler::NullPostLoopAction() {} - -void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(TNode array, - TNode smi_length) { - CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array))); - - TNode length = SmiToIntPtr(smi_length); - TNode byte_length = TimesTaggedSize(length); - CSA_ASSERT(this, UintPtrLessThan(length, byte_length)); - - static const int32_t fa_base_data_offset = - FixedArray::kHeaderSize - kHeapObjectTag; - TNode backing_store = IntPtrAdd(BitcastTaggedToWord(array), - IntPtrConstant(fa_base_data_offset)); - - // Call out to memset to perform initialization. 
- TNode memset = - ExternalConstant(ExternalReference::libc_memset_function()); - STATIC_ASSERT(kSizetSize == kIntptrSize); - CallCFunction(memset, MachineType::Pointer(), - std::make_pair(MachineType::Pointer(), backing_store), - std::make_pair(MachineType::IntPtr(), IntPtrConstant(0)), - std::make_pair(MachineType::UintPtr(), byte_length)); -} - -void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { +void ArrayBuiltinsAssembler::ReturnFromBuiltin(TNode value) { if (argc_ == nullptr) { Return(value); } else { @@ -126,8 +102,8 @@ void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) { } void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( - TNode context, TNode receiver, Node* callbackfn, - Node* this_arg, TNode argc) { + TNode context, TNode receiver, TNode callbackfn, + TNode this_arg, TNode argc) { context_ = context; receiver_ = receiver; callbackfn_ = callbackfn; @@ -137,8 +113,7 @@ void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinBody( void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( const char* name, const BuiltinResultGenerator& generator, - const CallResultProcessor& processor, const PostLoopAction& action, - ForEachDirection direction) { + const CallResultProcessor& processor, ForEachDirection direction) { name_ = name; // ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray @@ -160,7 +135,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( Label throw_not_callable(this, Label::kDeferred); Label distinguish_types(this); GotoIf(TaggedIsSmi(callbackfn_), &throw_not_callable); - Branch(IsCallableMap(LoadMap(callbackfn_)), &distinguish_types, + Branch(IsCallableMap(LoadMap(CAST(callbackfn_))), &distinguish_types, &throw_not_callable); BIND(&throw_not_typed_array); @@ -192,9 +167,9 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( generator(this); if (direction == ForEachDirection::kForward) { - k_.Bind(SmiConstant(0)); + k_ = SmiConstant(0); } else { - k_.Bind(NumberDec(len())); + k_ = NumberDec(len()); } CSA_ASSERT(this, IsSafeInteger(k())); TNode elements_kind = LoadMapElementsKind(typed_array_map); @@ -214,26 +189,18 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody( Goto(&done); // No exception, return success BIND(&done); - action(this); ReturnFromBuiltin(a_.value()); } } void ArrayBuiltinsAssembler::VisitAllTypedArrayElements( - Node* array_buffer, const CallResultProcessor& processor, Label* detached, - ForEachDirection direction, TNode typed_array) { - VariableList list({&a_, &k_, &to_}, zone()); - - FastLoopBody body = [&](Node* index) { - GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached); - TNode data_ptr = LoadJSTypedArrayBackingStore(typed_array); - auto value = LoadFixedTypedArrayElementAsTagged( - data_ptr, index, source_elements_kind_, SMI_PARAMETERS); - k_.Bind(index); - a_.Bind(processor(this, value, index)); - }; - Node* start = SmiConstant(0); - Node* end = len_; + TNode array_buffer, const CallResultProcessor& processor, + Label* detached, ForEachDirection direction, + TNode typed_array) { + VariableList list({&a_, &k_}, zone()); + + TNode start = SmiConstant(0); + TNode end = CAST(len_); IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost; int incr = 1; if (direction == ForEachDirection::kReverse) { @@ -241,54 +208,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements( advance_mode = IndexAdvanceMode::kPre; incr = -1; } - BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS, - advance_mode); -} - -// 
Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). -void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode len) { - Label runtime(this, Label::kDeferred), done(this); - - TNode const original_map = LoadMap(o()); - GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(original_map), JS_ARRAY_TYPE), - &runtime); - - GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map), - &runtime); - - TNode species_protector = ArraySpeciesProtectorConstant(); - TNode value = - LoadObjectField(species_protector, PropertyCell::kValueOffset); - TNode const protector_invalid = SmiConstant(Isolate::kProtectorInvalid); - GotoIf(TaggedEqual(value, protector_invalid), &runtime); - - GotoIfNot(TaggedIsPositiveSmi(len), &runtime); - GotoIfNot(IsValidFastJSArrayCapacity(len, CodeStubAssembler::SMI_PARAMETERS), - &runtime); - - // We need to be conservative and start with holey because the builtins - // that create output arrays aren't guaranteed to be called for every - // element in the input array (maybe the callback deletes an element). - const ElementsKind elements_kind = - GetHoleyElementsKind(GetInitialFastElementsKind()); - TNode native_context = LoadNativeContext(context()); - TNode array_map = LoadJSArrayElementsMap(elements_kind, native_context); - a_.Bind(AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, len, CAST(len), - nullptr, CodeStubAssembler::SMI_PARAMETERS, - kAllowLargeObjectAllocation)); - - Goto(&done); - - BIND(&runtime); - { - // 5. Let A be ? ArraySpeciesCreate(O, len). - TNode constructor = - CAST(CallRuntime(Runtime::kArraySpeciesConstructor, context(), o())); - a_.Bind(Construct(context(), constructor, len)); - Goto(&fully_spec_compliant_); - } - - BIND(&done); + BuildFastLoop( + list, start, end, + [&](TNode index) { + GotoIf(IsDetachedBuffer(array_buffer), detached); + TNode data_ptr = LoadJSTypedArrayDataPtr(typed_array); + TNode value = LoadFixedTypedArrayElementAsTagged( + data_ptr, index, source_elements_kind_, SMI_PARAMETERS); + k_ = index; + a_ = processor(this, value, index); + }, + incr, advance_mode); } TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { @@ -297,7 +227,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode receiver = args.GetReceiver(); Label runtime(this, Label::kDeferred); @@ -315,9 +245,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { BIND(&fast); { TNode array_receiver = CAST(receiver); - CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver))); - TNode length = - LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset); + TNode length = SmiUntag(LoadFastJSArrayLength(array_receiver)); Label return_undefined(this), fast_elements(this); GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined); @@ -394,14 +322,12 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { Label double_transition(this); Label runtime(this, Label::kDeferred); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. 
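Note on VisitAllTypedArrayElements above: the loop re-checks IsDetachedBuffer and reloads the data pointer on every iteration because the callback invoked by the processor can detach the ArrayBuffer, so nothing loaded before the call may be reused after it. The toy below shows that discipline in plain C++; DetachableBuffer and the callback signature are stand-ins, not V8 types. (The ArrayPrototypePush parameter handling being edited above continues right after the sketch.)

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

struct DetachableBuffer {            // stand-in for a detachable ArrayBuffer
  std::vector<std::uint8_t> bytes;
  bool detached = false;
  void Detach() { bytes.clear(); detached = true; }
};

// Returns false if the buffer was detached mid-iteration (the builtin's
// "detached" label). The length is captured once, like len, but the detached
// flag and the data pointer are re-read before every element access.
bool VisitAllElements(
    DetachableBuffer& buf,
    const std::function<void(std::uint8_t, std::size_t)>& processor) {
  const std::size_t length = buf.bytes.size();
  for (std::size_t i = 0; i < length; ++i) {
    if (buf.detached) return false;  // previous callback may have detached it
    const std::uint8_t* data_ptr = buf.bytes.data();  // reload, never cache
    processor(data_ptr[i], i);
  }
  return true;
}

int main() {
  DetachableBuffer buf{{1, 2, 3, 4}};
  std::size_t seen = 0;
  bool ok = VisitAllElements(buf, [&](std::uint8_t, std::size_t i) {
    ++seen;
    if (i == 1) buf.Detach();        // misbehaving callback
  });
  return (!ok && seen == 2) ? 0 : 1;
}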
TNode argc = UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); TNode context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode receiver = args.GetReceiver(); TNode array_receiver; TNode kind; @@ -493,9 +419,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { BIND(&default_label); { args.ForEach( - [this, array_receiver, context](Node* arg) { + [=](TNode arg) { TNode length = LoadJSArrayLength(array_receiver); - SetPropertyStrict(context, array_receiver, length, CAST(arg)); + SetPropertyStrict(context, array_receiver, length, arg); }, arg_index.value()); args.PopAndReturn(LoadJSArrayLength(array_receiver)); @@ -515,11 +441,10 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) { ParameterMode mode = OptimalParameterMode(); TNode context = CAST(Parameter(Descriptor::kContext)); - Node* array = Parameter(Descriptor::kSource); + TNode array = CAST(Parameter(Descriptor::kSource)); Node* begin = TaggedToParameter(Parameter(Descriptor::kBegin), mode); Node* count = TaggedToParameter(Parameter(Descriptor::kCount), mode); - CSA_ASSERT(this, IsJSArray(array)); CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid())); Return(ExtractFastJSArray(context, array, begin, count, mode)); @@ -555,7 +480,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) { Word32BinaryNot(IsNoElementsProtectorCellInvalid()))); ParameterMode mode = OptimalParameterMode(); - Return(CloneFastJSArray(context, array, mode, nullptr, + Return(CloneFastJSArray(context, array, mode, {}, HoleConversionMode::kConvertToUndefined)); } @@ -584,9 +509,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler { TNode array_map = CAST(LoadContextElement( context, Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX)); - array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0), - SmiConstant(0), nullptr, - ParameterMode::SMI_PARAMETERS); + array = + AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, SmiConstant(0), + SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS); Goto(&done); } @@ -626,7 +551,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { TNode argc = UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode items = args.GetOptionalArgumentValue(0); TNode receiver = args.GetReceiver(); @@ -810,8 +735,8 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { GotoIf(IsUndefined(map_function), &next); CSA_ASSERT(this, IsCallable(CAST(map_function))); - value = CAST(CallJS(CodeFactory::Call(isolate()), context, map_function, - this_arg, value.value(), index.value())); + value = CallJS(CodeFactory::Call(isolate()), context, map_function, + this_arg, value.value(), index.value()); Goto(&next); BIND(&next); } @@ -846,8 +771,7 @@ TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) { GenerateIteratingTypedArrayBuiltinBody( "%TypedArray%.prototype.map", &ArrayBuiltinsAssembler::TypedArrayMapResultGenerator, - &ArrayBuiltinsAssembler::TypedArrayMapProcessor, - &ArrayBuiltinsAssembler::NullPostLoopAction); + &ArrayBuiltinsAssembler::TypedArrayMapProcessor); } TF_BUILTIN(ArrayIsArray, CodeStubAssembler) { @@ -884,15 +808,25 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler { void Generate(SearchVariant variant, TNode argc, TNode context); - void 
GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements, - TNode search_element, Node* array_length, - Node* from_index); - void GeneratePackedDoubles(SearchVariant variant, Node* elements, - Node* search_element, Node* array_length, - Node* from_index); - void GenerateHoleyDoubles(SearchVariant variant, Node* elements, - Node* search_element, Node* array_length, - Node* from_index); + void GenerateSmiOrObject(SearchVariant variant, TNode context, + TNode elements, + TNode search_element, + TNode array_length, TNode from_index); + void GeneratePackedDoubles(SearchVariant variant, + TNode elements, + TNode search_element, + TNode array_length, TNode from_index); + void GenerateHoleyDoubles(SearchVariant variant, + TNode elements, + TNode search_element, + TNode array_length, TNode from_index); + + void ReturnIfEmpty(TNode length, TNode value) { + Label done(this); + GotoIf(SmiGreaterThan(length, SmiConstant(0)), &done); + Return(value); + BIND(&done); + } }; void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, @@ -916,7 +850,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BranchIfFastJSArrayForRead(receiver, context, &init_index, &call_runtime); BIND(&init_index); - VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero); + TVARIABLE(IntPtrT, index_var, intptr_zero); TNode array = CAST(receiver); // JSArray length is always a positive Smi for fast arrays. @@ -946,14 +880,14 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BIND(&is_smi); { TNode intptr_start_from = SmiUntag(CAST(start_from)); - index_var.Bind(intptr_start_from); + index_var = intptr_start_from; GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done); // The fromIndex is negative: add it to the array's length. - index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value())); + index_var = IntPtrAdd(array_length_untagged, index_var.value()); // Clamp negative results at zero. GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done); - index_var.Bind(intptr_zero); + index_var = intptr_zero; Goto(&done); } BIND(&done); @@ -1031,8 +965,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, BIND(&call_runtime); { - TNode start_from = - args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant()); + TNode start_from = args.GetOptionalArgumentValue(kFromIndexArg); Runtime::FunctionId function = variant == kIncludes ? 
Runtime::kArrayIncludes_Slow : Runtime::kArrayIndexOf; @@ -1042,8 +975,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant, } void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( - SearchVariant variant, Node* context, Node* elements, - TNode search_element, Node* array_length, Node* from_index) { + SearchVariant variant, TNode context, TNode elements, + TNode search_element, TNode array_length, + TNode from_index) { TVARIABLE(IntPtrT, index_var, SmiUntag(from_index)); TVARIABLE(Float64T, search_num); TNode array_length_untagged = SmiUntag(array_length); @@ -1077,7 +1011,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); GotoIf(TaggedEqual(element_k, search_element), &return_found); Increment(&index_var); @@ -1090,7 +1024,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); GotoIf(IsUndefined(element_k), &return_found); GotoIf(IsTheHole(element_k), &return_found); @@ -1110,7 +1044,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); GotoIfNot(TaggedIsSmi(element_k), ¬_smi); Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))), &return_found, &continue_loop); @@ -1133,7 +1067,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); GotoIf(TaggedIsSmi(element_k), &continue_loop); GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop); BranchIfFloat64IsNaN(LoadHeapNumberValue(CAST(element_k)), &return_found, @@ -1157,7 +1091,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); GotoIf(TaggedIsSmi(element_k), &continue_loop); GotoIf(TaggedEqual(search_element_string, element_k), &return_found); TNode element_k_type = LoadInstanceType(CAST(element_k)); @@ -1186,7 +1120,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( &return_not_found); TNode element_k = - UnsafeLoadFixedArrayElement(CAST(elements), index_var.value()); + UnsafeLoadFixedArrayElement(elements, index_var.value()); Label continue_loop(this); GotoIf(TaggedIsSmi(element_k), &continue_loop); GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop); @@ -1213,11 +1147,10 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject( } } -void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant, - Node* elements, - Node* search_element, - Node* array_length, - Node* from_index) { +void ArrayIncludesIndexofAssembler::GeneratePackedDoubles( + SearchVariant variant, TNode elements, + 
TNode search_element, TNode array_length, + TNode from_index) { TVARIABLE(IntPtrT, index_var, SmiUntag(from_index)); TNode array_length_untagged = SmiUntag(array_length); @@ -1228,13 +1161,13 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant, search_num = Float64Constant(0); GotoIfNot(TaggedIsSmi(search_element), &search_notnan); - search_num = SmiToFloat64(search_element); + search_num = SmiToFloat64(CAST(search_element)); Goto(¬_nan_loop); BIND(&search_notnan); - GotoIfNot(IsHeapNumber(search_element), &return_not_found); + GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found); - search_num = LoadHeapNumberValue(search_element); + search_num = LoadHeapNumberValue(CAST(search_element)); Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found; BranchIfFloat64IsNaN(search_num.value(), nan_handling, ¬_nan_loop); @@ -1282,11 +1215,10 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant, } } -void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant, - Node* elements, - Node* search_element, - Node* array_length, - Node* from_index) { +void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles( + SearchVariant variant, TNode elements, + TNode search_element, TNode array_length, + TNode from_index) { TVARIABLE(IntPtrT, index_var, SmiUntag(from_index)); TNode array_length_untagged = SmiUntag(array_length); @@ -1297,16 +1229,16 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant, search_num = Float64Constant(0); GotoIfNot(TaggedIsSmi(search_element), &search_notnan); - search_num = SmiToFloat64(search_element); + search_num = SmiToFloat64(CAST(search_element)); Goto(¬_nan_loop); BIND(&search_notnan); if (variant == kIncludes) { GotoIf(IsUndefined(search_element), &hole_loop); } - GotoIfNot(IsHeapNumber(search_element), &return_not_found); + GotoIfNot(IsHeapNumber(CAST(search_element)), &return_not_found); - search_num = LoadHeapNumberValue(search_element); + search_num = LoadHeapNumberValue(CAST(search_element)); Label* nan_handling = variant == kIncludes ? 
&nan_loop : &return_not_found; BranchIfFloat64IsNaN(search_num.value(), nan_handling, ¬_nan_loop); @@ -1387,32 +1319,34 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) { TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); - Node* elements = Parameter(Descriptor::kElements); + TNode elements = CAST(Parameter(Descriptor::kElements)); TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode array_length = CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); GenerateSmiOrObject(kIncludes, context, elements, search_element, array_length, from_index); } TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) { - Node* elements = Parameter(Descriptor::kElements); - Node* search_element = Parameter(Descriptor::kSearchElement); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode elements = CAST(Parameter(Descriptor::kElements)); + TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode array_length = CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); - GeneratePackedDoubles(kIncludes, elements, search_element, array_length, + ReturnIfEmpty(array_length, FalseConstant()); + GeneratePackedDoubles(kIncludes, CAST(elements), search_element, array_length, from_index); } TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) { - Node* elements = Parameter(Descriptor::kElements); - Node* search_element = Parameter(Descriptor::kSearchElement); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode elements = CAST(Parameter(Descriptor::kElements)); + TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode array_length = CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); - GenerateHoleyDoubles(kIncludes, elements, search_element, array_length, + ReturnIfEmpty(array_length, FalseConstant()); + GenerateHoleyDoubles(kIncludes, CAST(elements), search_element, array_length, from_index); } @@ -1426,32 +1360,34 @@ TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); - Node* elements = Parameter(Descriptor::kElements); + TNode elements = CAST(Parameter(Descriptor::kElements)); TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode array_length = CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length, from_index); } TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) { - Node* elements = Parameter(Descriptor::kElements); - Node* search_element = Parameter(Descriptor::kSearchElement); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode elements = CAST(Parameter(Descriptor::kElements)); + TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode array_length = 
CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); - GeneratePackedDoubles(kIndexOf, elements, search_element, array_length, + ReturnIfEmpty(array_length, NumberConstant(-1)); + GeneratePackedDoubles(kIndexOf, CAST(elements), search_element, array_length, from_index); } TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) { - Node* elements = Parameter(Descriptor::kElements); - Node* search_element = Parameter(Descriptor::kSearchElement); - Node* array_length = Parameter(Descriptor::kLength); - Node* from_index = Parameter(Descriptor::kFromIndex); + TNode elements = CAST(Parameter(Descriptor::kElements)); + TNode search_element = CAST(Parameter(Descriptor::kSearchElement)); + TNode array_length = CAST(Parameter(Descriptor::kLength)); + TNode from_index = CAST(Parameter(Descriptor::kFromIndex)); - GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length, + ReturnIfEmpty(array_length, NumberConstant(-1)); + GenerateHoleyDoubles(kIndexOf, CAST(elements), search_element, array_length, from_index); } @@ -1484,10 +1420,10 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { const char* method_name = "Array Iterator.prototype.next"; TNode context = CAST(Parameter(Descriptor::kContext)); - Node* iterator = Parameter(Descriptor::kReceiver); + TNode maybe_iterator = CAST(Parameter(Descriptor::kReceiver)); - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label allocate_entry_if_needed(this); Label allocate_iterator_result(this); @@ -1497,9 +1433,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { // If O does not have all of the internal slots of an Array Iterator Instance // (22.1.5.3), throw a TypeError exception - ThrowIfNotInstanceType(context, iterator, JS_ARRAY_ITERATOR_TYPE, + ThrowIfNotInstanceType(context, maybe_iterator, JS_ARRAY_ITERATOR_TYPE, method_name); + TNode iterator = CAST(maybe_iterator); + // Let a be O.[[IteratedObject]]. 
TNode array = CAST(LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset)); @@ -1531,8 +1469,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { iterator, JSArrayIterator::kNextIndexOffset, ChangeUint32ToTagged(Unsigned(Int32Add(index32, Int32Constant(1))))); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1543,9 +1481,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { TNode elements_kind = LoadMapElementsKind(array_map); TNode elements = LoadElements(CAST(array)); GotoIfForceSlowPath(&if_generic); - var_value.Bind(LoadFixedArrayBaseElementAsTagged( + var_value = LoadFixedArrayBaseElementAsTagged( elements, Signed(ChangeUint32ToWord(index32)), elements_kind, - &if_generic, &if_hole)); + &if_generic, &if_hole); Goto(&allocate_entry_if_needed); BIND(&if_hole); @@ -1553,7 +1491,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic); GotoIfNot(IsPrototypeInitialArrayPrototype(context, array_map), &if_generic); - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); Goto(&allocate_entry_if_needed); } } @@ -1572,8 +1510,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset, NumberInc(index)); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; Branch(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1609,7 +1547,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { BIND(&if_generic); { - var_value.Bind(GetProperty(context, array, index)); + var_value = GetProperty(context, array, index); Goto(&allocate_entry_if_needed); } @@ -1632,8 +1570,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset, SmiInc(CAST(index))); - var_done.Bind(FalseConstant()); - var_value.Bind(index); + var_done = FalseConstant(); + var_value = index; GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField( iterator, JSArrayIterator::kKindOffset), @@ -1641,9 +1579,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { &allocate_iterator_result); TNode elements_kind = LoadMapElementsKind(array_map); - TNode data_ptr = LoadJSTypedArrayBackingStore(CAST(array)); - var_value.Bind(LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index), - elements_kind)); + TNode data_ptr = LoadJSTypedArrayDataPtr(CAST(array)); + var_value = LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index), + elements_kind); Goto(&allocate_entry_if_needed); } @@ -1654,7 +1592,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { Int32Constant(static_cast(IterationKind::kValues))), &allocate_iterator_result); - Node* result = + TNode result = AllocateJSIteratorResultForEntry(context, index, var_value.value()); Return(result); } @@ -1673,29 +1611,28 @@ class ArrayFlattenAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray - Node* FlattenIntoArray(Node* context, Node* target, Node* source, - Node* source_length, Node* start, Node* depth, - Node* mapper_function = nullptr, - Node* this_arg = nullptr) { - CSA_ASSERT(this, IsJSReceiver(target)); - CSA_ASSERT(this, IsJSReceiver(source)); + TNode FlattenIntoArray( + TNode 
context, TNode target, + TNode source, TNode source_length, + TNode start, TNode depth, + base::Optional> mapper_function = base::nullopt, + base::Optional> this_arg = base::nullopt) { CSA_ASSERT(this, IsNumberPositive(source_length)); CSA_ASSERT(this, IsNumberPositive(start)); - CSA_ASSERT(this, IsNumber(depth)); // 1. Let targetIndex be start. - VARIABLE(var_target_index, MachineRepresentation::kTagged, start); + TVARIABLE(Number, var_target_index, start); // 2. Let sourceIndex be 0. - VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0)); + TVARIABLE(Number, var_source_index, SmiConstant(0)); // 3. Repeat... Label loop(this, {&var_target_index, &var_source_index}), done_loop(this); Goto(&loop); BIND(&loop); { - Node* const source_index = var_source_index.value(); - Node* const target_index = var_target_index.value(); + TNode source_index = var_source_index.value(); + TNode target_index = var_target_index.value(); // ...while sourceIndex < sourceLen GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop); @@ -1716,16 +1653,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler { GetProperty(context, source, source_index); // ii. If mapperFunction is present, then - if (mapper_function != nullptr) { - CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function), - IsCallable(mapper_function))); - DCHECK_NOT_NULL(this_arg); + if (mapper_function) { + CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function.value()), + IsCallable(mapper_function.value()))); + DCHECK(this_arg.has_value()); // 1. Set element to ? Call(mapperFunction, thisArg , « element, // sourceIndex, source »). - element_maybe_smi = CAST( - CallJS(CodeFactory::Call(isolate()), context, mapper_function, - this_arg, element_maybe_smi, source_index, source)); + element_maybe_smi = CallJS(CodeFactory::Call(isolate()), context, + mapper_function.value(), this_arg.value(), + element_maybe_smi, source_index, source); } // iii. Let shouldFlatten be false. @@ -1752,7 +1689,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // 2. Set targetIndex to ? FlattenIntoArray(target, element, // elementLen, targetIndex, // depth - 1). - var_target_index.Bind( + var_target_index = CAST( CallBuiltin(Builtins::kFlattenIntoArray, context, target, element, element_length, target_index, NumberDec(depth))); Goto(&next); @@ -1769,7 +1706,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // 2. Set targetIndex to ? FlattenIntoArray(target, element, // elementLen, targetIndex, // depth - 1). - var_target_index.Bind( + var_target_index = CAST( CallBuiltin(Builtins::kFlattenIntoArray, context, target, element, element_length, target_index, NumberDec(depth))); Goto(&next); @@ -1789,7 +1726,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { target_index, element); // 3. Increase targetIndex by 1. - var_target_index.Bind(NumberInc(target_index)); + var_target_index = NumberInc(target_index); Goto(&next); BIND(&throw_error); @@ -1800,7 +1737,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler { BIND(&next); // d. Increase sourceIndex by 1. 
- var_source_index.Bind(NumberInc(source_index)); + var_source_index = NumberInc(source_index); Goto(&loop); } @@ -1811,16 +1748,16 @@ class ArrayFlattenAssembler : public CodeStubAssembler { // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const target = Parameter(Descriptor::kTarget); - Node* const source = Parameter(Descriptor::kSource); - Node* const source_length = Parameter(Descriptor::kSourceLength); - Node* const start = Parameter(Descriptor::kStart); - Node* const depth = Parameter(Descriptor::kDepth); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode source = CAST(Parameter(Descriptor::kSource)); + TNode source_length = CAST(Parameter(Descriptor::kSourceLength)); + TNode start = CAST(Parameter(Descriptor::kStart)); + TNode depth = CAST(Parameter(Descriptor::kDepth)); // FlattenIntoArray might get called recursively, check stack for overflow // manually as it has stub linkage. - PerformStackCheck(CAST(context)); + PerformStackCheck(context); Return( FlattenIntoArray(context, target, source, source_length, start, depth)); @@ -1828,14 +1765,15 @@ TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) { // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const target = Parameter(Descriptor::kTarget); - Node* const source = Parameter(Descriptor::kSource); - Node* const source_length = Parameter(Descriptor::kSourceLength); - Node* const start = Parameter(Descriptor::kStart); - Node* const depth = Parameter(Descriptor::kDepth); - Node* const mapper_function = Parameter(Descriptor::kMapperFunction); - Node* const this_arg = Parameter(Descriptor::kThisArg); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode source = CAST(Parameter(Descriptor::kSource)); + TNode source_length = CAST(Parameter(Descriptor::kSourceLength)); + TNode start = CAST(Parameter(Descriptor::kStart)); + TNode depth = CAST(Parameter(Descriptor::kDepth)); + TNode mapper_function = + CAST(Parameter(Descriptor::kMapperFunction)); + TNode this_arg = CAST(Parameter(Descriptor::kThisArg)); Return(FlattenIntoArray(context, target, source, source_length, start, depth, mapper_function, this_arg)); @@ -2127,8 +2065,9 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) { } void ArrayBuiltinsAssembler::GenerateConstructor( - Node* context, Node* array_function, Node* array_map, Node* array_size, - Node* allocation_site, ElementsKind elements_kind, + TNode context, TNode array_function, + TNode array_map, TNode array_size, + TNode allocation_site, ElementsKind elements_kind, AllocationSiteMode mode) { Label ok(this); Label smi_size(this); @@ -2138,33 +2077,37 @@ void ArrayBuiltinsAssembler::GenerateConstructor( Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime); BIND(&smi_size); - - if (IsFastPackedElementsKind(elements_kind)) { - Label abort(this, Label::kDeferred); - Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort); - - BIND(&abort); - TNode reason = - SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray); - TailCallRuntime(Runtime::kAbort, context, reason); - } else { - int element_size = - IsDoubleElementsKind(elements_kind) ? 
kDoubleSize : kTaggedSize; - int max_fast_elements = - (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize - - AllocationMemento::kSize) / - element_size; - Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)), - &call_runtime, &small_smi_size); - } - - BIND(&small_smi_size); { - TNode array = AllocateJSArray( - elements_kind, CAST(array_map), array_size, CAST(array_size), - mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site, - CodeStubAssembler::SMI_PARAMETERS); - Return(array); + TNode array_size_smi = CAST(array_size); + + if (IsFastPackedElementsKind(elements_kind)) { + Label abort(this, Label::kDeferred); + Branch(SmiEqual(array_size_smi, SmiConstant(0)), &small_smi_size, &abort); + + BIND(&abort); + TNode reason = + SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray); + TailCallRuntime(Runtime::kAbort, context, reason); + } else { + int element_size = + IsDoubleElementsKind(elements_kind) ? kDoubleSize : kTaggedSize; + int max_fast_elements = + (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - + JSArray::kSize - AllocationMemento::kSize) / + element_size; + Branch(SmiAboveOrEqual(array_size_smi, SmiConstant(max_fast_elements)), + &call_runtime, &small_smi_size); + } + + BIND(&small_smi_size); + { + TNode array = AllocateJSArray( + elements_kind, array_map, array_size_smi, array_size_smi, + mode == DONT_TRACK_ALLOCATION_SITE ? TNode() + : CAST(allocation_site), + CodeStubAssembler::SMI_PARAMETERS); + Return(array); + } } BIND(&call_runtime); @@ -2181,8 +2124,9 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor( Parameter(Descriptor::kFunction), JSFunction::kContextOffset)); bool track_allocation_site = AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES; - Node* allocation_site = - track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr; + TNode allocation_site = + track_allocation_site ? CAST(Parameter(Descriptor::kAllocationSite)) + : TNode(); TNode array_map = LoadJSArrayElementsMap(kind, native_context); TNode array = AllocateJSArray( kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements), @@ -2194,7 +2138,7 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor( ElementsKind kind, AllocationSiteOverrideMode mode) { using Descriptor = ArraySingleArgumentConstructorDescriptor; TNode context = CAST(Parameter(Descriptor::kContext)); - Node* function = Parameter(Descriptor::kFunction); + TNode function = CAST(Parameter(Descriptor::kFunction)); TNode native_context = CAST(LoadObjectField(function, JSFunction::kContextOffset)); TNode array_map = LoadJSArrayElementsMap(kind, native_context); @@ -2206,8 +2150,11 @@ void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor( : DONT_TRACK_ALLOCATION_SITE; } - Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter); - Node* allocation_site = Parameter(Descriptor::kAllocationSite); + TNode array_size = + CAST(Parameter(Descriptor::kArraySizeSmiParameter)); + // allocation_site can be Undefined or an AllocationSite + TNode allocation_site = + CAST(Parameter(Descriptor::kAllocationSite)); GenerateConstructor(context, function, array_map, array_size, allocation_site, kind, allocation_site_mode); @@ -2219,7 +2166,7 @@ void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor( // Replace incoming JS receiver argument with the target. // TODO(ishell): Avoid replacing the target on the stack and just add it // as another additional parameter for Runtime::kNewArray. 
- CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); args.SetReceiver(target); // Adjust arguments count for the runtime call: +1 for implicit receiver diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h index 6b8c704038fe39..a19ba1a5da1baa 100644 --- a/deps/v8/src/builtins/builtins-array-gen.h +++ b/deps/v8/src/builtins/builtins-array-gen.h @@ -17,51 +17,13 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { using BuiltinResultGenerator = std::function; - using CallResultProcessor = std::function; - - using PostLoopAction = std::function; - - void FindResultGenerator(); - - Node* FindProcessor(Node* k_value, Node* k); - - void FindIndexResultGenerator(); - - Node* FindIndexProcessor(Node* k_value, Node* k); - - void ForEachResultGenerator(); - - Node* ForEachProcessor(Node* k_value, Node* k); - - void SomeResultGenerator(); - - Node* SomeProcessor(Node* k_value, Node* k); - - void EveryResultGenerator(); - - Node* EveryProcessor(Node* k_value, Node* k); - - void ReduceResultGenerator(); - - Node* ReduceProcessor(Node* k_value, Node* k); - - void ReducePostLoopAction(); + using CallResultProcessor = std::function( + ArrayBuiltinsAssembler* masm, TNode k_value, TNode k)>; void TypedArrayMapResultGenerator(); - Node* SpecCompliantMapProcessor(Node* k_value, Node* k); - - Node* FastMapProcessor(Node* k_value, Node* k); - // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map. - Node* TypedArrayMapProcessor(Node* k_value, Node* k); - - void NullPostLoopAction(); - - // Uses memset to effectively initialize the given FixedArray with Smi zeroes. - void FillFixedArrayWithSmiZero(TNode array, - TNode smi_length); + TNode TypedArrayMapProcessor(TNode k_value, TNode k); TNode CallJSArrayArrayJoinConcatToSequentialString( TNode fixed_array, TNode length, TNode sep, @@ -86,20 +48,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode argc() { return argc_; } TNode o() { return o_; } TNode len() { return len_; } - Node* callbackfn() { return callbackfn_; } - Node* this_arg() { return this_arg_; } - TNode k() { return CAST(k_.value()); } - Node* a() { return a_.value(); } + TNode callbackfn() { return callbackfn_; } + TNode this_arg() { return this_arg_; } + TNode k() { return k_.value(); } + TNode a() { return a_.value(); } - void ReturnFromBuiltin(Node* value); + void ReturnFromBuiltin(TNode value); void InitIteratingArrayBuiltinBody(TNode context, - TNode receiver, Node* callbackfn, - Node* this_arg, TNode argc); + TNode receiver, + TNode callbackfn, + TNode this_arg, + TNode argc); void GenerateIteratingTypedArrayBuiltinBody( const char* name, const BuiltinResultGenerator& generator, - const CallResultProcessor& processor, const PostLoopAction& action, + const CallResultProcessor& processor, ForEachDirection direction = ForEachDirection::kForward); void TailCallArrayConstructorStub( @@ -107,23 +71,25 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode target, TNode allocation_site_or_undefined, TNode argc); - void GenerateDispatchToArrayStub( - TNode context, TNode target, TNode argc, - AllocationSiteOverrideMode mode, - TNode allocation_site = TNode()); + void GenerateDispatchToArrayStub(TNode context, + TNode target, TNode argc, + AllocationSiteOverrideMode mode, + TNode allocation_site = {}); void CreateArrayDispatchNoArgument( TNode context, TNode target, TNode argc, AllocationSiteOverrideMode mode, - TNode allocation_site = TNode()); + TNode allocation_site = 
{}); void CreateArrayDispatchSingleArgument( TNode context, TNode target, TNode argc, AllocationSiteOverrideMode mode, - TNode allocation_site = TNode()); + TNode allocation_site = {}); - void GenerateConstructor(Node* context, Node* array_function, Node* array_map, - Node* array_size, Node* allocation_site, + void GenerateConstructor(TNode context, + TNode array_function, + TNode array_map, TNode array_size, + TNode allocation_site, ElementsKind elements_kind, AllocationSiteMode mode); void GenerateArrayNoArgumentConstructor(ElementsKind kind, AllocationSiteOverrideMode mode); @@ -135,33 +101,22 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode maybe_allocation_site); private: - static ElementsKind ElementsKindForInstanceType(InstanceType type); - - void VisitAllTypedArrayElements(Node* array_buffer, + void VisitAllTypedArrayElements(TNode array_buffer, const CallResultProcessor& processor, Label* detached, ForEachDirection direction, TNode typed_array); - // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). - // This version is specialized to create a zero length array - // of the elements kind of the input array. - void GenerateArraySpeciesCreate(); - - // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate). - void GenerateArraySpeciesCreate(TNode len); - - Node* callbackfn_ = nullptr; + TNode callbackfn_; TNode o_; - Node* this_arg_ = nullptr; + TNode this_arg_; TNode len_; TNode context_; TNode receiver_; TNode argc_; - Node* fast_typed_array_target_ = nullptr; + TNode fast_typed_array_target_; const char* name_ = nullptr; - Variable k_; - Variable a_; - Variable to_; + TVariable k_; + TVariable a_; Label fully_spec_compliant_; ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS; }; diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index 6c3e7246492157..8002c069962a10 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -1189,7 +1189,8 @@ bool IterateElements(Isolate* isolate, Handle receiver, static Maybe IsConcatSpreadable(Isolate* isolate, Handle obj) { HandleScope handle_scope(isolate); if (!obj->IsJSReceiver()) return Just(false); - if (!isolate->IsIsConcatSpreadableLookupChainIntact(JSReceiver::cast(*obj))) { + if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate) || + JSReceiver::cast(*obj).HasProxyInPrototype(isolate)) { // Slow path if @@isConcatSpreadable has been used. Handle key(isolate->factory()->is_concat_spreadable_symbol()); Handle value; @@ -1258,7 +1259,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle species, // dictionary. bool fast_case = is_array_species && (estimate_nof * 2) >= estimate_result_length && - isolate->IsIsConcatSpreadableLookupChainIntact(); + Protectors::IsIsConcatSpreadableLookupChainIntact(isolate); if (fast_case && kind == PACKED_DOUBLE_ELEMENTS) { Handle storage = @@ -1406,7 +1407,7 @@ bool IsSimpleArray(Isolate* isolate, Handle obj) { MaybeHandle Fast_ArrayConcat(Isolate* isolate, BuiltinArguments* args) { - if (!isolate->IsIsConcatSpreadableLookupChainIntact()) { + if (!Protectors::IsIsConcatSpreadableLookupChainIntact(isolate)) { return MaybeHandle(); } // We shouldn't overflow when adding another len. 
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 9ecb1815bcc457..b062b9ca3ce166 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -30,29 +30,38 @@ namespace {
 
 Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
                        Handle<JSReceiver> new_target, Handle<Object> length,
-                       bool initialize) {
+                       InitializedFlag initialized) {
+  SharedFlag shared = (*target != target->native_context().array_buffer_fun())
+                          ? SharedFlag::kShared
+                          : SharedFlag::kNotShared;
   Handle<JSObject> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
       JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+  auto array_buffer = Handle<JSArrayBuffer>::cast(result);
+  // Ensure that all fields are initialized because BackingStore::Allocate is
+  // allowed to GC. Note that we cannot move the allocation of the ArrayBuffer
+  // after BackingStore::Allocate because of the spec.
+  array_buffer->Setup(shared, nullptr);
+
   size_t byte_length;
   if (!TryNumberToSize(*length, &byte_length) ||
       byte_length > JSArrayBuffer::kMaxByteLength) {
-    JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer>::cast(result), isolate);
+    // ToNumber failed.
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
   }
-  SharedFlag shared_flag =
-      (*target == target->native_context().array_buffer_fun())
-          ? SharedFlag::kNotShared
-          : SharedFlag::kShared;
-  if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
-                                          isolate, byte_length, initialize,
-                                          shared_flag)) {
+
+  auto backing_store =
+      BackingStore::Allocate(isolate, byte_length, shared, initialized);
+  if (!backing_store) {
+    // Allocation of backing store failed.
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
   }
-  return *result;
+
+  array_buffer->Attach(std::move(backing_store));
+  return *array_buffer;
 }
 
 }  // namespace
 
@@ -80,7 +89,8 @@ BUILTIN(ArrayBufferConstructor) {
         isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
   }
 
-  return ConstructBuffer(isolate, target, new_target, number_length, true);
+  return ConstructBuffer(isolate, target, new_target, number_length,
+                         InitializedFlag::kZeroInitialized);
 }
 
 // This is a helper to construct an ArrayBuffer with uninitialized memory.
@@ -91,7 +101,8 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
   Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
                             isolate);
   Handle<Object> length = args.atOrUndefined(isolate, 1);
-  return ConstructBuffer(isolate, target, target, length, false);
+  return ConstructBuffer(isolate, target, target, length,
+                         InitializedFlag::kUninitialized);
 }
 
 // ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 6ac37da3f6f6e9..cfd355724e1409 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -263,7 +263,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
   TNode<Object> value = CAST(Parameter(Descriptor::kValue));
   TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  TNode<Object> outer_promise = LoadObjectField(
+  TNode<JSPromise> outer_promise = LoadObjectField<JSPromise>(
       async_function_object, JSAsyncFunctionObject::kPromiseOffset);
 
   Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 70d4eac9c8be28..edcb0272265cde 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -6,6 +6,7 @@
 
 #include "src/builtins/builtins-utils-gen.h"
 #include "src/heap/factory-inl.h"
+#include "src/objects/js-generator.h"
 #include "src/objects/js-promise.h"
 #include "src/objects/shared-function-info.h"
 
@@ -23,11 +24,12 @@ class ValueUnwrapContext {
 
 }  // namespace
 
-Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
-                                       Node* value, Node* outer_promise,
-                                       Node* on_resolve_context_index,
-                                       Node* on_reject_context_index,
-                                       Node* is_predicted_as_caught) {
+TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
+    TNode<Context> context, TNode<JSGeneratorObject> generator,
+    TNode<Object> value, TNode<JSPromise> outer_promise,
+    TNode<IntPtrT> on_resolve_context_index,
+    TNode<IntPtrT> on_reject_context_index,
+    TNode<Oddball> is_predicted_as_caught) {
   TNode<NativeContext> const native_context = LoadNativeContext(context);
 
   static const int kWrappedPromiseOffset =
@@ -91,8 +93,7 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator,
   InitializeNativeClosure(closure_context, native_context, on_reject,
                           on_reject_context_index);
 
-  VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer,
-           UndefinedConstant());
+  TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
 
   // Deal with PromiseHooks and debug support in the runtime.
This // also allocates the throwaway promise, which is only needed in @@ -101,9 +102,9 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &if_debugging, &do_resolve_promise); BIND(&if_debugging); - var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value, - wrapped_value, outer_promise, on_reject, - is_predicted_as_caught)); + var_throwaway = CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context, + value, wrapped_value, outer_promise, + on_reject, is_predicted_as_caught)); Goto(&do_resolve_promise); BIND(&do_resolve_promise); @@ -114,13 +115,13 @@ Node* AsyncBuiltinsAssembler::AwaitOld(Node* context, Node* generator, on_resolve, on_reject, var_throwaway.value()); } -Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, - Node* promise, Node* outer_promise, - Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught) { +TNode AsyncBuiltinsAssembler::AwaitOptimized( + TNode context, TNode generator, + TNode promise, TNode outer_promise, + TNode on_resolve_context_index, + TNode on_reject_context_index, + TNode is_predicted_as_caught) { TNode const native_context = LoadNativeContext(context); - CSA_ASSERT(this, IsJSPromise(promise)); static const int kResolveClosureOffset = FixedArray::SizeFor(Context::MIN_CONTEXT_SLOTS); @@ -130,8 +131,8 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, kRejectClosureOffset + JSFunction::kSizeWithoutPrototype; // 2. Let promise be ? PromiseResolve(« promise »). - // Node* const promise = - // CallBuiltin(Builtins::kPromiseResolve, context, promise_fun, value); + // We skip this step, because promise is already guaranteed to be a + // JSPRomise at this point. TNode base = AllocateInNewSpace(kTotalSize); TNode closure_context = UncheckedCast(base); @@ -162,8 +163,7 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, InitializeNativeClosure(closure_context, native_context, on_reject, on_reject_context_index); - VARIABLE(var_throwaway, MachineRepresentation::kTaggedPointer, - UndefinedConstant()); + TVARIABLE(HeapObject, var_throwaway, UndefinedConstant()); // Deal with PromiseHooks and debug support in the runtime. 
This // also allocates the throwaway promise, which is only needed in @@ -172,9 +172,9 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &if_debugging, &do_perform_promise_then); BIND(&if_debugging); - var_throwaway.Bind(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, - promise, outer_promise, on_reject, - is_predicted_as_caught)); + var_throwaway = + CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise, + outer_promise, on_reject, is_predicted_as_caught)); Goto(&do_perform_promise_then); BIND(&do_perform_promise_then); @@ -182,12 +182,13 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(Node* context, Node* generator, on_resolve, on_reject, var_throwaway.value()); } -Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, - Node* outer_promise, - Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught) { - VARIABLE(result, MachineRepresentation::kTagged); +TNode AsyncBuiltinsAssembler::Await( + TNode context, TNode generator, + TNode value, TNode outer_promise, + TNode on_resolve_context_index, + TNode on_reject_context_index, + TNode is_predicted_as_caught) { + TVARIABLE(Object, result); Label if_old(this), if_new(this), done(this), if_slow_constructor(this, Label::kDeferred); @@ -197,7 +198,8 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, // to allocate the wrapper promise and can just use the `AwaitOptimized` // logic. GotoIf(TaggedIsSmi(value), &if_old); - TNode const value_map = LoadMap(value); + TNode value_object = CAST(value); + TNode const value_map = LoadMap(value_object); GotoIfNot(IsJSPromiseMap(value_map), &if_old); // We can skip the "constructor" lookup on {value} if it's [[Prototype]] // is the (initial) Promise.prototype and the @@species protector is @@ -223,25 +225,24 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value, } BIND(&if_old); - result.Bind(AwaitOld(context, generator, value, outer_promise, - on_resolve_context_index, on_reject_context_index, - is_predicted_as_caught)); + result = AwaitOld(context, generator, value, outer_promise, + on_resolve_context_index, on_reject_context_index, + is_predicted_as_caught); Goto(&done); BIND(&if_new); - result.Bind(AwaitOptimized(context, generator, value, outer_promise, - on_resolve_context_index, on_reject_context_index, - is_predicted_as_caught)); + result = AwaitOptimized(context, generator, CAST(value), outer_promise, + on_resolve_context_index, on_reject_context_index, + is_predicted_as_caught); Goto(&done); BIND(&done); return result.value(); } -void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, - Node* native_context, - Node* function, - Node* context_index) { +void AsyncBuiltinsAssembler::InitializeNativeClosure( + TNode context, TNode native_context, + TNode function, TNode context_index) { TNode function_map = CAST(LoadContextElement( native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); // Ensure that we don't have to initialize prototype_or_initial_map field of @@ -276,24 +277,23 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code); } -Node* AsyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context, - Node* done) { - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - 
TNode const on_fulfilled_shared = CAST(LoadContextElement( +TNode AsyncBuiltinsAssembler::CreateUnwrapClosure( + TNode native_context, TNode done) { + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode on_fulfilled_shared = CAST(LoadContextElement( native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN)); - Node* const closure_context = + const TNode closure_context = AllocateAsyncIteratorValueUnwrapContext(native_context, done); return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared, closure_context); } -Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( - Node* native_context, Node* done) { - CSA_ASSERT(this, IsNativeContext(native_context)); +TNode AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( + TNode native_context, TNode done) { CSA_ASSERT(this, IsBoolean(done)); - Node* const context = + TNode context = CreatePromiseContext(native_context, ValueUnwrapContext::kLength); StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot, done); @@ -301,8 +301,8 @@ Node* AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext( } TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) { - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + TNode value = CAST(Parameter(Descriptor::kValue)); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot); diff --git a/deps/v8/src/builtins/builtins-async-gen.h b/deps/v8/src/builtins/builtins-async-gen.h index 9dafddef2102a6..7b9c944f4acaea 100644 --- a/deps/v8/src/builtins/builtins-async-gen.h +++ b/deps/v8/src/builtins/builtins-async-gen.h @@ -21,20 +21,27 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { // point to a SharedFunctioninfo instance used to create the closure. The // value following the reject index should be a similar value for the resolve // closure. Returns the Promise-wrapped `value`. 
- Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - Node* on_resolve_context_index, Node* on_reject_context_index, - Node* is_predicted_as_caught); - Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - int on_resolve_context_index, int on_reject_context_index, - Node* is_predicted_as_caught) { + TNode Await(TNode context, + TNode generator, TNode value, + TNode outer_promise, + TNode on_resolve_context_index, + TNode on_reject_context_index, + TNode is_predicted_as_caught); + TNode Await(TNode context, + TNode generator, TNode value, + TNode outer_promise, + int on_resolve_context_index, int on_reject_context_index, + TNode is_predicted_as_caught) { return Await(context, generator, value, outer_promise, IntPtrConstant(on_resolve_context_index), IntPtrConstant(on_reject_context_index), is_predicted_as_caught); } - Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise, - int on_resolve_context_index, int on_reject_context_index, - bool is_predicted_as_caught) { + TNode Await(TNode context, + TNode generator, TNode value, + TNode outer_promise, + int on_resolve_context_index, int on_reject_context_index, + bool is_predicted_as_caught) { return Await(context, generator, value, outer_promise, on_resolve_context_index, on_reject_context_index, BooleanConstant(is_predicted_as_caught)); @@ -42,21 +49,30 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler { // Return a new built-in function object as defined in // Async Iterator Value Unwrap Functions - Node* CreateUnwrapClosure(Node* const native_context, Node* const done); + TNode CreateUnwrapClosure(TNode native_context, + TNode done); private: - void InitializeNativeClosure(Node* context, Node* native_context, - Node* function, Node* context_index); - Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context, - Node* done); + void InitializeNativeClosure(TNode context, + TNode native_context, + TNode function, + TNode context_index); + TNode AllocateAsyncIteratorValueUnwrapContext( + TNode native_context, TNode done); - Node* AwaitOld(Node* context, Node* generator, Node* value, - Node* outer_promise, Node* on_resolve_context_index, - Node* on_reject_context_index, Node* is_predicted_as_caught); - Node* AwaitOptimized(Node* context, Node* generator, Node* value, - Node* outer_promise, Node* on_resolve_context_index, - Node* on_reject_context_index, - Node* is_predicted_as_caught); + TNode AwaitOld(TNode context, + TNode generator, + TNode value, TNode outer_promise, + TNode on_resolve_context_index, + TNode on_reject_context_index, + TNode is_predicted_as_caught); + TNode AwaitOptimized(TNode context, + TNode generator, + TNode promise, + TNode outer_promise, + TNode on_resolve_context_index, + TNode on_reject_context_index, + TNode is_predicted_as_caught); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index 8053cf0dc8b268..2ed7e8c83e0663 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -23,146 +23,142 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler { explicit AsyncGeneratorBuiltinsAssembler(CodeAssemblerState* state) : AsyncBuiltinsAssembler(state) {} - inline Node* TaggedIsAsyncGenerator(Node* tagged_object) { - TNode if_notsmi = TaggedIsNotSmi(tagged_object); - return Select( - if_notsmi, - [=] { - return HasInstanceType(tagged_object, 
JS_ASYNC_GENERATOR_OBJECT_TYPE); - }, - [=] { return if_notsmi; }); - } - inline Node* LoadGeneratorState(Node* const generator) { - return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset); + inline TNode LoadGeneratorState( + const TNode generator) { + return LoadObjectField(generator, + JSGeneratorObject::kContinuationOffset); } - inline TNode IsGeneratorStateClosed(SloppyTNode const state) { + inline TNode IsGeneratorStateClosed(const TNode state) { return SmiEqual(state, SmiConstant(JSGeneratorObject::kGeneratorClosed)); } - inline TNode IsGeneratorClosed(Node* const generator) { + inline TNode IsGeneratorClosed( + const TNode generator) { return IsGeneratorStateClosed(LoadGeneratorState(generator)); } - inline TNode IsGeneratorStateSuspended(SloppyTNode const state) { + inline TNode IsGeneratorStateSuspended(const TNode state) { return SmiGreaterThanOrEqual(state, SmiConstant(0)); } - inline TNode IsGeneratorSuspended(Node* const generator) { + inline TNode IsGeneratorSuspended( + const TNode generator) { return IsGeneratorStateSuspended(LoadGeneratorState(generator)); } - inline TNode IsGeneratorStateSuspendedAtStart( - SloppyTNode const state) { + inline TNode IsGeneratorStateSuspendedAtStart(const TNode state) { return SmiEqual(state, SmiConstant(0)); } - inline TNode IsGeneratorStateNotExecuting( - SloppyTNode const state) { + inline TNode IsGeneratorStateNotExecuting(const TNode state) { return SmiNotEqual(state, SmiConstant(JSGeneratorObject::kGeneratorExecuting)); } - inline TNode IsGeneratorNotExecuting(Node* const generator) { + inline TNode IsGeneratorNotExecuting( + const TNode generator) { return IsGeneratorStateNotExecuting(LoadGeneratorState(generator)); } - inline TNode IsGeneratorAwaiting(Node* const generator) { + inline TNode IsGeneratorAwaiting( + const TNode generator) { TNode is_generator_awaiting = LoadObjectField(generator, JSAsyncGeneratorObject::kIsAwaitingOffset); return TaggedEqual(is_generator_awaiting, SmiConstant(1)); } - inline void SetGeneratorAwaiting(Node* const generator) { + inline void SetGeneratorAwaiting(const TNode generator) { CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); StoreObjectFieldNoWriteBarrier( generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1)); CSA_ASSERT(this, IsGeneratorAwaiting(generator)); } - inline void SetGeneratorNotAwaiting(Node* const generator) { + inline void SetGeneratorNotAwaiting( + const TNode generator) { CSA_ASSERT(this, IsGeneratorAwaiting(generator)); StoreObjectFieldNoWriteBarrier( generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0)); CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); } - inline void CloseGenerator(Node* const generator) { + inline void CloseGenerator(const TNode generator) { StoreObjectFieldNoWriteBarrier( generator, JSGeneratorObject::kContinuationOffset, SmiConstant(JSGeneratorObject::kGeneratorClosed)); } - inline Node* IsFastJSIterResult(Node* const value, Node* const context) { - CSA_ASSERT(this, TaggedIsNotSmi(value)); - TNode const native_context = LoadNativeContext(context); - return TaggedEqual( - LoadMap(value), - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); - } - - inline Node* LoadFirstAsyncGeneratorRequestFromQueue(Node* const generator) { - return LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset); + inline TNode LoadFirstAsyncGeneratorRequestFromQueue( + const TNode generator) { + return LoadObjectField(generator, + 
JSAsyncGeneratorObject::kQueueOffset); } - inline Node* LoadResumeTypeFromAsyncGeneratorRequest(Node* const request) { - return LoadObjectField(request, AsyncGeneratorRequest::kResumeModeOffset); + inline TNode LoadResumeTypeFromAsyncGeneratorRequest( + const TNode request) { + return LoadObjectField(request, + AsyncGeneratorRequest::kResumeModeOffset); } - inline Node* LoadPromiseFromAsyncGeneratorRequest(Node* const request) { - return LoadObjectField(request, AsyncGeneratorRequest::kPromiseOffset); + inline TNode LoadPromiseFromAsyncGeneratorRequest( + const TNode request) { + return LoadObjectField(request, + AsyncGeneratorRequest::kPromiseOffset); } - inline Node* LoadValueFromAsyncGeneratorRequest(Node* const request) { + inline TNode LoadValueFromAsyncGeneratorRequest( + const TNode request) { return LoadObjectField(request, AsyncGeneratorRequest::kValueOffset); } - inline TNode IsAbruptResumeType(SloppyTNode const resume_type) { + inline TNode IsAbruptResumeType(const TNode resume_type) { return SmiNotEqual(resume_type, SmiConstant(JSGeneratorObject::kNext)); } - void AsyncGeneratorEnqueue(CodeStubArguments* args, Node* context, - Node* generator, Node* value, + void AsyncGeneratorEnqueue(CodeStubArguments* args, TNode context, + TNode receiver, TNode value, JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name); - Node* TakeFirstAsyncGeneratorRequestFromQueue(Node* generator); - Node* TakeFirstAsyncGeneratorRequestFromQueueIfPresent(Node* generator, - Label* if_not_present); - void AddAsyncGeneratorRequestToQueue(Node* generator, Node* request); + TNode TakeFirstAsyncGeneratorRequestFromQueue( + TNode generator); + void AddAsyncGeneratorRequestToQueue(TNode generator, + TNode request); - Node* AllocateAsyncGeneratorRequest( - JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value, - Node* promise); + TNode AllocateAsyncGeneratorRequest( + JSAsyncGeneratorObject::ResumeMode resume_mode, + TNode resume_value, TNode promise); // Shared implementation of the catchable and uncatchable variations of Await // for AsyncGenerators. template void AsyncGeneratorAwait(bool is_catchable); void AsyncGeneratorAwaitResumeClosure( - Node* context, Node* value, + TNode context, TNode value, JSAsyncGeneratorObject::ResumeMode resume_mode); }; // Shared implementation for the 3 Async Iterator protocol methods of Async // Generators. void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( - CodeStubArguments* args, Node* context, Node* generator, Node* value, - JSAsyncGeneratorObject::ResumeMode resume_mode, const char* method_name) { + CodeStubArguments* args, TNode context, TNode receiver, + TNode value, JSAsyncGeneratorObject::ResumeMode resume_mode, + const char* method_name) { // AsyncGeneratorEnqueue produces a new Promise, and appends it to the list // of async generator requests to be executed. If the generator is not // presently executing, then this method will loop through, processing each // request from front to back. // This loop resides in AsyncGeneratorResumeNext. 
- Node* promise = AllocateAndInitJSPromise(context); - - Label enqueue(this), if_receiverisincompatible(this, Label::kDeferred); + TNode promise = AllocateAndInitJSPromise(context); - Branch(TaggedIsAsyncGenerator(generator), &enqueue, - &if_receiverisincompatible); + Label if_receiverisincompatible(this, Label::kDeferred); + GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible); + GotoIfNot(HasInstanceType(CAST(receiver), JS_ASYNC_GENERATOR_OBJECT_TYPE), + &if_receiverisincompatible); - BIND(&enqueue); { Label done(this); - Node* const req = + const TNode generator = CAST(receiver); + const TNode req = AllocateAsyncGeneratorRequest(resume_mode, value, promise); AddAsyncGeneratorRequestToQueue(generator, req); @@ -171,7 +167,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( // If state is not "executing", then // Perform AsyncGeneratorResumeNext(Generator) // Check if the {receiver} is running or already closed. - TNode continuation = CAST(LoadGeneratorState(generator)); + TNode continuation = LoadGeneratorState(generator); GotoIf(SmiEqual(continuation, SmiConstant(JSAsyncGeneratorObject::kGeneratorExecuting)), @@ -186,20 +182,18 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorEnqueue( BIND(&if_receiverisincompatible); { - Node* const error = - MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context, - StringConstant(method_name), generator); - - CallBuiltin(Builtins::kRejectPromise, context, promise, error, + CallBuiltin(Builtins::kRejectPromise, context, promise, + MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, + context, StringConstant(method_name), receiver), TrueConstant()); args->PopAndReturn(promise); } } -Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( - JSAsyncGeneratorObject::ResumeMode resume_mode, Node* resume_value, - Node* promise) { - CSA_SLOW_ASSERT(this, HasInstanceType(promise, JS_PROMISE_TYPE)); +TNode +AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( + JSAsyncGeneratorObject::ResumeMode resume_mode, TNode resume_value, + TNode promise) { TNode request = Allocate(AsyncGeneratorRequest::kSize); StoreMapNoWriteBarrier(request, RootIndex::kAsyncGeneratorRequestMap); StoreObjectFieldNoWriteBarrier(request, AsyncGeneratorRequest::kNextOffset, @@ -213,15 +207,14 @@ Node* AsyncGeneratorBuiltinsAssembler::AllocateAsyncGeneratorRequest( promise); StoreObjectFieldRoot(request, AsyncGeneratorRequest::kNextOffset, RootIndex::kUndefinedValue); - return request; + return CAST(request); } void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure( - Node* context, Node* value, + TNode context, TNode value, JSAsyncGeneratorObject::ResumeMode resume_mode) { - TNode const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); - CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator)); + const TNode generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -259,12 +252,13 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwait(bool is_catchable) { } void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( - Node* generator, Node* request) { - VARIABLE(var_current, MachineRepresentation::kTagged); + TNode generator, + TNode request) { + TVARIABLE(HeapObject, var_current); Label empty(this), loop(this, &var_current), done(this); - var_current.Bind( - LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset)); + var_current = LoadObjectField( + generator, JSAsyncGeneratorObject::kQueueOffset); 
Branch(IsUndefined(var_current.value()), &empty, &loop); BIND(&empty); @@ -276,9 +270,9 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( BIND(&loop); { Label loop_next(this), next_empty(this); - Node* current = var_current.value(); - TNode next = - LoadObjectField(current, AsyncGeneratorRequest::kNextOffset); + TNode current = CAST(var_current.value()); + TNode next = LoadObjectField( + current, AsyncGeneratorRequest::kNextOffset); Branch(IsUndefined(next), &next_empty, &loop_next); BIND(&next_empty); @@ -289,20 +283,20 @@ void AsyncGeneratorBuiltinsAssembler::AddAsyncGeneratorRequestToQueue( BIND(&loop_next); { - var_current.Bind(next); + var_current = next; Goto(&loop); } } BIND(&done); } -Node* AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue( - Node* generator) { +TNode +AsyncGeneratorBuiltinsAssembler::TakeFirstAsyncGeneratorRequestFromQueue( + TNode generator) { // Removes and returns the first AsyncGeneratorRequest from a // JSAsyncGeneratorObject's queue. Asserts that the queue is not empty. - CSA_ASSERT(this, TaggedIsAsyncGenerator(generator)); - TNode request = - CAST(LoadObjectField(generator, JSAsyncGeneratorObject::kQueueOffset)); + TNode request = LoadObjectField( + generator, JSAsyncGeneratorObject::kQueueOffset); TNode next = LoadObjectField(request, AsyncGeneratorRequest::kNextOffset); @@ -323,7 +317,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeNext, AsyncGeneratorBuiltinsAssembler) { TNode generator = args.GetReceiver(); TNode value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kNext, @@ -341,7 +335,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeReturn, AsyncGeneratorBuiltinsAssembler) { TNode generator = args.GetReceiver(); TNode value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kReturn, @@ -359,7 +353,7 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) { TNode generator = args.GetReceiver(); TNode value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorEnqueue(&args, context, generator, value, JSAsyncGeneratorObject::kThrow, @@ -367,15 +361,15 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode value = CAST(Parameter(Descriptor::kValue)); + TNode context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorAwaitResumeClosure(context, value, JSAsyncGeneratorObject::kNext); } TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) { - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode value = CAST(Parameter(Descriptor::kValue)); + TNode context = CAST(Parameter(Descriptor::kContext)); AsyncGeneratorAwaitResumeClosure(context, value, JSAsyncGeneratorObject::kThrow); } @@ -392,8 +386,9 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) { TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { using 
Descriptor = AsyncGeneratorResumeNextDescriptor; - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const context = Parameter(Descriptor::kContext); + const TNode generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode context = CAST(Parameter(Descriptor::kContext)); // The penultimate step of proposal-async-iteration/#sec-asyncgeneratorresolve // and proposal-async-iteration/#sec-asyncgeneratorreject both recursively @@ -403,12 +398,10 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { // performs a loop in AsyncGeneratorResumeNext, which continues as long as // there is an AsyncGeneratorRequest in the queue, and as long as the // generator is not suspended due to an AwaitExpression. - VARIABLE(var_state, MachineRepresentation::kTaggedSigned, - LoadGeneratorState(generator)); - VARIABLE(var_next, MachineRepresentation::kTagged, - LoadFirstAsyncGeneratorRequestFromQueue(generator)); - Variable* loop_variables[] = {&var_state, &var_next}; - Label start(this, 2, loop_variables); + TVARIABLE(Smi, var_state, LoadGeneratorState(generator)); + TVARIABLE(HeapObject, var_next, + LoadFirstAsyncGeneratorRequestFromQueue(generator)); + Label start(this, {&var_state, &var_next}); Goto(&start); BIND(&start); @@ -420,9 +413,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { // Stop resuming if request queue is empty. ReturnIf(IsUndefined(var_next.value()), UndefinedConstant()); - Node* const next = var_next.value(); - TNode const resume_type = - CAST(LoadResumeTypeFromAsyncGeneratorRequest(next)); + const TNode next = CAST(var_next.value()); + const TNode resume_type = LoadResumeTypeFromAsyncGeneratorRequest(next); Label if_abrupt(this), if_normal(this), resume_generator(this); Branch(IsAbruptResumeType(resume_type), &if_abrupt, &if_normal); @@ -432,11 +424,11 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateSuspendedAtStart(var_state.value()), &settle_promise); CloseGenerator(generator); - var_state.Bind(SmiConstant(JSGeneratorObject::kGeneratorClosed)); + var_state = SmiConstant(JSGeneratorObject::kGeneratorClosed); Goto(&settle_promise); BIND(&settle_promise); - Node* next_value = LoadValueFromAsyncGeneratorRequest(next); + TNode next_value = LoadValueFromAsyncGeneratorRequest(next); Branch(SmiEqual(resume_type, SmiConstant(JSGeneratorObject::kReturn)), &if_return, &if_throw); @@ -457,7 +449,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator); CallBuiltin(Builtins::kAsyncGeneratorReject, context, generator, next_value); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } @@ -466,8 +458,8 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { GotoIfNot(IsGeneratorStateClosed(var_state.value()), &resume_generator); CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, UndefinedConstant(), TrueConstant()); - var_state.Bind(LoadGeneratorState(generator)); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_state = LoadGeneratorState(generator); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } @@ -478,19 +470,19 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { generator, JSGeneratorObject::kResumeModeOffset, resume_type); CallStub(CodeFactory::ResumeGenerator(isolate()), 
context, LoadValueFromAsyncGeneratorRequest(next), generator); - var_state.Bind(LoadGeneratorState(generator)); - var_next.Bind(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + var_state = LoadGeneratorState(generator); + var_next = LoadFirstAsyncGeneratorRequestFromQueue(generator); Goto(&start); } } TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const done = Parameter(Descriptor::kDone); - Node* const context = Parameter(Descriptor::kContext); + const TNode generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode done = CAST(Parameter(Descriptor::kDone)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - CSA_SLOW_ASSERT(this, TaggedIsAsyncGenerator(generator)); CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator))); // This operation should be called only when the `value` parameter has been @@ -499,11 +491,12 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { // non-callable value. This can't be checked with assertions due to being // observable, but keep it in mind. - Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator); - Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next); + const TNode next = + TakeFirstAsyncGeneratorRequestFromQueue(generator); + const TNode promise = LoadPromiseFromAsyncGeneratorRequest(next); // Let iteratorResult be CreateIterResultObject(value, done). - TNode const iter_result = Allocate(JSIteratorResult::kSize); + const TNode iter_result = Allocate(JSIteratorResult::kSize); { TNode map = LoadContextElement(LoadNativeContext(context), Context::ITERATOR_RESULT_MAP_INDEX); @@ -555,25 +548,30 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) { using Descriptor = AsyncGeneratorRejectDescriptor; - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator); - Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next); + TNode next = + TakeFirstAsyncGeneratorRequestFromQueue(generator); + TNode promise = LoadPromiseFromAsyncGeneratorRequest(next); Return(CallBuiltin(Builtins::kRejectPromise, context, promise, value, TrueConstant())); } TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const is_caught = Parameter(Descriptor::kIsCaught); - Node* const context = Parameter(Descriptor::kContext); + const TNode generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode is_caught = CAST(Parameter(Descriptor::kIsCaught)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator); - Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request); + const TNode request = + CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator)); + 
const TNode outer_promise = + LoadPromiseFromAsyncGeneratorRequest(request); const int on_resolve = Context::ASYNC_GENERATOR_YIELD_RESOLVE_SHARED_FUN; const int on_reject = Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN; @@ -585,10 +583,10 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -617,33 +615,35 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { // (per proposal-async-iteration/#sec-asyncgeneratorresumenext step 10.b.i) // // In all cases, the final step is to jump back to AsyncGeneratorResumeNext. - Node* const generator = Parameter(Descriptor::kGenerator); - Node* const value = Parameter(Descriptor::kValue); - Node* const is_caught = Parameter(Descriptor::kIsCaught); - Node* const req = LoadFirstAsyncGeneratorRequestFromQueue(generator); - CSA_ASSERT(this, IsNotUndefined(req)); + const TNode generator = + CAST(Parameter(Descriptor::kGenerator)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode is_caught = CAST(Parameter(Descriptor::kIsCaught)); + const TNode req = + CAST(LoadFirstAsyncGeneratorRequestFromQueue(generator)); Label perform_await(this); - VARIABLE(var_on_resolve, MachineType::PointerRepresentation(), - IntPtrConstant( - Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN)); - VARIABLE( - var_on_reject, MachineType::PointerRepresentation(), + TVARIABLE(IntPtrT, var_on_resolve, + IntPtrConstant( + Context::ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN)); + TVARIABLE( + IntPtrT, var_on_reject, IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN)); - Node* const state = LoadGeneratorState(generator); + const TNode state = LoadGeneratorState(generator); GotoIf(IsGeneratorStateClosed(state), &perform_await); - var_on_resolve.Bind( - IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN)); - var_on_reject.Bind( - IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN)); + var_on_resolve = + IntPtrConstant(Context::ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN); + var_on_reject = + IntPtrConstant(Context::ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN); Goto(&perform_await); BIND(&perform_await); SetGeneratorAwaiting(generator); - Node* const context = Parameter(Descriptor::kContext); - Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(req); + TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode outer_promise = + LoadPromiseFromAsyncGeneratorRequest(req); Await(context, generator, value, outer_promise, var_on_resolve.value(), var_on_reject.value(), is_caught); @@ -656,8 +656,8 @@ TF_BUILTIN(AsyncGeneratorReturn, AsyncGeneratorBuiltinsAssembler) { // proposal-async-iteration/#sec-asyncgeneratoryield step 8.e TF_BUILTIN(AsyncGeneratorReturnResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); + const TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode value = CAST(Parameter(Descriptor::kValue)); 
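// A minimal sketch of the handler selection performed by AsyncGeneratorReturn
// above: when the generator is already closed, the awaited operand is settled
// with the "return closed" resolve/reject closures; otherwise the regular
// return-resolve / await-reject closures are used. The enum below is a
// hypothetical stand-in for the native-context slot constants named in the
// hunk, not part of this patch.
enum class ReturnHandler {
  kReturnResolve,        // ASYNC_GENERATOR_RETURN_RESOLVE_SHARED_FUN
  kAwaitReject,          // ASYNC_GENERATOR_AWAIT_REJECT_SHARED_FUN
  kReturnClosedResolve,  // ASYNC_GENERATOR_RETURN_CLOSED_RESOLVE_SHARED_FUN
  kReturnClosedReject,   // ASYNC_GENERATOR_RETURN_CLOSED_REJECT_SHARED_FUN
};

struct ReturnHandlers {
  ReturnHandler on_resolve;
  ReturnHandler on_reject;
};

ReturnHandlers SelectReturnHandlers(bool generator_is_closed) {
  if (generator_is_closed) {
    return {ReturnHandler::kReturnClosedResolve,
            ReturnHandler::kReturnClosedReject};
  }
  return {ReturnHandler::kReturnResolve, ReturnHandler::kAwaitReject};
}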
AsyncGeneratorAwaitResumeClosure(context, value, JSGeneratorObject::kReturn); } @@ -666,10 +666,10 @@ TF_BUILTIN(AsyncGeneratorReturnResolveClosure, // AsyncGeneratorResumeNext. TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); @@ -684,10 +684,10 @@ TF_BUILTIN(AsyncGeneratorReturnClosedResolveClosure, TF_BUILTIN(AsyncGeneratorReturnClosedRejectClosure, AsyncGeneratorBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const value = Parameter(Descriptor::kValue); - TNode const generator = - LoadContextElement(context, Context::EXTENSION_INDEX); + const TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode generator = + CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); SetGeneratorNotAwaiting(generator); diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc index 0b5c5ef8b962cd..39ff8c92172559 100644 --- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/base/optional.h" #include "src/builtins/builtins-async-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" @@ -20,29 +21,34 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { explicit AsyncFromSyncBuiltinsAssembler(compiler::CodeAssemblerState* state) : AsyncBuiltinsAssembler(state) {} - void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object, + void ThrowIfNotAsyncFromSyncIterator(const TNode context, + const TNode object, Label* if_exception, - Variable* var_exception, + TVariable* var_exception, const char* method_name); - using UndefinedMethodHandler = std::function; - using SyncIteratorNodeGenerator = std::function; + using UndefinedMethodHandler = + std::function native_context, + const TNode promise, Label* if_exception)>; + using SyncIteratorNodeGenerator = + std::function(TNode)>; void Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, + const TNode context, const TNode iterator, + const TNode sent_value, const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type = Label::kDeferred, - Node* const initial_exception_value = nullptr); + base::Optional> initial_exception_value = base::nullopt); void Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, - Handle name, const UndefinedMethodHandler& if_method_undefined, + const TNode context, const TNode iterator, + const TNode sent_value, Handle name, + const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type = Label::kDeferred, - Node* const initial_exception_value = nullptr) { - auto get_method = [=](Node* const 
sync_iterator) { + base::Optional> initial_exception_value = base::nullopt) { + auto get_method = [=](const TNode sync_iterator) { return GetProperty(context, sync_iterator, name); }; return Generate_AsyncFromSyncIteratorMethod( @@ -51,26 +57,26 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { } // Load "value" and "done" from an iterator result object. If an exception - // is thrown at any point, jumps to te `if_exception` label with exception + // is thrown at any point, jumps to the `if_exception` label with exception // stored in `var_exception`. // // Returns a Pair of Nodes, whose first element is the value of the "value" // property, and whose second element is the value of the "done" property, // converted to a Boolean if needed. - std::pair LoadIteratorResult(Node* const context, - Node* const native_context, - Node* const iter_result, - Label* if_exception, - Variable* var_exception); + std::pair, TNode> LoadIteratorResult( + const TNode context, const TNode native_context, + const TNode iter_result, Label* if_exception, + TVariable* var_exception); }; void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( - Node* const context, Node* const object, Label* if_exception, - Variable* var_exception, const char* method_name) { + const TNode context, const TNode object, + Label* if_exception, TVariable* var_exception, + const char* method_name) { Label if_receiverisincompatible(this, Label::kDeferred), done(this); GotoIf(TaggedIsSmi(object), &if_receiverisincompatible); - Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done, + Branch(HasInstanceType(CAST(object), JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done, &if_receiverisincompatible); BIND(&if_receiverisincompatible); @@ -79,13 +85,13 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( // internal slot, then // Let badIteratorError be a new TypeError exception. - Node* const error = - MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context, - StringConstant(method_name), object); + TNode error = + CAST(MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, + context, StringConstant(method_name), object)); // Perform ! Call(promiseCapability.[[Reject]], undefined, // « badIteratorError »). - var_exception->Bind(error); + *var_exception = error; Goto(if_exception); } @@ -93,26 +99,27 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( } void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( - Node* const context, Node* const iterator, Node* const sent_value, - const SyncIteratorNodeGenerator& get_method, + const TNode context, const TNode iterator, + const TNode sent_value, const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type, - Node* const initial_exception_value) { - TNode const native_context = LoadNativeContext(context); - Node* const promise = AllocateAndInitJSPromise(context); + base::Optional> initial_exception_value) { + const TNode native_context = LoadNativeContext(context); + const TNode promise = AllocateAndInitJSPromise(context); - VARIABLE(var_exception, MachineRepresentation::kTagged, - initial_exception_value == nullptr ? UndefinedConstant() - : initial_exception_value); + TVARIABLE( + Object, var_exception, + initial_exception_value ? 
*initial_exception_value : UndefinedConstant()); Label reject_promise(this, reject_label_type); ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise, &var_exception, operation_name); - TNode const sync_iterator = - LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset); + TNode async_iterator = CAST(iterator); + const TNode sync_iterator = LoadObjectField( + async_iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset); - Node* const method = get_method(sync_iterator); + TNode method = get_method(sync_iterator); if (if_method_undefined) { Label if_isnotundefined(this); @@ -123,21 +130,21 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( BIND(&if_isnotundefined); } - Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context, - method, sync_iterator, sent_value); + const TNode iter_result = CallJS( + CodeFactory::Call(isolate()), context, method, sync_iterator, sent_value); GotoIfException(iter_result, &reject_promise, &var_exception); - Node* value; - Node* done; + TNode value; + TNode done; std::tie(value, done) = LoadIteratorResult( context, native_context, iter_result, &reject_promise, &var_exception); - TNode const promise_fun = + const TNode promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); CSA_ASSERT(this, IsConstructor(promise_fun)); // Let valueWrapper be PromiseResolve(%Promise%, « value »). - TNode const value_wrapper = CallBuiltin( + const TNode value_wrapper = CallBuiltin( Builtins::kPromiseResolve, native_context, promise_fun, value); // IfAbruptRejectPromise(valueWrapper, promiseCapability). GotoIfException(value_wrapper, &reject_promise, &var_exception); @@ -145,7 +152,8 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( // Let onFulfilled be a new built-in function object as defined in // Async Iterator Value Unwrap Functions. // Set onFulfilled.[[Done]] to throwDone. - Node* const on_fulfilled = CreateUnwrapClosure(native_context, done); + const TNode on_fulfilled = + CreateUnwrapClosure(native_context, done); // Perform ! PerformPromiseThen(valueWrapper, // onFulfilled, undefined, promiseCapability). 
@@ -154,35 +162,39 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( BIND(&reject_promise); { - Node* const exception = var_exception.value(); + const TNode exception = var_exception.value(); CallBuiltin(Builtins::kRejectPromise, context, promise, exception, TrueConstant()); Return(promise); } } -std::pair AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( - Node* const context, Node* const native_context, Node* const iter_result, - Label* if_exception, Variable* var_exception) { + +std::pair, TNode> +AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( + const TNode context, const TNode native_context, + const TNode iter_result, Label* if_exception, + TVariable* var_exception) { Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this), done(this), if_notanobject(this, Label::kDeferred); GotoIf(TaggedIsSmi(iter_result), &if_notanobject); - TNode const iter_result_map = LoadMap(iter_result); + const TNode iter_result_map = LoadMap(CAST(iter_result)); GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject); - TNode const fast_iter_result_map = + const TNode fast_iter_result_map = LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); - VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_done, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); + TVARIABLE(Object, var_done); Branch(TaggedEqual(iter_result_map, fast_iter_result_map), &if_fastpath, &if_slowpath); BIND(&if_fastpath); { - var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset)); - var_value.Bind( - LoadObjectField(iter_result, JSIteratorResult::kValueOffset)); + TNode fast_iter_result = CAST(iter_result); + var_done = LoadObjectField(fast_iter_result, JSIteratorResult::kDoneOffset); + var_value = + LoadObjectField(fast_iter_result, JSIteratorResult::kValueOffset); Goto(&merge); } @@ -190,18 +202,18 @@ std::pair AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( { // Let nextDone be IteratorComplete(nextResult). // IfAbruptRejectPromise(nextDone, promiseCapability). - TNode const done = + const TNode done = GetProperty(context, iter_result, factory()->done_string()); GotoIfException(done, if_exception, var_exception); // Let nextValue be IteratorValue(nextResult). // IfAbruptRejectPromise(nextValue, promiseCapability). - TNode const value = + const TNode value = GetProperty(context, iter_result, factory()->value_string()); GotoIfException(value, if_exception, var_exception); - var_value.Bind(value); - var_done.Bind(done); + var_value = value; + var_done = done; Goto(&merge); } @@ -209,27 +221,27 @@ std::pair AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( { // Sync iterator result is not an object --- Produce a TypeError and jump // to the `if_exception` path. - Node* const error = MakeTypeError( - MessageTemplate::kIteratorResultNotAnObject, context, iter_result); - var_exception->Bind(error); + const TNode error = CAST(MakeTypeError( + MessageTemplate::kIteratorResultNotAnObject, context, iter_result)); + *var_exception = error; Goto(if_exception); } BIND(&merge); // Ensure `iterResult.done` is a Boolean. 
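// A minimal sketch of what LoadIteratorResult does above: results with the
// native iterator-result shape are read field-by-field, other receivers go
// through observable "done"/"value" property loads, and "done" is then
// coerced to a boolean. The string-keyed property bag is a hypothetical
// stand-in for a JS receiver, not V8's object model or part of this patch.
#include <map>
#include <string>
#include <utility>

struct FastShape {
  std::string value;
  bool done;
};

std::pair<std::string, bool> LoadFast(const FastShape& r) {
  // if_fastpath: direct field loads, no user-observable property access.
  return {r.value, r.done};
}

std::pair<std::string, bool> LoadSlow(
    const std::map<std::string, std::string>& receiver) {
  // if_slowpath: observable property loads, then a ToBoolean-style coercion
  // of "done" (here: any non-empty string counts as true).
  std::string done = receiver.count("done") ? receiver.at("done") : "";
  std::string value = receiver.count("value") ? receiver.at("value") : "";
  return {value, !done.empty()};
}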
GotoIf(TaggedIsSmi(var_done.value()), &to_boolean); - Branch(IsBoolean(var_done.value()), &done, &to_boolean); + Branch(IsBoolean(CAST(var_done.value())), &done, &to_boolean); BIND(&to_boolean); { - TNode const result = + const TNode result = CallBuiltin(Builtins::kToBoolean, context, var_done.value()); - var_done.Bind(result); + var_done = result; Goto(&done); } BIND(&done); - return std::make_pair(var_value.value(), var_done.value()); + return std::make_pair(var_value.value(), CAST(var_done.value())); } } // namespace @@ -237,12 +249,13 @@ std::pair AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( // https://tc39.github.io/proposal-async-iteration/ // Section #sec-%asyncfromsynciteratorprototype%.next TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - auto get_method = [=](Node* const unused) { - return LoadObjectField(iterator, JSAsyncFromSyncIterator::kNextOffset); + auto get_method = [=](const TNode unused) { + return LoadObjectField(CAST(iterator), + JSAsyncFromSyncIterator::kNextOffset); }; Generate_AsyncFromSyncIteratorMethod( context, iterator, value, get_method, UndefinedMethodHandler(), @@ -253,15 +266,16 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { // Section #sec-%asyncfromsynciteratorprototype%.return TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - auto if_return_undefined = [=](Node* const native_context, - Node* const promise, Label* if_exception) { + auto if_return_undefined = [=](const TNode native_context, + const TNode promise, + Label* if_exception) { // If return is undefined, then // Let iterResult be ! CreateIterResultObject(value, true) - TNode const iter_result = CallBuiltin( + const TNode iter_result = CallBuiltin( Builtins::kCreateIterResultObject, context, value, TrueConstant()); // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »). 
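// A minimal sketch of the `if_return_undefined` path above: when the sync
// iterator has no "return" method, %AsyncFromSyncIteratorPrototype%.return
// settles its promise with an already-done iterator result wrapping the sent
// value. IterResult is a hypothetical stand-in for JSIteratorResult, not a
// V8 type and not part of this patch.
struct IterResult {
  int value;  // stands in for the tagged "value" slot
  bool done;
};

IterResult ReturnWithoutSyncReturnMethod(int sent_value) {
  // mirrors CallBuiltin(Builtins::kCreateIterResultObject, context, value,
  //                     TrueConstant())
  return IterResult{sent_value, true};
}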
@@ -280,11 +294,12 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, // Section #sec-%asyncfromsynciteratorprototype%.throw TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow, AsyncFromSyncBuiltinsAssembler) { - Node* const iterator = Parameter(Descriptor::kReceiver); - Node* const reason = Parameter(Descriptor::kReason); - Node* const context = Parameter(Descriptor::kContext); + const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); + const TNode reason = CAST(Parameter(Descriptor::kReason)); + const TNode context = CAST(Parameter(Descriptor::kContext)); - auto if_throw_undefined = [=](Node* const native_context, Node* const promise, + auto if_throw_undefined = [=](const TNode native_context, + const TNode promise, Label* if_exception) { Goto(if_exception); }; Generate_AsyncFromSyncIteratorMethod( diff --git a/deps/v8/src/builtins/builtins-async-module.cc b/deps/v8/src/builtins/builtins-async-module.cc new file mode 100644 index 00000000000000..fecdb31cf3cdb9 --- /dev/null +++ b/deps/v8/src/builtins/builtins-async-module.cc @@ -0,0 +1,33 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-inl.h" +#include "src/objects/module-inl.h" +#include "src/objects/objects-inl.h" + +namespace v8 { +namespace internal { + +BUILTIN(CallAsyncModuleFulfilled) { + HandleScope handle_scope(isolate); + Handle module( + isolate->global_handles()->Create(*args.at(0))); + SourceTextModule::AsyncModuleExecutionFulfilled(isolate, module); + return ReadOnlyRoots(isolate).undefined_value(); +} + +BUILTIN(CallAsyncModuleRejected) { + HandleScope handle_scope(isolate); + + // Arguments should be a SourceTextModule and an exception object. + DCHECK_EQ(args.length(), 2); + Handle module( + isolate->global_handles()->Create(*args.at(0))); + Handle exception(args.at(1)); + SourceTextModule::AsyncModuleExecutionRejected(isolate, module, exception); + return ReadOnlyRoots(isolate).undefined_value(); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc index 1201ce97300ec0..30da5207f90b7a 100644 --- a/deps/v8/src/builtins/builtins-bigint.cc +++ b/deps/v8/src/builtins/builtins-bigint.cc @@ -125,26 +125,21 @@ Object BigIntToStringImpl(Handle receiver, Handle radix, BUILTIN(BigIntPrototypeToLocaleString) { HandleScope scope(isolate); + const char* method = "BigInt.prototype.toLocaleString"; #ifdef V8_INTL_SUPPORT - if (FLAG_harmony_intl_bigint) { - // 1. Let x be ? thisBigIntValue(this value). - Handle x; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, x, - ThisBigIntValue(isolate, args.receiver(), - "BigInt.prototype.toLocaleString")); - - RETURN_RESULT_OR_FAILURE( - isolate, - Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1), - args.atOrUndefined(isolate, 2))); - } - // Fallbacks to old toString implemention if flag is off or no - // V8_INTL_SUPPORT + // 1. Let x be ? thisBigIntValue(this value). 
+ Handle x; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, x, ThisBigIntValue(isolate, args.receiver(), method)); + + RETURN_RESULT_OR_FAILURE( + isolate, + Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1), + args.atOrUndefined(isolate, 2), method)); + // Fallbacks to old toString implemention if no V8_INTL_SUPPORT #endif // V8_INTL_SUPPORT Handle radix = isolate->factory()->undefined_value(); - return BigIntToStringImpl(args.receiver(), radix, isolate, - "BigInt.prototype.toLocaleString"); + return BigIntToStringImpl(args.receiver(), radix, isolate, method); } BUILTIN(BigIntPrototypeToString) { diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 91370b089679f6..fd1ad5bb67c0de 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -9,6 +9,7 @@ #include "src/codegen/macro-assembler.h" #include "src/common/globals.h" #include "src/execution/isolate.h" +#include "src/execution/protectors.h" #include "src/objects/api-callbacks.h" #include "src/objects/arguments.h" #include "src/objects/property-cell.h" @@ -17,9 +18,6 @@ namespace v8 { namespace internal { -template -using TNode = compiler::TNode; - void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined( MacroAssembler* masm) { Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined); @@ -297,7 +295,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( TNode protector_cell = ArrayIteratorProtectorConstant(); GotoIf( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorInvalid)), + SmiConstant(Protectors::kProtectorInvalid)), &if_generic); { // The fast-path accesses the {spread} elements directly. diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index dec4142c65fc9e..c0ca74a577b886 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -8,6 +8,7 @@ #include "src/builtins/builtins-iterator-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/codegen/code-stub-assembler.h" +#include "src/execution/protectors.h" #include "src/heap/factory-inl.h" #include "src/heap/heap-inl.h" #include "src/objects/hash-table-inl.h" @@ -19,8 +20,6 @@ namespace internal { using compiler::Node; template -using TNode = compiler::TNode; -template using TVariable = compiler::TypedCodeAssemblerVariable; class BaseCollectionsAssembler : public CodeStubAssembler { @@ -81,8 +80,8 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode new_target); // Allocates the backing store for a collection. - virtual TNode AllocateTable(Variant variant, TNode context, - TNode at_least_space_for) = 0; + virtual TNode AllocateTable( + Variant variant, TNode at_least_space_for) = 0; // Main entry point for a collection constructor builtin. void GenerateConstructor(Variant variant, @@ -124,7 +123,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode EstimatedInitialSize(TNode initial_entries, TNode is_fast_jsarray); - void GotoIfNotJSReceiver(Node* const obj, Label* if_not_receiver); + void GotoIfNotJSReceiver(TNode const obj, Label* if_not_receiver); // Determines whether the collection's prototype has been modified. TNode HasInitialCollectionPrototype(Variant variant, @@ -160,8 +159,8 @@ void BaseCollectionsAssembler::AddConstructorEntry( ? 
LoadKeyValuePairNoSideEffects(context, key_value, if_may_have_side_effects) : LoadKeyValuePair(context, key_value); - Node* key_n = pair.key; - Node* value_n = pair.value; + TNode key_n = pair.key; + TNode value_n = pair.value; CallJS(CodeFactory::Call(isolate()), context, add_function, collection, key_n, value_n); } else { @@ -183,7 +182,7 @@ void BaseCollectionsAssembler::AddConstructorEntries( Goto(&allocate_table); BIND(&allocate_table); { - TNode table = AllocateTable(variant, context, at_least_space_for); + TNode table = AllocateTable(variant, at_least_space_for); StoreObjectField(collection, GetTableOffset(variant), table); GotoIf(IsNullOrUndefined(initial_entries), &exit); GotoIfInitialAddFunctionModified(variant, CAST(native_context), @@ -261,7 +260,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( &if_doubles); BIND(&if_smiorobjects); { - auto set_entry = [&](Node* index) { + auto set_entry = [&](TNode index) { TNode element = LoadAndNormalizeFixedArrayElement( CAST(elements), UncheckedCast(index)); AddConstructorEntry(variant, context, collection, add_func, element, @@ -272,8 +271,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( // elements, a fast loop is used. This assumes that adding an element // to the collection does not call user code that could mutate the elements // or collection. - BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, + IndexAdvanceMode::kPost); Goto(&exit); } BIND(&if_doubles); @@ -288,13 +287,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( element); } else { DCHECK(variant == kSet || variant == kWeakSet); - auto set_entry = [&](Node* index) { + auto set_entry = [&](TNode index) { TNode entry = LoadAndNormalizeFixedDoubleArrayElement( elements, UncheckedCast(index)); AddConstructorEntry(variant, context, collection, add_func, entry); }; - BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, - ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + BuildFastLoop(IntPtrConstant(0), length, set_entry, 1, + IndexAdvanceMode::kPost); Goto(&exit); } } @@ -523,10 +522,10 @@ TNode BaseCollectionsAssembler::EstimatedInitialSize( [=] { return IntPtrConstant(0); }); } -void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj, +void BaseCollectionsAssembler::GotoIfNotJSReceiver(TNode const obj, Label* if_not_receiver) { GotoIf(TaggedIsSmi(obj), if_not_receiver); - GotoIfNot(IsJSReceiver(obj), if_not_receiver); + GotoIfNot(IsJSReceiver(CAST(obj)), if_not_receiver); } TNode BaseCollectionsAssembler::GetInitialCollectionPrototype( @@ -608,22 +607,24 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { protected: template - Node* AllocateJSCollectionIterator(SloppyTNode context, - int map_index, Node* collection); - TNode AllocateTable(Variant variant, TNode context, - TNode at_least_space_for) override; - TNode GetHash(SloppyTNode const key); - TNode CallGetHashRaw(SloppyTNode const key); - TNode CallGetOrCreateHashRaw(SloppyTNode const key); + TNode AllocateJSCollectionIterator( + const TNode context, int map_index, + const TNode collection); + TNode AllocateTable(Variant variant, + TNode at_least_space_for) override; + TNode GetHash(const TNode key); + TNode CallGetHashRaw(const TNode key); + TNode CallGetOrCreateHashRaw(const TNode key); // Transitions the iterator to the non obsolete backing store. 
// This is a NOP if the [table] is not obsolete. - using UpdateInTransition = - std::function; + template + using UpdateInTransition = std::function table, + const TNode index)>; template std::pair, TNode> Transition( TNode const table, TNode const index, - UpdateInTransition const& update_in_transition); + UpdateInTransition const& update_in_transition); template std::pair, TNode> TransitionAndUpdate( TNode const iterator); @@ -635,35 +636,33 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. template - void FindOrderedHashTableEntryForSmiKey(Node* table, - SloppyTNode key_tagged, - Variable* result, Label* entry_found, - Label* not_found); - void SameValueZeroSmi(SloppyTNode key_smi, - SloppyTNode candidate_key, Label* if_same, - Label* if_not_same); + void FindOrderedHashTableEntryForSmiKey(TNode table, + TNode key_tagged, + TVariable* result, + Label* entry_found, Label* not_found); + void SameValueZeroSmi(TNode key_smi, TNode candidate_key, + Label* if_same, Label* if_not_same); // Specialization for heap numbers. // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. - void SameValueZeroHeapNumber(SloppyTNode key_float, - SloppyTNode candidate_key, - Label* if_same, Label* if_not_same); + void SameValueZeroHeapNumber(TNode key_float, + TNode candidate_key, Label* if_same, + Label* if_not_same); template void FindOrderedHashTableEntryForHeapNumberKey( - SloppyTNode context, Node* table, - SloppyTNode key_heap_number, Variable* result, - Label* entry_found, Label* not_found); + TNode table, TNode key_heap_number, + TVariable* result, Label* entry_found, Label* not_found); // Specialization for bigints. // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. - void SameValueZeroBigInt(Node* key, Node* candidate_key, Label* if_same, - Label* if_not_same); + void SameValueZeroBigInt(TNode key, TNode candidate_key, + Label* if_same, Label* if_not_same); template - void FindOrderedHashTableEntryForBigIntKey(SloppyTNode context, - Node* table, Node* key, - Variable* result, + void FindOrderedHashTableEntryForBigIntKey(TNode table, + TNode key_big_int, + TVariable* result, Label* entry_found, Label* not_found); @@ -671,14 +670,14 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // The {result} variable will contain the entry index if the key was found, // or the hash code otherwise. template - void FindOrderedHashTableEntryForStringKey( - SloppyTNode context, Node* table, SloppyTNode key_tagged, - Variable* result, Label* entry_found, Label* not_found); - TNode ComputeStringHash(TNode context, - TNode string_key); - void SameValueZeroString(SloppyTNode context, - SloppyTNode key_string, - SloppyTNode candidate_key, Label* if_same, + void FindOrderedHashTableEntryForStringKey(TNode table, + TNode key_tagged, + TVariable* result, + Label* entry_found, + Label* not_found); + TNode ComputeStringHash(TNode string_key); + void SameValueZeroString(TNode key_string, + TNode candidate_key, Label* if_same, Label* if_not_same); // Specialization for non-strings, non-numbers. For those we only need @@ -687,26 +686,32 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { // or the hash code otherwise. If the hash-code has not been computed, it // should be Smi -1. 
template - void FindOrderedHashTableEntryForOtherKey( - SloppyTNode context, Node* table, SloppyTNode key, - Variable* result, Label* entry_found, Label* not_found); + void FindOrderedHashTableEntryForOtherKey(TNode table, + TNode key_heap_object, + TVariable* result, + Label* entry_found, + Label* not_found); template - void TryLookupOrderedHashTableIndex(Node* const table, Node* const key, - Node* const context, Variable* result, + void TryLookupOrderedHashTableIndex(const TNode table, + const TNode key, + TVariable* result, Label* if_entry_found, Label* if_not_found); - Node* NormalizeNumberKey(Node* key); + const TNode NormalizeNumberKey(const TNode key); void StoreOrderedHashMapNewEntry(TNode const table, - Node* const key, Node* const value, - Node* const hash, - Node* const number_of_buckets, - Node* const occupancy); + const TNode key, + const TNode value, + const TNode hash, + const TNode number_of_buckets, + const TNode occupancy); + void StoreOrderedHashSetNewEntry(TNode const table, - Node* const key, Node* const hash, - Node* const number_of_buckets, - Node* const occupancy); + const TNode key, + const TNode hash, + const TNode number_of_buckets, + const TNode occupancy); // Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or // Map.prototype.values() iterator. The iterator is assumed to satisfy @@ -727,11 +732,97 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false); void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false); + + // Builds code that finds OrderedHashTable entry for a key with hash code + // {hash} with using the comparison code generated by {key_compare}. The code + // jumps to {entry_found} if the key is found, or to {not_found} if the key + // was not found. In the {entry_found} branch, the variable + // entry_start_position will be bound to the index of the entry (relative to + // OrderedHashTable::kHashTableStartIndex). + // + // The {CollectionType} template parameter stands for the particular instance + // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. + template + void FindOrderedHashTableEntry( + Node* table, Node* hash, + const std::function, Label*, Label*)>& key_compare, + Variable* entry_start_position, Label* entry_found, Label* not_found); }; +template +void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry( + Node* table, Node* hash, + const std::function, Label*, Label*)>& key_compare, + Variable* entry_start_position, Label* entry_found, Label* not_found) { + // Get the index of the bucket. + TNode const number_of_buckets = + SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), CollectionType::NumberOfBucketsIndex()))); + TNode const bucket = + WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); + TNode const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), bucket, + CollectionType::HashTableStartIndex() * kTaggedSize))); + + // Walk the bucket chain. + TNode entry_start; + Label if_key_found(this); + { + TVARIABLE(IntPtrT, var_entry, first_entry); + Label loop(this, {&var_entry, entry_start_position}), + continue_next_entry(this); + Goto(&loop); + BIND(&loop); + + // If the entry index is the not-found sentinel, we are done. + GotoIf(IntPtrEqual(var_entry.value(), + IntPtrConstant(CollectionType::kNotFound)), + not_found); + + // Make sure the entry index is within range. 
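// A minimal standalone sketch of the bucket-chain walk that
// FindOrderedHashTableEntry emits above: hash into one of a power-of-two
// number of buckets, take that bucket's first entry, and follow the per-entry
// chain link until the key comparison succeeds or the not-found sentinel is
// reached. FlatTable is a hypothetical flattened stand-in for
// OrderedHashMap/OrderedHashSet, not V8's actual layout; entries live in a
// flat array in insertion order, which is how the ordered tables preserve
// iteration order.
#include <cstddef>
#include <vector>

struct FlatTable {
  static constexpr int kNotFound = -1;
  std::vector<int> buckets;  // bucket -> first entry index, or kNotFound
  std::vector<int> keys;     // entry  -> key
  std::vector<int> chain;    // entry  -> next entry in the same bucket
};

int FindEntry(const FlatTable& table, int key, std::size_t hash) {
  // Power-of-two bucket count makes "hash mod buckets" a simple mask.
  std::size_t bucket = hash & (table.buckets.size() - 1);
  for (int entry = table.buckets[bucket]; entry != FlatTable::kNotFound;
       entry = table.chain[entry]) {
    if (table.keys[entry] == key) return entry;  // key_compare succeeded
  }
  return FlatTable::kNotFound;  // reached the end of the bucket chain
}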
+ CSA_ASSERT( + this, + UintPtrLessThan( + var_entry.value(), + SmiUntag(SmiAdd( + CAST(UnsafeLoadFixedArrayElement( + CAST(table), CollectionType::NumberOfElementsIndex())), + CAST(UnsafeLoadFixedArrayElement( + CAST(table), + CollectionType::NumberOfDeletedElementsIndex())))))); + + // Compute the index of the entry relative to kHashTableStartIndex. + entry_start = + IntPtrAdd(IntPtrMul(var_entry.value(), + IntPtrConstant(CollectionType::kEntrySize)), + number_of_buckets); + + // Load the key from the entry. + TNode const candidate_key = UnsafeLoadFixedArrayElement( + CAST(table), entry_start, + CollectionType::HashTableStartIndex() * kTaggedSize); + + key_compare(candidate_key, &if_key_found, &continue_next_entry); + + BIND(&continue_next_entry); + // Load the index of the next entry in the bucket chain. + var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + CAST(table), entry_start, + (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) * + kTaggedSize))); + + Goto(&loop); + } + + BIND(&if_key_found); + entry_start_position->Bind(entry_start); + Goto(entry_found); +} + template -Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( - SloppyTNode context, int map_index, Node* collection) { +TNode CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( + TNode const context, int map_index, + TNode const collection) { TNode const table = LoadObjectField(collection, JSCollection::kTableOffset); TNode const native_context = LoadNativeContext(context); @@ -749,9 +840,8 @@ Node* CollectionsBuiltinsAssembler::AllocateJSCollectionIterator( return iterator; } -TNode CollectionsBuiltinsAssembler::AllocateTable( - Variant variant, TNode context, - TNode at_least_space_for) { +TNode CollectionsBuiltinsAssembler::AllocateTable( + Variant variant, TNode at_least_space_for) { return CAST((variant == kMap || variant == kWeakMap) ? 
AllocateOrderedHashTable() : AllocateOrderedHashTable()); @@ -778,7 +868,7 @@ TF_BUILTIN(SetConstructor, CollectionsBuiltinsAssembler) { } TNode CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw( - SloppyTNode const key) { + const TNode key) { TNode const function_addr = ExternalConstant(ExternalReference::get_or_create_hash_raw()); TNode const isolate_ptr = @@ -787,15 +877,15 @@ TNode CollectionsBuiltinsAssembler::CallGetOrCreateHashRaw( MachineType type_ptr = MachineType::Pointer(); MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_ptr, isolate_ptr), - std::make_pair(type_tagged, key)); + TNode result = CAST(CallCFunction(function_addr, type_tagged, + std::make_pair(type_ptr, isolate_ptr), + std::make_pair(type_tagged, key))); - return CAST(result); + return result; } TNode CollectionsBuiltinsAssembler::CallGetHashRaw( - SloppyTNode const key) { + const TNode key) { TNode const function_addr = ExternalConstant(ExternalReference::orderedhashmap_gethash_raw()); TNode const isolate_ptr = @@ -804,15 +894,15 @@ TNode CollectionsBuiltinsAssembler::CallGetHashRaw( MachineType type_ptr = MachineType::Pointer(); MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_ptr, isolate_ptr), - std::make_pair(type_tagged, key)); + TNode result = CAST(CallCFunction(function_addr, type_tagged, + std::make_pair(type_ptr, isolate_ptr), + std::make_pair(type_tagged, key))); return SmiUntag(result); } TNode CollectionsBuiltinsAssembler::GetHash( - SloppyTNode const key) { + const TNode key) { TVARIABLE(IntPtrT, var_hash); Label if_receiver(this), if_other(this), done(this); Branch(IsJSReceiver(key), &if_receiver, &if_other); @@ -833,9 +923,10 @@ TNode CollectionsBuiltinsAssembler::GetHash( return var_hash.value(); } -void CollectionsBuiltinsAssembler::SameValueZeroSmi( - SloppyTNode key_smi, SloppyTNode candidate_key, Label* if_same, - Label* if_not_same) { +void CollectionsBuiltinsAssembler::SameValueZeroSmi(TNode key_smi, + TNode candidate_key, + Label* if_same, + Label* if_not_same) { // If the key is the same, we are done. 
GotoIf(TaggedEqual(candidate_key, key_smi), if_same); @@ -862,7 +953,7 @@ void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid( DCHECK(isolate()->heap()->map_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } @@ -921,7 +1012,7 @@ void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid( DCHECK(isolate()->heap()->set_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } @@ -998,15 +1089,15 @@ TNode CollectionsBuiltinsAssembler::MapIteratorToList( TNode array_map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); TNode array = - AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr, + AllocateJSArray(kind, array_map, size, SmiTag(size), {}, INTPTR_PARAMETERS, kAllowLargeObjectAllocation); TNode elements = CAST(LoadElements(array)); const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode first_to_element_offset = - ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0); - VARIABLE( - var_offset, MachineType::PointerRepresentation(), + ElementOffsetFromIndex(IntPtrConstant(0), kind, 0); + TVARIABLE( + IntPtrT, var_offset, IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset))); TVARIABLE(IntPtrT, var_index, index); VariableList vars({&var_index, &var_offset}, zone()); @@ -1053,8 +1144,7 @@ TNode CollectionsBuiltinsAssembler::MapIteratorToList( { // Increment the array offset and continue the loop to the next entry. 
var_index = cur_index; - var_offset.Bind( - IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize))); + var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)); Goto(&loop); } } @@ -1111,15 +1201,15 @@ TNode CollectionsBuiltinsAssembler::SetOrSetIteratorToList( TNode array_map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); TNode array = - AllocateJSArray(kind, array_map, size, SmiTag(size), nullptr, + AllocateJSArray(kind, array_map, size, SmiTag(size), {}, INTPTR_PARAMETERS, kAllowLargeObjectAllocation); TNode elements = CAST(LoadElements(array)); const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode first_to_element_offset = - ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0); - VARIABLE( - var_offset, MachineType::PointerRepresentation(), + ElementOffsetFromIndex(IntPtrConstant(0), kind, 0); + TVARIABLE( + IntPtrT, var_offset, IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset))); TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); Label done(this), finalize(this, {&var_index}), @@ -1139,7 +1229,7 @@ TNode CollectionsBuiltinsAssembler::SetOrSetIteratorToList( Store(elements, var_offset.value(), entry_key); var_index = cur_index; - var_offset.Bind(IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize))); + var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize)); Goto(&loop); } @@ -1164,13 +1254,13 @@ TF_BUILTIN(SetOrSetIteratorToList, CollectionsBuiltinsAssembler) { template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey( - Node* table, SloppyTNode smi_key, Variable* result, Label* entry_found, - Label* not_found) { + TNode table, TNode smi_key, TVariable* result, + Label* entry_found, Label* not_found) { TNode const key_untagged = SmiUntag(smi_key); TNode const hash = ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged)); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry( table, hash, [&](TNode other_key, Label* if_same, Label* if_not_same) { @@ -1181,28 +1271,26 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey( template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey( - SloppyTNode context, Node* table, SloppyTNode key_tagged, - Variable* result, Label* entry_found, Label* not_found) { - TNode const hash = ComputeStringHash(context, key_tagged); + TNode table, TNode key_tagged, + TVariable* result, Label* entry_found, Label* not_found) { + TNode const hash = ComputeStringHash(key_tagged); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry( table, hash, [&](TNode other_key, Label* if_same, Label* if_not_same) { - SameValueZeroString(context, key_tagged, other_key, if_same, - if_not_same); + SameValueZeroString(key_tagged, other_key, if_same, if_not_same); }, result, entry_found, not_found); } template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey( - SloppyTNode context, Node* table, - SloppyTNode key_heap_number, Variable* result, - Label* entry_found, Label* not_found) { + TNode table, TNode key_heap_number, + TVariable* result, Label* entry_found, Label* not_found) { TNode const hash = CallGetHashRaw(key_heap_number); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; TNode const key_float = LoadHeapNumberValue(key_heap_number); 
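// A minimal sketch of the SameValueZero comparison used for heap-number keys
// here: unlike strict equality, NaN compares equal to NaN, and +0 compares
// equal to -0, so both zeros land on the same Map/Set entry. Plain doubles
// stand in for Smi/HeapNumber candidates; this is illustrative, not the V8
// helper itself.
#include <cmath>

bool SameValueZeroNumber(double key, double candidate) {
  if (std::isnan(key) && std::isnan(candidate)) return true;  // if_keyisnan
  return key == candidate;  // +0.0 == -0.0 already holds for doubles
}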
FindOrderedHashTableEntry( table, hash, @@ -1214,36 +1302,36 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey( template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey( - SloppyTNode context, Node* table, Node* key, Variable* result, - Label* entry_found, Label* not_found) { - TNode const hash = CallGetHashRaw(key); + TNode table, TNode key_big_int, + TVariable* result, Label* entry_found, Label* not_found) { + TNode const hash = CallGetHashRaw(key_big_int); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry( table, hash, [&](TNode other_key, Label* if_same, Label* if_not_same) { - SameValueZeroBigInt(key, other_key, if_same, if_not_same); + SameValueZeroBigInt(key_big_int, other_key, if_same, if_not_same); }, result, entry_found, not_found); } template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey( - SloppyTNode context, Node* table, SloppyTNode key, - Variable* result, Label* entry_found, Label* not_found) { - TNode const hash = GetHash(key); + TNode table, TNode key_heap_object, + TVariable* result, Label* entry_found, Label* not_found) { + TNode const hash = GetHash(key_heap_object); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0))); - result->Bind(hash); + *result = hash; FindOrderedHashTableEntry( table, hash, [&](TNode other_key, Label* if_same, Label* if_not_same) { - Branch(TaggedEqual(key, other_key), if_same, if_not_same); + Branch(TaggedEqual(key_heap_object, other_key), if_same, if_not_same); }, result, entry_found, not_found); } TNode CollectionsBuiltinsAssembler::ComputeStringHash( - TNode context, TNode string_key) { + TNode string_key) { TVARIABLE(IntPtrT, var_result); Label hash_not_computed(this), done(this, &var_result); @@ -1261,25 +1349,23 @@ TNode CollectionsBuiltinsAssembler::ComputeStringHash( } void CollectionsBuiltinsAssembler::SameValueZeroString( - SloppyTNode context, SloppyTNode key_string, - SloppyTNode candidate_key, Label* if_same, Label* if_not_same) { + TNode key_string, TNode candidate_key, Label* if_same, + Label* if_not_same) { // If the candidate is not a string, the keys are not equal. 
GotoIf(TaggedIsSmi(candidate_key), if_not_same); GotoIfNot(IsString(CAST(candidate_key)), if_not_same); - Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, context, key_string, - candidate_key), + Branch(TaggedEqual(CallBuiltin(Builtins::kStringEqual, NoContextConstant(), + key_string, candidate_key), TrueConstant()), if_same, if_not_same); } -void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key, - Node* candidate_key, - Label* if_same, - Label* if_not_same) { - CSA_ASSERT(this, IsBigInt(key)); +void CollectionsBuiltinsAssembler::SameValueZeroBigInt( + TNode key, TNode candidate_key, Label* if_same, + Label* if_not_same) { GotoIf(TaggedIsSmi(candidate_key), if_not_same); - GotoIfNot(IsBigInt(candidate_key), if_not_same); + GotoIfNot(IsBigInt(CAST(candidate_key)), if_not_same); Branch(TaggedEqual(CallRuntime(Runtime::kBigIntEqualToBigInt, NoContextConstant(), key, candidate_key), @@ -1288,8 +1374,8 @@ void CollectionsBuiltinsAssembler::SameValueZeroBigInt(Node* key, } void CollectionsBuiltinsAssembler::SameValueZeroHeapNumber( - SloppyTNode key_float, SloppyTNode candidate_key, - Label* if_same, Label* if_not_same) { + TNode key_float, TNode candidate_key, Label* if_same, + Label* if_not_same) { Label if_smi(this), if_keyisnan(this); GotoIf(TaggedIsSmi(candidate_key), &if_smi); @@ -1339,20 +1425,20 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) { IntPtrConstant(OrderedHashMap::kClearedTableSentinel)), &return_zero); - VARIABLE(var_i, MachineType::PointerRepresentation(), IntPtrConstant(0)); - VARIABLE(var_index, MachineRepresentation::kTagged, index); + TVARIABLE(IntPtrT, var_i, IntPtrConstant(0)); + TVARIABLE(Smi, var_index, index); Label loop(this, {&var_i, &var_index}); Goto(&loop); BIND(&loop); { - Node* i = var_i.value(); + TNode i = var_i.value(); GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index); STATIC_ASSERT(OrderedHashMap::RemovedHolesIndex() == OrderedHashSet::RemovedHolesIndex()); TNode removed_index = CAST(LoadFixedArrayElement( CAST(table), i, OrderedHashMap::RemovedHolesIndex() * kTaggedSize)); GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index); - Decrement(&var_index, 1, SMI_PARAMETERS); + Decrement(&var_index); Increment(&var_i); Goto(&loop); } @@ -1368,7 +1454,7 @@ template std::pair, TNode> CollectionsBuiltinsAssembler::Transition( TNode const table, TNode const index, - UpdateInTransition const& update_in_transition) { + UpdateInTransition const& update_in_transition) { TVARIABLE(IntPtrT, var_index, index); TVARIABLE(TableType, var_table, table); Label if_done(this), if_transition(this, Label::kDeferred); @@ -1413,7 +1499,8 @@ CollectionsBuiltinsAssembler::TransitionAndUpdate( return Transition( CAST(LoadObjectField(iterator, IteratorType::kTableOffset)), LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset), - [this, iterator](Node* const table, Node* const index) { + [this, iterator](const TNode table, + const TNode index) { // Update the {iterator} with the new state. 
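// A minimal sketch of what OrderedHashTableHealIndex does above: after the
// backing table has been rehashed, an iterator's saved index is "healed" by
// subtracting one for every recorded removed hole that lies before it. The
// early exit mirrors the SmiGreaterThanOrEqual check and assumes holes are
// recorded in ascending order, as that check implies; this is an illustrative
// sketch, not the builtin itself.
#include <vector>

int HealIndex(const std::vector<int>& removed_holes, int index) {
  int healed = index;
  for (int removed : removed_holes) {
    if (removed >= index) break;  // remaining holes are past the index
    --healed;
  }
  return healed;
}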
StoreObjectField(iterator, IteratorType::kTableOffset, table); StoreObjectFieldNoWriteBarrier(iterator, IteratorType::kIndexOffset, @@ -1460,13 +1547,14 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, } TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.get"); - TNode const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode const table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); TNode index = CAST( CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key)); @@ -1485,13 +1573,14 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has"); - TNode const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode const table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); TNode index = CAST( CallBuiltin(Builtins::kFindOrderedHashMapEntry, context, table, key)); @@ -1506,17 +1595,18 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { Return(FalseConstant()); } -Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) { - VARIABLE(result, MachineRepresentation::kTagged, key); +const TNode CollectionsBuiltinsAssembler::NormalizeNumberKey( + const TNode key) { + TVARIABLE(Object, result, key); Label done(this); GotoIf(TaggedIsSmi(key), &done); - GotoIfNot(IsHeapNumber(key), &done); - TNode const number = LoadHeapNumberValue(key); + GotoIfNot(IsHeapNumber(CAST(key)), &done); + TNode const number = LoadHeapNumberValue(CAST(key)); GotoIfNot(Float64Equal(number, Float64Constant(0.0)), &done); // We know the value is zero, so we take the key to be Smi 0. // Another option would be to normalize to Smi here. 
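NormalizeNumberKey above exists because SameValueZero treats -0 and +0 as the same Map/Set key, so a zero-valued heap number is canonicalized to Smi 0 before it is hashed. A tiny standalone illustration with plain doubles, no V8 types:

// normalize_zero_key_sketch.cc -- standalone model, not V8 code.
#include <cassert>
#include <cmath>

// Any zero-valued key is canonicalized to +0 before hashing; otherwise -0.0
// and 0.0 could land in different buckets even though they must be one key.
double NormalizeNumberKey(double key) {
  // -0.0 == 0.0 is true in IEEE-754, so this also catches negative zero.
  return key == 0.0 ? 0.0 : key;
}

int main() {
  assert(NormalizeNumberKey(-0.0) == 0.0);
  assert(!std::signbit(NormalizeNumberKey(-0.0)));  // the sign bit is gone
}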
- result.Bind(SmiConstant(0)); + result = SmiConstant(0); Goto(&done); BIND(&done); @@ -1524,25 +1614,23 @@ Node* CollectionsBuiltinsAssembler::NormalizeNumberKey(Node* const key) { } TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const value = Parameter(Descriptor::kValue); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode value = CAST(Parameter(Descriptor::kValue)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set"); key = NormalizeNumberKey(key); TNode const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(&entry_found); // If we found the entry, we just store the value there. @@ -1561,18 +1649,18 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { &add_entry); // Otherwise, go to runtime to compute the hash code. - entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key))); + entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key))); Goto(&add_entry); } BIND(&add_entry); - VARIABLE(number_of_buckets, MachineType::PointerRepresentation()); - VARIABLE(occupancy, MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, number_of_buckets); + TVARIABLE(IntPtrT, occupancy); TVARIABLE(OrderedHashMap, table_var, table); { // Check we have enough space for the entry. - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashMap::NumberOfBucketsIndex())))); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table, OrderedHashMap::NumberOfBucketsIndex()))); STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2); TNode const capacity = WordShl(number_of_buckets.value(), 1); @@ -1580,20 +1668,21 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset()))); TNode const number_of_deleted = SmiUntag(CAST(LoadObjectField( table, OrderedHashMap::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted)); + occupancy = IntPtrAdd(number_of_elements, number_of_deleted); GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); // We do not have enough space, grow the table and reload the relevant // fields. 
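The capacity check above treats deleted-but-not-yet-compacted entries as occupied: with kLoadFactor == 2 the table holds at most 2 * number_of_buckets entries, and a grow (which also compacts away the holes) is triggered once live plus deleted entries reach that limit. Roughly, in standalone form:

// grow_check_sketch.cc -- standalone model, not V8 code.
#include <cstddef>

constexpr std::size_t kLoadFactor = 2;  // mirrors OrderedHash{Map,Set}::kLoadFactor

// Deleted entries still occupy entry slots until the table is compacted, so
// they count against capacity just like live entries.
bool NeedsGrow(std::size_t buckets, std::size_t live, std::size_t deleted) {
  const std::size_t capacity = buckets * kLoadFactor;
  return live + deleted >= capacity;
}

When this fires, the builtin calls into the runtime to grow the table and then reloads number_of_buckets and occupancy from the new backing store, which is the reload sequence visible in the hunk above.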
CallRuntime(Runtime::kMapGrow, context, receiver); - table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset)); - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashMap::NumberOfBucketsIndex())))); + table_var = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))); TNode const new_number_of_elements = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashMap::NumberOfElementsOffset()))); TNode const new_number_of_deleted = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashMap::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted)); + occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); Goto(&store_new_entry); } BIND(&store_new_entry); @@ -1605,15 +1694,16 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { } void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry( - TNode const table, Node* const key, Node* const value, - Node* const hash, Node* const number_of_buckets, Node* const occupancy) { - TNode const bucket = + TNode const table, const TNode key, + const TNode value, const TNode hash, + const TNode number_of_buckets, const TNode occupancy) { + TNode const bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode bucket_entry = CAST(UnsafeLoadFixedArrayElement( table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize)); // Store the entry elements. - TNode const entry_start = IntPtrAdd( + TNode const entry_start = IntPtrAdd( IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)), number_of_buckets); UnsafeStoreFixedArrayElement( @@ -1642,23 +1732,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry( } TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.delete"); TNode const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(¬_found); Return(FalseConstant()); @@ -1703,24 +1791,22 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.add"); key = 
NormalizeNumberKey(key); TNode const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(&entry_found); // The entry was found, there is nothing to do. @@ -1735,18 +1821,18 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { &add_entry); // Otherwise, go to runtime to compute the hash code. - entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key))); + entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key))); Goto(&add_entry); } BIND(&add_entry); - VARIABLE(number_of_buckets, MachineType::PointerRepresentation()); - VARIABLE(occupancy, MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, number_of_buckets); + TVARIABLE(IntPtrT, occupancy); TVARIABLE(OrderedHashSet, table_var, table); { // Check we have enough space for the entry. - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashSet::NumberOfBucketsIndex())))); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table, OrderedHashSet::NumberOfBucketsIndex()))); STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2); TNode const capacity = WordShl(number_of_buckets.value(), 1); @@ -1754,20 +1840,21 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset()))); TNode const number_of_deleted = SmiUntag(CAST(LoadObjectField( table, OrderedHashSet::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(number_of_elements, number_of_deleted)); + occupancy = IntPtrAdd(number_of_elements, number_of_deleted); GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); // We do not have enough space, grow the table and reload the relevant // fields. 
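The StoreOrderedHash{Map,Set}NewEntry helpers in the surrounding hunks append the new entry after the bucket array and chain it into its bucket: the bucket index is a mask of the hash (bucket counts are powers of two), and the entry's start offset is occupancy * kEntrySize + number_of_buckets. A standalone sketch of that arithmetic, with an assumed two-field set entry (key plus chain link):

// ordered_hash_layout_sketch.cc -- standalone model; names and the entry
// shape are assumptions, not V8's exact layout.
#include <cstddef>

constexpr std::size_t kEntrySize = 2;  // {key, chain link} per set entry

// Bucket counts are powers of two, so the bucket index is a mask of the hash.
std::size_t BucketIndex(std::size_t hash, std::size_t num_buckets) {
  return hash & (num_buckets - 1);
}

// Entries are appended in insertion order after the bucket array; the next
// free entry starts at occupancy * kEntrySize + num_buckets (an index into
// the same flat backing array).
std::size_t EntryStart(std::size_t occupancy, std::size_t num_buckets) {
  return occupancy * kEntrySize + num_buckets;
}

The new entry then records the previous bucket head as its chain link and the bucket is updated to point at the new entry, preserving both fast lookup and insertion order.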
CallRuntime(Runtime::kSetGrow, context, receiver); - table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset)); - number_of_buckets.Bind(SmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashSet::NumberOfBucketsIndex())))); + table_var = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); + number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( + table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))); TNode const new_number_of_elements = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashSet::NumberOfElementsOffset()))); TNode const new_number_of_deleted = SmiUntag(CAST(LoadObjectField( table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset()))); - occupancy.Bind(IntPtrAdd(new_number_of_elements, new_number_of_deleted)); + occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); Goto(&store_new_entry); } BIND(&store_new_entry); @@ -1779,15 +1866,16 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { } void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry( - TNode const table, Node* const key, Node* const hash, - Node* const number_of_buckets, Node* const occupancy) { - TNode const bucket = + TNode const table, const TNode key, + const TNode hash, const TNode number_of_buckets, + const TNode occupancy) { + TNode const bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode bucket_entry = CAST(UnsafeLoadFixedArrayElement( table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize)); // Store the entry elements. - TNode const entry_start = IntPtrAdd( + TNode const entry_start = IntPtrAdd( IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)), number_of_buckets); UnsafeStoreFixedArrayElement( @@ -1812,23 +1900,21 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry( } TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.delete"); TNode const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); Label entry_found(this), not_found(this); - TryLookupOrderedHashTableIndex(table, key, context, - &entry_start_position_or_hash, - &entry_found, ¬_found); + TryLookupOrderedHashTableIndex( + table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(¬_found); Return(FalseConstant()); @@ -1869,29 +1955,30 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeEntries, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.entries"); Return(AllocateJSCollectionIterator( - context, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, 
Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapPrototypeGetSize, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "get Map.prototype.size"); TNode const table = - CAST(LoadObjectField(receiver, JSMap::kTableOffset)); + LoadObjectField(CAST(receiver), JSMap::kTableOffset); Return(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())); } TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Map.prototype.forEach"; - Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* const context = Parameter(Descriptor::kContext); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); + const TNode context = CAST(Parameter(Descriptor::kContext)); + CodeStubArguments args(this, argc); TNode const receiver = args.GetReceiver(); TNode const callback = args.GetOptionalArgumentValue(0); TNode const this_arg = args.GetOptionalArgumentValue(1); @@ -1914,8 +2001,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { // the {receiver} while we're iterating. TNode index = var_index.value(); TNode table = var_table.value(); - std::tie(table, index) = - Transition(table, index, [](Node*, Node*) {}); + std::tie(table, index) = Transition( + table, index, [](const TNode, const TNode) {}); // Read the next entry from the {table}, skipping holes. TNode entry_key; @@ -1951,31 +2038,32 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { } TF_BUILTIN(MapPrototypeKeys, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.keys"); Return(AllocateJSCollectionIterator( - context, Context::MAP_KEY_ITERATOR_MAP_INDEX, receiver)); + context, Context::MAP_KEY_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapPrototypeValues, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.values"); Return(AllocateJSCollectionIterator( - context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::MAP_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Map Iterator.prototype.next"; - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); // Ensure that the {receiver} is actually a JSMapIterator. 
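Map.prototype.forEach above iterates by first following the table's forwarding pointer to the newest backing store (Transition) and then walking entries in insertion order, skipping the holes left by deletions (NextSkipHoles). The hole-skipping half, as a standalone sketch with std::optional standing in for the hole marker:

// iterate_skip_holes_sketch.cc -- standalone model, not V8 code.
#include <iostream>
#include <optional>
#include <vector>

// Deleted entries stay in the entry array as holes until the next rehash.
using Entry = std::optional<int>;

void ForEach(const std::vector<Entry>& entries) {
  for (const Entry& entry : entries) {
    if (!entry) continue;          // NextSkipHoles: step over deleted entries
    std::cout << *entry << '\n';   // visit in insertion order
  }
}

int main() {
  ForEach(std::vector<Entry>{1, std::nullopt, 3});  // prints 1 then 3
}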
Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid); - TNode const receiver_instance_type = LoadInstanceType(receiver); + TNode const receiver_instance_type = + LoadInstanceType(CAST(receiver)); GotoIf( InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_VALUE_ITERATOR_TYPE), &if_receiver_valid); @@ -1989,8 +2077,8 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&if_receiver_valid); // Check if the {receiver} is exhausted. - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label return_value(this, {&var_done, &var_value}), return_entry(this), return_end(this, Label::kDeferred); @@ -2007,22 +2095,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { NextSkipHoles(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset, SmiTag(index)); - var_value.Bind(entry_key); - var_done.Bind(FalseConstant()); + var_value = entry_key; + var_done = FalseConstant(); // Check how to return the {key} (depending on {receiver} type). GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE), &return_value); - var_value.Bind(LoadFixedArrayElement( + var_value = LoadFixedArrayElement( table, entry_start_position, (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) * - kTaggedSize)); + kTaggedSize); Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE), &return_value, &return_entry); BIND(&return_entry); { - Node* result = + TNode result = AllocateJSIteratorResultForEntry(context, entry_key, var_value.value()); Return(result); } @@ -2043,23 +2131,22 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has"); - TNode const table = LoadObjectField(receiver, JSMap::kTableOffset); + TNode const table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); - VARIABLE(entry_start_position, MachineType::PointerRepresentation(), - IntPtrConstant(0)); - VARIABLE(result, MachineRepresentation::kTaggedSigned, IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0)); Label if_key_smi(this), if_key_string(this), if_key_heap_number(this), if_key_bigint(this), entry_found(this), not_found(this), done(this); GotoIf(TaggedIsSmi(key), &if_key_smi); - TNode key_map = LoadMap(key); + TNode key_map = LoadMap(CAST(key)); TNode key_instance_type = LoadMapInstanceType(key_map); GotoIf(IsStringInstanceType(key_instance_type), &if_key_string); @@ -2067,30 +2154,34 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint); FindOrderedHashTableEntryForOtherKey( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, ¬_found); BIND(&if_key_smi); { 
FindOrderedHashTableEntryForSmiKey( - table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_string); { FindOrderedHashTableEntryForStringKey( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_heap_number); { FindOrderedHashTableEntryForHeapNumberKey( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&if_key_bigint); { FindOrderedHashTableEntryForBigIntKey( - context, table, key, &entry_start_position, &entry_found, ¬_found); + CAST(table), CAST(key), &entry_start_position, &entry_found, + ¬_found); } BIND(&entry_found); @@ -2101,29 +2192,30 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.entries"); Return(AllocateJSCollectionIterator( - context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(SetPrototypeGetSize, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "get Set.prototype.size"); TNode const table = - CAST(LoadObjectField(receiver, JSSet::kTableOffset)); + LoadObjectField(CAST(receiver), JSSet::kTableOffset); Return(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())); } TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Set.prototype.forEach"; - Node* const argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* const context = Parameter(Descriptor::kContext); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); + const TNode context = CAST(Parameter(Descriptor::kContext)); + CodeStubArguments args(this, argc); TNode const receiver = args.GetReceiver(); TNode const callback = args.GetOptionalArgumentValue(0); TNode const this_arg = args.GetOptionalArgumentValue(1); @@ -2146,12 +2238,12 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { // the {receiver} while we're iterating. TNode index = var_index.value(); TNode table = var_table.value(); - std::tie(table, index) = - Transition(table, index, [](Node*, Node*) {}); + std::tie(table, index) = Transition( + table, index, [](const TNode, const TNode) {}); // Read the next entry from the {table}, skipping holes. 
- Node* entry_key; - Node* entry_start_position; + TNode entry_key; + TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = NextSkipHoles(table, index, &done_loop); @@ -2176,23 +2268,24 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) { } TF_BUILTIN(SetPrototypeValues, CollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.values"); Return(AllocateJSCollectionIterator( - context, Context::SET_VALUE_ITERATOR_MAP_INDEX, receiver)); + context, Context::SET_VALUE_ITERATOR_MAP_INDEX, CAST(receiver))); } TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { const char* const kMethodName = "Set Iterator.prototype.next"; - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode context = CAST(Parameter(Descriptor::kContext)); // Ensure that the {receiver} is actually a JSSetIterator. Label if_receiver_valid(this), if_receiver_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &if_receiver_invalid); - TNode const receiver_instance_type = LoadInstanceType(receiver); + TNode const receiver_instance_type = + LoadInstanceType(CAST(receiver)); GotoIf(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE), &if_receiver_valid); Branch( @@ -2204,8 +2297,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&if_receiver_valid); // Check if the {receiver} is exhausted. - VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant()); - VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant()); + TVARIABLE(Oddball, var_done, TrueConstant()); + TVARIABLE(Object, var_value, UndefinedConstant()); Label return_value(this, {&var_done, &var_value}), return_entry(this), return_end(this, Label::kDeferred); @@ -2216,14 +2309,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { TransitionAndUpdate(CAST(receiver)); // Read the next entry from the {table}, skipping holes. - Node* entry_key; - Node* entry_start_position; + TNode entry_key; + TNode entry_start_position; std::tie(entry_key, entry_start_position, index) = NextSkipHoles(table, index, &return_end); StoreObjectFieldNoWriteBarrier(receiver, JSSetIterator::kIndexOffset, SmiTag(index)); - var_value.Bind(entry_key); - var_done.Bind(FalseConstant()); + var_value = entry_key; + var_done = FalseConstant(); // Check how to return the {key} (depending on {receiver} type). 
Branch(InstanceTypeEqual(receiver_instance_type, JS_SET_VALUE_ITERATOR_TYPE), @@ -2231,8 +2324,8 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { BIND(&return_entry); { - Node* result = AllocateJSIteratorResultForEntry(context, var_value.value(), - var_value.value()); + TNode result = AllocateJSIteratorResultForEntry( + context, var_value.value(), var_value.value()); Return(result); } @@ -2253,14 +2346,14 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) { template void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex( - Node* const table, Node* const key, Node* const context, Variable* result, - Label* if_entry_found, Label* if_not_found) { + const TNode table, const TNode key, + TVariable* result, Label* if_entry_found, Label* if_not_found) { Label if_key_smi(this), if_key_string(this), if_key_heap_number(this), if_key_bigint(this); GotoIf(TaggedIsSmi(key), &if_key_smi); - TNode key_map = LoadMap(key); + TNode key_map = LoadMap(CAST(key)); TNode key_instance_type = LoadMapInstanceType(key_map); GotoIf(IsStringInstanceType(key_instance_type), &if_key_string); @@ -2268,44 +2361,42 @@ void CollectionsBuiltinsAssembler::TryLookupOrderedHashTableIndex( GotoIf(IsBigIntInstanceType(key_instance_type), &if_key_bigint); FindOrderedHashTableEntryForOtherKey( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); BIND(&if_key_smi); { FindOrderedHashTableEntryForSmiKey( - table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_string); { FindOrderedHashTableEntryForStringKey( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_heap_number); { FindOrderedHashTableEntryForHeapNumberKey( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } BIND(&if_key_bigint); { FindOrderedHashTableEntryForBigIntKey( - context, table, key, result, if_entry_found, if_not_found); + table, CAST(key), result, if_entry_found, if_not_found); } } TF_BUILTIN(FindOrderedHashMapEntry, CollectionsBuiltinsAssembler) { - Node* const table = Parameter(Descriptor::kTable); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode table = CAST(Parameter(Descriptor::kTable)); + const TNode key = CAST(Parameter(Descriptor::kKey)); - VARIABLE(entry_start_position, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + TVARIABLE(IntPtrT, entry_start_position, IntPtrConstant(0)); Label entry_found(this), not_found(this); TryLookupOrderedHashTableIndex( - table, key, context, &entry_start_position, &entry_found, ¬_found); + table, key, &entry_start_position, &entry_found, ¬_found); BIND(&entry_found); Return(SmiTag(entry_start_position.value())); @@ -2324,8 +2415,8 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode key, TNode value, TNode number_of_elements); - TNode AllocateTable(Variant variant, TNode context, - TNode at_least_space_for) override; + TNode AllocateTable(Variant variant, + TNode at_least_space_for) override; // Generates and sets the identity for a JSRececiver. 
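The WeakCollectionsBuiltinsAssembler hunks that follow look keys up by identity hash in a power-of-two table, starting at key_hash & entry_mask and probing until the key or an empty slot is found. A simplified standalone sketch; it uses plain linear probing and assumes the load factor always leaves an empty slot, so the exact probe sequence is not V8's:

// identity_probe_sketch.cc -- standalone model, not V8 code.
#include <cstddef>
#include <vector>

struct Slot { const void* key = nullptr; };  // nullptr == empty slot

// table.size() must be a power of two so the mask wraps the probe; the
// caller keeps the table sparse enough that an empty slot always exists.
std::size_t FindKeyIndex(const std::vector<Slot>& table, std::size_t hash,
                         const void* key) {
  const std::size_t mask = table.size() - 1;
  for (std::size_t count = 0;; ++count) {
    std::size_t entry = (hash + count) & mask;  // linear probe (simplified)
    if (table[entry].key == key || table[entry].key == nullptr) return entry;
  }
}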
TNode CreateIdentityHash(TNode receiver); @@ -2390,9 +2481,8 @@ void WeakCollectionsBuiltinsAssembler::AddEntry( SmiFromIntPtr(number_of_elements), SKIP_WRITE_BARRIER); } -TNode WeakCollectionsBuiltinsAssembler::AllocateTable( - Variant variant, TNode context, - TNode at_least_space_for) { +TNode WeakCollectionsBuiltinsAssembler::AllocateTable( + Variant variant, TNode at_least_space_for) { // See HashTable::New(). CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for)); @@ -2446,8 +2536,7 @@ TNode WeakCollectionsBuiltinsAssembler::FindKeyIndex( TVARIABLE(IntPtrT, var_entry, WordAnd(key_hash, entry_mask)); TVARIABLE(IntPtrT, var_count, IntPtrConstant(0)); - Variable* loop_vars[] = {&var_count, &var_entry}; - Label loop(this, arraysize(loop_vars), loop_vars), if_found(this); + Label loop(this, {&var_count, &var_entry}), if_found(this); Goto(&loop); BIND(&loop); TNode key_index; @@ -2631,9 +2720,9 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) { } TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); Label return_undefined(this); @@ -2653,9 +2742,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) { } TF_BUILTIN(WeakMapPrototypeHas, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); Label return_false(this); @@ -2817,9 +2906,9 @@ TF_BUILTIN(WeakSetPrototypeDelete, CodeStubAssembler) { } TF_BUILTIN(WeakSetPrototypeHas, WeakCollectionsBuiltinsAssembler) { - Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const key = Parameter(Descriptor::kKey); - Node* const context = Parameter(Descriptor::kContext); + const TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + const TNode key = CAST(Parameter(Descriptor::kKey)); + const TNode context = CAST(Parameter(Descriptor::kContext)); Label return_false(this); diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h index 2bde108e9aeffa..a132557e3cd0a4 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.h +++ b/deps/v8/src/builtins/builtins-collections-gen.h @@ -11,13 +11,13 @@ namespace v8 { namespace internal { void BranchIfIterableWithOriginalKeyOrValueMapIterator( - compiler::CodeAssemblerState* state, compiler::TNode iterable, - compiler::TNode context, compiler::CodeAssemblerLabel* if_true, + compiler::CodeAssemblerState* state, TNode iterable, + TNode context, compiler::CodeAssemblerLabel* if_true, compiler::CodeAssemblerLabel* if_false); void BranchIfIterableWithOriginalValueSetIterator( - compiler::CodeAssemblerState* state, compiler::TNode iterable, - compiler::TNode context, compiler::CodeAssemblerLabel* if_true, + compiler::CodeAssemblerState* state, TNode iterable, + TNode context, compiler::CodeAssemblerLabel* if_true, compiler::CodeAssemblerLabel* if_false); } // namespace internal diff --git 
a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc index 1d6a22f61118f6..8a4c8b83da35d0 100644 --- a/deps/v8/src/builtins/builtins-console-gen.cc +++ b/deps/v8/src/builtins/builtins-console-gen.cc @@ -15,15 +15,13 @@ TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) { Label runtime(this); Label out(this); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. TNode argc = UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); - Node* context = Parameter(Descriptor::kContext); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); BranchIfToBooleanIsTrue(args.AtIndex(0), &out, &runtime); BIND(&out); args.PopAndReturn(UndefinedConstant()); diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc index 28c9261ed41fdc..bc743b6e70a8d2 100644 --- a/deps/v8/src/builtins/builtins-console.cc +++ b/deps/v8/src/builtins/builtins-console.cc @@ -39,8 +39,7 @@ namespace internal { namespace { void ConsoleCall( - Isolate* isolate, - internal::BuiltinArguments& args, // NOLINT(runtime/references) + Isolate* isolate, const internal::BuiltinArguments& args, void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&, const v8::debug::ConsoleContext&)) { CHECK(!isolate->has_pending_exception()); diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 856718cedfbf0a..bc03e86f791d46 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -57,12 +57,11 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) { using Node = compiler::Node; TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { - Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo); - Node* feedback_cell = Parameter(Descriptor::kFeedbackCell); - Node* context = Parameter(Descriptor::kContext); - - CSA_ASSERT(this, IsFeedbackCell(feedback_cell)); - CSA_ASSERT(this, IsSharedFunctionInfo(shared_function_info)); + TNode shared_function_info = + CAST(Parameter(Descriptor::kSharedFunctionInfo)); + TNode feedback_cell = + CAST(Parameter(Descriptor::kFeedbackCell)); + TNode context = CAST(Parameter(Descriptor::kContext)); IncrementCounter(isolate()->counters()->fast_new_closure_total(), 1); @@ -90,9 +89,8 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { // The calculation of |function_map_index| must be in sync with // SharedFunctionInfo::function_map_index(). 
- Node* const flags = - LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset, - MachineType::Uint32()); + TNode flags = LoadObjectField( + shared_function_info, SharedFunctionInfo::kFlagsOffset); TNode const function_map_index = Signed(IntPtrAdd( DecodeWordFromWord32(flags), IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX))); @@ -161,7 +159,7 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) { TailCallRuntime(Runtime::kNewObject, context, target, new_target); } -compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode ConstructorBuiltinsAssembler::EmitFastNewObject( SloppyTNode context, SloppyTNode target, SloppyTNode new_target) { TVARIABLE(JSObject, var_obj); @@ -178,7 +176,7 @@ compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( return var_obj.value(); } -compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode ConstructorBuiltinsAssembler::EmitFastNewObject( SloppyTNode context, SloppyTNode target, SloppyTNode new_target, Label* call_runtime) { // Verify that the new target is a JSFunction. @@ -202,17 +200,17 @@ compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset); GotoIf(TaggedNotEqual(target, new_target_constructor), call_runtime); - VARIABLE(properties, MachineRepresentation::kTagged); + TVARIABLE(HeapObject, properties); Label instantiate_map(this), allocate_properties(this); GotoIf(IsDictionaryMap(initial_map), &allocate_properties); { - properties.Bind(EmptyFixedArrayConstant()); + properties = EmptyFixedArrayConstant(); Goto(&instantiate_map); } BIND(&allocate_properties); { - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } @@ -221,11 +219,12 @@ compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( kNone, kWithSlackTracking); } -Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( - Node* scope_info, Node* slots_uint32, Node* context, ScopeType scope_type) { - TNode slots = Signed(ChangeUint32ToWord(slots_uint32)); - TNode size = ElementOffsetFromIndex( - slots, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::kTodoHeaderSize); +TNode ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( + TNode scope_info, TNode slots, TNode context, + ScopeType scope_type) { + TNode slots_intptr = Signed(ChangeUint32ToWord(slots)); + TNode size = ElementOffsetFromIndex(slots_intptr, PACKED_ELEMENTS, + Context::kTodoHeaderSize); // Create a new closure from the given function info in new space TNode function_context = @@ -246,7 +245,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( StoreMapNoWriteBarrier(function_context, context_type); TNode min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS); // TODO(ishell): for now, length also includes MIN_CONTEXT_SLOTS. 
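EmitFastNewFunctionContext above sizes the allocation as a fixed header plus one tagged slot per context slot, then fills every slot with undefined in a fast loop. A standalone sketch of that arithmetic; the header slot count and kTaggedSize here are assumptions, not V8 constants:

// context_alloc_sketch.cc -- standalone model, not V8 code.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kTaggedSize = 8;    // one pointer-sized slot (assumption)
constexpr std::size_t kHeaderSlots = 4;   // map, length, scope info, previous (assumption)
constexpr std::uintptr_t kUndefined = 0;  // stand-in for the undefined sentinel

// Allocation size: fixed header plus one tagged slot per declared slot.
std::size_t ContextByteSize(std::size_t slots) {
  return (kHeaderSlots + slots) * kTaggedSize;
}

// Every slot is then initialized to undefined, like the BuildFastLoop over
// [header end, size) in the builtin.
std::vector<std::uintptr_t> NewFunctionContext(std::size_t slots) {
  return std::vector<std::uintptr_t>(kHeaderSlots + slots, kUndefined);
}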
- TNode length = IntPtrAdd(slots, min_context_slots); + TNode length = IntPtrAdd(slots_intptr, min_context_slots); StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset, SmiTag(length)); StoreObjectFieldNoWriteBarrier(function_context, Context::kScopeInfoOffset, @@ -263,60 +262,60 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( TNode undefined = UndefinedConstant(); TNode start_offset = IntPtrConstant(Context::kTodoHeaderSize); CodeStubAssembler::VariableList vars(0, zone()); - BuildFastLoop( + BuildFastLoop( vars, start_offset, size, - [=](Node* offset) { - StoreObjectFieldNoWriteBarrier( - function_context, UncheckedCast(offset), undefined); + [=](TNode offset) { + StoreObjectFieldNoWriteBarrier(function_context, offset, undefined); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); return function_context; } TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) { - Node* scope_info = Parameter(Descriptor::kScopeInfo); - Node* slots = Parameter(Descriptor::kSlots); - Node* context = Parameter(Descriptor::kContext); + TNode scope_info = CAST(Parameter(Descriptor::kScopeInfo)); + TNode slots = UncheckedCast(Parameter(Descriptor::kSlots)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(EmitFastNewFunctionContext(scope_info, slots, context, ScopeType::EVAL_SCOPE)); } TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) { - Node* scope_info = Parameter(Descriptor::kScopeInfo); - Node* slots = Parameter(Descriptor::kSlots); - Node* context = Parameter(Descriptor::kContext); + TNode scope_info = CAST(Parameter(Descriptor::kScopeInfo)); + TNode slots = UncheckedCast(Parameter(Descriptor::kSlots)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(EmitFastNewFunctionContext(scope_info, slots, context, ScopeType::FUNCTION_SCOPE)); } -Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( - Node* feedback_vector, Node* slot, Node* pattern, Node* flags, - Node* context) { +TNode ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( + TNode maybe_feedback_vector, TNode slot, + TNode pattern, TNode flags, TNode context) { Label call_runtime(this, Label::kDeferred), end(this); - GotoIf(IsUndefined(feedback_vector), &call_runtime); + GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(JSRegExp, result); + TNode feedback_vector = CAST(maybe_feedback_vector); TNode literal_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(literal_site), &call_runtime); { - Node* boilerplate = literal_site; - CSA_ASSERT(this, IsJSRegExp(boilerplate)); + TNode boilerplate = CAST(literal_site); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kTaggedSize; TNode copy = Allocate(size); for (int offset = 0; offset < size; offset += kTaggedSize) { TNode value = LoadObjectField(boilerplate, offset); StoreObjectFieldNoWriteBarrier(copy, offset, value); } - result.Bind(copy); + result = CAST(copy); Goto(&end); } BIND(&call_runtime); { - result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, - feedback_vector, SmiTag(slot), pattern, flags)); + result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context, + maybe_feedback_vector, SmiTag(Signed(slot)), + pattern, flags)); Goto(&end); } @@ -325,25 +324,26 @@ Node* ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( } 
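EmitCreateRegExpLiteral above follows the usual literal-site pattern: if the feedback slot already holds a boilerplate, clone it field by field; otherwise fall back to the runtime, which builds the object and populates the slot. A rough standalone model (the cache fill happens inline here, whereas the builtin leaves it to the runtime call):

// literal_cache_sketch.cc -- standalone model, not V8 code.
#include <memory>
#include <string>

struct RegExpLiteral {
  std::string pattern;
  std::string flags;
};

// Stand-in for the feedback-vector slot: empty until the literal first runs.
struct FeedbackSlot {
  std::shared_ptr<const RegExpLiteral> boilerplate;
};

RegExpLiteral CreateRegExpLiteral(FeedbackSlot& slot,
                                  const std::string& pattern,
                                  const std::string& flags) {
  // Fast path: a boilerplate exists, so a shallow copy is enough.
  if (slot.boilerplate) return *slot.boilerplate;
  // Slow path: build the object and cache it (the builtin defers this part
  // to the Runtime::kCreateRegExpLiteral call).
  slot.boilerplate =
      std::make_shared<RegExpLiteral>(RegExpLiteral{pattern, flags});
  return *slot.boilerplate;
}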
TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* pattern = Parameter(Descriptor::kPattern); - Node* flags = Parameter(Descriptor::kFlags); - Node* context = Parameter(Descriptor::kContext); - Node* result = - EmitCreateRegExpLiteral(feedback_vector, slot, pattern, flags, context); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode pattern = CAST(Parameter(Descriptor::kPattern)); + TNode flags = CAST(Parameter(Descriptor::kFlags)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode result = EmitCreateRegExpLiteral(maybe_feedback_vector, slot, + pattern, flags, context); Return(result); } -Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( - Node* feedback_vector, Node* slot, Node* context, Label* call_runtime, +TNode ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( + TNode feedback_vector, TNode slot, + TNode context, Label* call_runtime, AllocationSiteMode allocation_site_mode) { Label zero_capacity(this), cow_elements(this), fast_elements(this), return_result(this); - VARIABLE(result, MachineRepresentation::kTagged); TNode maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime); TNode allocation_site = CAST(maybe_allocation_site); @@ -358,10 +358,12 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( } TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* constant_elements = Parameter(Descriptor::kConstantElements); - Node* context = Parameter(Descriptor::kContext); + TNode feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode constant_elements = + CAST(Parameter(Descriptor::kConstantElements)); + TNode context = CAST(Parameter(Descriptor::kContext)); Label call_runtime(this, Label::kDeferred); Return(EmitCreateShallowArrayLiteral(feedback_vector, slot, context, &call_runtime, @@ -373,16 +375,18 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) { int const flags = AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow; Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector, - SmiTag(slot), constant_elements, SmiConstant(flags))); + SmiTag(Signed(slot)), constant_elements, + SmiConstant(flags))); } } -Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( - Node* feedback_vector, Node* slot, Node* context) { +TNode ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( + TNode feedback_vector, TNode slot, + TNode context) { // Array literals always have a valid AllocationSite to properly track // elements transitions. 
TNode maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); TVARIABLE(AllocationSite, allocation_site); Label create_empty_array(this), @@ -396,7 +400,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( BIND(&initialize_allocation_site); { allocation_site = - CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot)); + CreateAllocationSiteInFeedbackVector(feedback_vector, slot); Goto(&create_empty_array); } @@ -418,17 +422,20 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( } TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) { - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* context = Parameter(Descriptor::kContext); - Node* result = EmitCreateEmptyArrayLiteral(feedback_vector, slot, context); + TNode feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode result = + EmitCreateEmptyArrayLiteral(feedback_vector, slot, context); Return(result); } -Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( - Node* feedback_vector, Node* slot, Label* call_runtime) { +TNode ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( + TNode feedback_vector, TNode slot, + Label* call_runtime) { TNode maybe_allocation_site = - CAST(LoadFeedbackVectorSlot(feedback_vector, slot, 0, INTPTR_PARAMETERS)); + CAST(LoadFeedbackVectorSlot(feedback_vector, slot)); GotoIf(NotHasBoilerplate(maybe_allocation_site), call_runtime); TNode allocation_site = CAST(maybe_allocation_site); @@ -436,7 +443,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TNode boilerplate_map = LoadMap(boilerplate); CSA_ASSERT(this, IsJSObjectMap(boilerplate_map)); - VARIABLE(var_properties, MachineRepresentation::kTagged); + TVARIABLE(FixedArray, var_properties); { TNode bit_field_3 = LoadMapBitField3(boilerplate_map); GotoIf(IsSetWord32(bit_field_3), call_runtime); @@ -447,8 +454,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( BIND(&if_dictionary); { Comment("Copy dictionary properties"); - var_properties.Bind(CopyNameDictionary( - CAST(LoadSlowProperties(boilerplate)), call_runtime)); + var_properties = CopyNameDictionary(CAST(LoadSlowProperties(boilerplate)), + call_runtime); // Slow objects have no in-object properties. Goto(&done); } @@ -458,13 +465,13 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TNode boilerplate_properties = LoadFastProperties(boilerplate); GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime); - var_properties.Bind(EmptyFixedArrayConstant()); + var_properties = EmptyFixedArrayConstant(); Goto(&done); } BIND(&done); } - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(FixedArrayBase, var_elements); { // Copy the elements backing store, assuming that it's flat. 
Label if_empty_fixed_array(this), if_copy_elements(this), done(this); @@ -473,7 +480,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( &if_copy_elements); BIND(&if_empty_fixed_array); - var_elements.Bind(boilerplate_elements); + var_elements = boilerplate_elements; Goto(&done); BIND(&if_copy_elements); @@ -483,7 +490,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( flags |= ExtractFixedArrayFlag::kAllFixedArrays; flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly; flags |= ExtractFixedArrayFlag::kDontCopyCOW; - var_elements.Bind(CloneFixedArray(boilerplate_elements, flags)); + var_elements = CloneFixedArray(boilerplate_elements, flags); Goto(&done); BIND(&done); } @@ -563,18 +570,18 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( BIND(&continue_with_write_barrier); { Comment("Copy in-object properties slow"); - BuildFastLoop( + BuildFastLoop( offset.value(), instance_size, - [=](Node* offset) { + [=](TNode offset) { // TODO(ishell): value decompression is not necessary here. TNode field = LoadObjectField(boilerplate, offset); StoreObjectFieldNoWriteBarrier(copy, offset, field); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); Comment("Copy mutable HeapNumber values"); - BuildFastLoop( + BuildFastLoop( offset.value(), instance_size, - [=](Node* offset) { + [=](TNode offset) { TNode field = LoadObjectField(copy, offset); Label copy_heap_number(this, Label::kDeferred), continue_loop(this); // We only have to clone complex field values. @@ -593,7 +600,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( } BIND(&continue_loop); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); Goto(&done_init); } BIND(&done_init); @@ -603,29 +610,30 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) { Label call_runtime(this); - Node* feedback_vector = Parameter(Descriptor::kFeedbackVector); - TNode slot = SmiUntag(Parameter(Descriptor::kSlot)); - Node* copy = + TNode feedback_vector = + CAST(Parameter(Descriptor::kFeedbackVector)); + TNode slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot))); + TNode copy = EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime); Return(copy); BIND(&call_runtime); - Node* object_boilerplate_description = - Parameter(Descriptor::kObjectBoilerplateDescription); - Node* flags = Parameter(Descriptor::kFlags); - Node* context = Parameter(Descriptor::kContext); + TNode object_boilerplate_description = + CAST(Parameter(Descriptor::kObjectBoilerplateDescription)); + TNode flags = CAST(Parameter(Descriptor::kFlags)); + TNode context = CAST(Parameter(Descriptor::kContext)); TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, - SmiTag(slot), object_boilerplate_description, flags); + SmiTag(Signed(slot)), object_boilerplate_description, flags); } // Used by the CreateEmptyObjectLiteral bytecode and the Object constructor. 
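EmitCreateShallowObjectLiteral in the hunks above copies the boilerplate's in-object fields offset by offset and then makes a second pass that re-boxes mutable HeapNumber values so the clone does not alias the original's boxes. A standalone model of those two passes, using shared_ptr<double> as a stand-in for a boxed double:

// shallow_object_copy_sketch.cc -- standalone model, not V8 code.
#include <memory>
#include <variant>
#include <vector>

// A field is either an immediate (smi-like) value or a boxed double.
using Field = std::variant<int, std::shared_ptr<double>>;

std::vector<Field> ShallowCopyLiteral(const std::vector<Field>& boilerplate) {
  // First pass: copy every field slot as-is (the offset-by-offset loop).
  std::vector<Field> copy = boilerplate;
  // Second pass: re-box doubles so the copy does not share the boilerplate's
  // boxes (the "Copy mutable HeapNumber values" loop).
  for (Field& field : copy) {
    if (auto* boxed = std::get_if<std::shared_ptr<double>>(&field)) {
      field = std::make_shared<double>(**boxed);
    }
  }
  return copy;
}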
-Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( - Node* context) { +TNode ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( + TNode context) { TNode native_context = LoadNativeContext(context); TNode object_function = CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); - TNode map = CAST(LoadObjectField( - object_function, JSFunction::kPrototypeOrInitialMapOffset)); + TNode map = LoadObjectField( + object_function, JSFunction::kPrototypeOrInitialMapOffset); // Ensure that slack tracking is disabled for the map. STATIC_ASSERT(Map::kNoSlackTracking == 0); CSA_ASSERT( @@ -642,10 +650,10 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { TNode argc = ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - VARIABLE(var_result, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result); Label if_subclass(this, Label::kDeferred), if_notsubclass(this), return_result(this); GotoIf(IsUndefined(new_target), &if_notsubclass); @@ -654,9 +662,8 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { BIND(&if_subclass); { - TNode result = + var_result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - var_result.Bind(result); Goto(&return_result); } @@ -672,15 +679,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { BIND(&if_newobject); { - Node* result = EmitCreateEmptyObjectLiteral(context); - var_result.Bind(result); + var_result = EmitCreateEmptyObjectLiteral(context); Goto(&return_result); } BIND(&if_toobject); { - TNode result = CallBuiltin(Builtins::kToObject, context, value); - var_result.Bind(result); + var_result = CallBuiltin(Builtins::kToObject, context, value); Goto(&return_result); } } @@ -691,13 +696,13 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { // ES #sec-number-constructor TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode argc = ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); // 1. If no arguments were passed to this function invocation, let n be +0. - VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0)); + TVARIABLE(Number, var_n, SmiConstant(0)); Label if_nloaded(this, &var_n); GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &if_nloaded); @@ -706,14 +711,14 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { // b. If Type(prim) is BigInt, let n be the Number value for prim. // c. Otherwise, let n be prim. TNode value = args.AtIndex(0); - var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber)); + var_n = ToNumber(context, value, BigIntHandling::kConvertToNumber); Goto(&if_nloaded); BIND(&if_nloaded); { // 3. If NewTarget is undefined, return n. 
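The surrounding NumberConstructor hunk mirrors the numbered spec steps in its comments: with no argument n is +0, otherwise n is ToNumber of the first argument (converting BigInts); without a NewTarget the primitive is returned, with one a wrapper object is allocated. A standalone sketch of that control flow; ToNumber is heavily simplified here:

// number_ctor_sketch.cc -- standalone model of the spec steps, not V8 code.
#include <cstdlib>
#include <optional>
#include <string>
#include <variant>

struct NumberWrapper { double value; };        // stand-in for a Number object
using Result = std::variant<double, NumberWrapper>;

// Heavily simplified ToNumber stand-in.
double ToNumber(const std::string& argument) {
  return std::strtod(argument.c_str(), nullptr);
}

// new_target_is_undefined distinguishes `Number(x)` from `new Number(x)`.
Result NumberConstructor(const std::optional<std::string>& argument,
                         bool new_target_is_undefined) {
  // 1. With no argument, n is +0.
  double n = argument ? ToNumber(*argument) : +0.0;
  // 3. Called without `new`: return the primitive.
  if (new_target_is_undefined) return n;
  // Otherwise allocate a wrapper whose [[NumberData]] is n.
  return NumberWrapper{n};
}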
- Node* n_value = var_n.value(); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode n_value = var_n.value(); + TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); Label return_n(this), constructnumber(this, Label::kDeferred); Branch(IsUndefined(new_target), &return_n, &constructnumber); @@ -740,7 +745,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { } TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) { - Node* result = Parameter(Descriptor::kResult); + TNode result = CAST(Parameter(Descriptor::kResult)); Return(result); } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h index 9208506c79eced..761a6c7adbc0a6 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.h +++ b/deps/v8/src/builtins/builtins-constructor-gen.h @@ -15,21 +15,28 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler { explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context, - ScopeType scope_type); - - Node* EmitCreateRegExpLiteral(Node* feedback_vector, Node* slot, - Node* pattern, Node* flags, Node* context); - Node* EmitCreateShallowArrayLiteral(Node* feedback_vector, Node* slot, - Node* context, Label* call_runtime, - AllocationSiteMode allocation_site_mode); - - Node* EmitCreateEmptyArrayLiteral(Node* feedback_vector, Node* slot, - Node* context); - - Node* EmitCreateShallowObjectLiteral(Node* feedback_vector, Node* slot, - Label* call_runtime); - Node* EmitCreateEmptyObjectLiteral(Node* context); + TNode EmitFastNewFunctionContext(TNode scope_info, + TNode slots, + TNode context, + ScopeType scope_type); + + TNode EmitCreateRegExpLiteral( + TNode maybe_feedback_vector, TNode slot, + TNode pattern, TNode flags, TNode context); + + TNode EmitCreateShallowArrayLiteral( + TNode feedback_vector, TNode slot, + TNode context, Label* call_runtime, + AllocationSiteMode allocation_site_mode); + + TNode EmitCreateEmptyArrayLiteral( + TNode feedback_vector, TNode slot, + TNode context); + + TNode EmitCreateShallowObjectLiteral( + TNode feedback_vector, TNode slot, + Label* call_runtime); + TNode EmitCreateEmptyObjectLiteral(TNode context); TNode EmitFastNewObject(SloppyTNode context, SloppyTNode target, diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index 8a0c73b29288af..1666cbf6acc41f 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -18,16 +18,17 @@ class ConversionBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void Generate_NonPrimitiveToPrimitive(Node* context, Node* input, + void Generate_NonPrimitiveToPrimitive(TNode context, + TNode input, ToPrimitiveHint hint); - void Generate_OrdinaryToPrimitive(Node* context, Node* input, + void Generate_OrdinaryToPrimitive(TNode context, TNode input, OrdinaryToPrimitiveHint hint); }; // ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] ) void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( - Node* context, Node* input, ToPrimitiveHint hint) { + TNode context, TNode input, ToPrimitiveHint hint) { // Lookup the @@toPrimitive property on the {input}. 
TNode exotic_to_prim = GetProperty(context, input, factory()->to_primitive_symbol()); @@ -42,14 +43,14 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined); TNode hint_string = HeapConstant(factory()->ToPrimitiveHintString(hint)); - Node* result = + TNode result = CallJS(callable, context, exotic_to_prim, input, hint_string); // Verify that the {result} is actually a primitive. Label if_resultisprimitive(this), if_resultisnotprimitive(this, Label::kDeferred); GotoIf(TaggedIsSmi(result), &if_resultisprimitive); - TNode result_instance_type = LoadInstanceType(result); + TNode result_instance_type = LoadInstanceType(CAST(result)); Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive, &if_resultisnotprimitive); @@ -78,22 +79,22 @@ void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( } TF_BUILTIN(NonPrimitiveToPrimitive_Default, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kDefault); } TF_BUILTIN(NonPrimitiveToPrimitive_Number, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kNumber); } TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kString); } @@ -105,22 +106,22 @@ TF_BUILTIN(StringToNumber, CodeStubAssembler) { } TF_BUILTIN(ToName, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); - VARIABLE(var_input, MachineRepresentation::kTagged, input); + TVARIABLE(Object, var_input, input); Label loop(this, &var_input); Goto(&loop); BIND(&loop); { // Load the current {input} value. - Node* input = var_input.value(); + TNode input = var_input.value(); // Dispatch based on the type of the {input.} Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this), if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred); GotoIf(TaggedIsSmi(input), &if_inputisnumber); - TNode input_instance_type = LoadInstanceType(input); + TNode input_instance_type = LoadInstanceType(CAST(input)); STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname); GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver); @@ -151,7 +152,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) { { // Just return the {input}'s string representation. 
CSA_ASSERT(this, IsOddballInstanceType(input_instance_type)); - Return(LoadObjectField(input, Oddball::kToStringOffset)); + Return(LoadObjectField(CAST(input), Oddball::kToStringOffset)); } BIND(&if_inputisreceiver); @@ -159,23 +160,23 @@ TF_BUILTIN(ToName, CodeStubAssembler) { // Convert the JSReceiver {input} to a primitive first, // and then run the loop again with the new {input}, // which is then a primitive value. - var_input.Bind(CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, - context, input)); + var_input = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, + context, input); Goto(&loop); } } } TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(NonNumberToNumber(context, input)); } TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(NonNumberToNumeric(context, input)); } @@ -191,16 +192,19 @@ TF_BUILTIN(ToNumeric, CodeStubAssembler) { // ES6 section 7.1.3 ToNumber ( argument ) TF_BUILTIN(ToNumber, CodeStubAssembler) { + // TODO(solanes, v8:6949): Changing this to a TNode crashes with the + // empty context. Context might not be needed, but it is propagated all over + // the place and hard to pull out. Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(ToNumber(context, input)); } // Like ToNumber, but also converts BigInts. TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(ToNumber(context, input, BigIntHandling::kConvertToNumber)); } @@ -214,8 +218,8 @@ TF_BUILTIN(NumberToString, CodeStubAssembler) { // 7.1.1.1 OrdinaryToPrimitive ( O, hint ) void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( - Node* context, Node* input, OrdinaryToPrimitiveHint hint) { - VARIABLE(var_result, MachineRepresentation::kTagged); + TNode context, TNode input, OrdinaryToPrimitiveHint hint) { + TVARIABLE(Object, var_result); Label return_result(this, &var_result); Handle method_names[2]; @@ -246,12 +250,12 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( // Call the {method} on the {input}. Callable callable = CodeFactory::Call( isolate(), ConvertReceiverMode::kNotNullOrUndefined); - Node* result = CallJS(callable, context, method, input); - var_result.Bind(result); + TNode result = CallJS(callable, context, method, input); + var_result = result; // Return the {result} if it is a primitive. 
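The Generate_OrdinaryToPrimitive changes above keep the ES2015 7.1.1.1 ordering intact while tightening the types. As a rough standalone model of that ordering (toy Value/Method types, not V8's, and both methods assumed callable): a "string" hint tries toString before valueOf, any other hint the reverse, and the first primitive result wins.

    #include <functional>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Value { bool is_primitive; std::string repr; };  // stand-in for a JS value
    using Method = std::function<Value()>;                  // stand-in for obj.valueOf / obj.toString

    Value OrdinaryToPrimitive(const std::string& hint, Method value_of, Method to_string) {
      std::vector<Method> methods = (hint == "string")
          ? std::vector<Method>{to_string, value_of}
          : std::vector<Method>{value_of, to_string};
      for (Method& method : methods) {
        Value result = method();                 // call the method on the input
        if (result.is_primitive) return result;  // return the result if it is a primitive
      }
      throw std::runtime_error("TypeError: cannot convert object to primitive");
    }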
GotoIf(TaggedIsSmi(result), &return_result); - TNode result_instance_type = LoadInstanceType(result); + TNode result_instance_type = LoadInstanceType(CAST(result)); GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result); } @@ -267,22 +271,22 @@ void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( } TF_BUILTIN(OrdinaryToPrimitive_Number, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Generate_OrdinaryToPrimitive(context, input, OrdinaryToPrimitiveHint::kNumber); } TF_BUILTIN(OrdinaryToPrimitive_String, ConversionBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Generate_OrdinaryToPrimitive(context, input, OrdinaryToPrimitiveHint::kString); } // ES6 section 7.1.2 ToBoolean ( argument ) TF_BUILTIN(ToBoolean, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kArgument); + TNode value = CAST(Parameter(Descriptor::kArgument)); Label return_true(this), return_false(this); BranchIfToBooleanIsTrue(value, &return_true, &return_false); @@ -298,7 +302,7 @@ TF_BUILTIN(ToBoolean, CodeStubAssembler) { // Requires parameter on stack so that it can be used as a continuation from a // LAZY deopt. TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kArgument); + TNode value = CAST(Parameter(Descriptor::kArgument)); Label return_true(this), return_false(this); BranchIfToBooleanIsTrue(value, &return_true, &return_false); @@ -311,11 +315,10 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) { } TF_BUILTIN(ToLength, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); // We might need to loop once for ToNumber conversion. - VARIABLE(var_len, MachineRepresentation::kTagged, - Parameter(Descriptor::kArgument)); + TVARIABLE(Object, var_len, CAST(Parameter(Descriptor::kArgument))); Label loop(this, &var_len); Goto(&loop); BIND(&loop); @@ -325,7 +328,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { return_zero(this, Label::kDeferred); // Load the current {len} value. - Node* len = var_len.value(); + TNode len = var_len.value(); // Check if {len} is a positive Smi. GotoIf(TaggedIsPositiveSmi(len), &return_len); @@ -334,14 +337,16 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { GotoIf(TaggedIsSmi(len), &return_zero); // Check if {len} is a HeapNumber. + TNode len_heap_object = CAST(len); Label if_lenisheapnumber(this), if_lenisnotheapnumber(this, Label::kDeferred); - Branch(IsHeapNumber(len), &if_lenisheapnumber, &if_lenisnotheapnumber); + Branch(IsHeapNumber(len_heap_object), &if_lenisheapnumber, + &if_lenisnotheapnumber); BIND(&if_lenisheapnumber); { // Load the floating-point value of {len}. - TNode len_value = LoadHeapNumberValue(len); + TNode len_value = LoadHeapNumberValue(len_heap_object); // Check if {len} is not greater than zero. GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)), @@ -361,7 +366,7 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { BIND(&if_lenisnotheapnumber); { // Need to convert {len} to a Number first. 
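The ToLength builtin in the surrounding hunk loops until its operand is a Number and then clamps it. A minimal sketch of just the clamping step, assuming the operand has already been converted to a double (kMaxSafeInteger here is 2^53 - 1):

    #include <cmath>

    // Rough model of ToLength's numeric clamp: NaN, -0 and negatives become 0,
    // large values are capped at 2^53 - 1, everything else is truncated.
    double ToLengthDouble(double len) {
      constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      if (!(len > 0.0)) return 0.0;                            // also catches NaN
      if (len >= kMaxSafeInteger) return kMaxSafeInteger;
      return std::floor(len);
    }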
- var_len.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, len)); + var_len = CallBuiltin(Builtins::kNonNumberToNumber, context, len); Goto(&loop); } @@ -377,15 +382,15 @@ TF_BUILTIN(ToLength, CodeStubAssembler) { } TF_BUILTIN(ToInteger, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(ToInteger(context, input, kNoTruncation)); } TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kArgument)); Return(ToInteger(context, input, kTruncateMinusZero)); } @@ -396,15 +401,14 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { if_noconstructor(this, Label::kDeferred), if_wrapjs_primitive_wrapper(this); - Node* context = Parameter(Descriptor::kContext); - Node* object = Parameter(Descriptor::kArgument); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode object = CAST(Parameter(Descriptor::kArgument)); - VARIABLE(constructor_function_index_var, - MachineType::PointerRepresentation()); + TVARIABLE(IntPtrT, constructor_function_index_var); GotoIf(TaggedIsSmi(object), &if_smi); - TNode map = LoadMap(object); + TNode map = LoadMap(CAST(object)); TNode instance_type = LoadMapInstanceType(map); GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver); @@ -413,12 +417,12 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { GotoIf(WordEqual(constructor_function_index, IntPtrConstant(Map::kNoConstructorFunctionIndex)), &if_noconstructor); - constructor_function_index_var.Bind(constructor_function_index); + constructor_function_index_var = constructor_function_index; Goto(&if_wrapjs_primitive_wrapper); BIND(&if_smi); - constructor_function_index_var.Bind( - IntPtrConstant(Context::NUMBER_FUNCTION_INDEX)); + constructor_function_index_var = + IntPtrConstant(Context::NUMBER_FUNCTION_INDEX); Goto(&if_wrapjs_primitive_wrapper); BIND(&if_wrapjs_primitive_wrapper); @@ -449,7 +453,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { // ES6 section 12.5.5 typeof operator TF_BUILTIN(Typeof, CodeStubAssembler) { - Node* object = Parameter(Descriptor::kObject); + TNode object = CAST(Parameter(Descriptor::kObject)); Return(Typeof(object)); } diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc index 97600efaa49098..98c1343d2c8f8e 100644 --- a/deps/v8/src/builtins/builtins-date-gen.cc +++ b/deps/v8/src/builtins/builtins-date-gen.cc @@ -18,23 +18,23 @@ class DateBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void Generate_DatePrototype_GetField(Node* context, Node* receiver, - int field_index); + void Generate_DatePrototype_GetField(TNode context, + TNode receiver, int field_index); }; -void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, - Node* receiver, - int field_index) { +void DateBuiltinsAssembler::Generate_DatePrototype_GetField( + TNode context, TNode receiver, int field_index) { Label receiver_not_date(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &receiver_not_date); - TNode receiver_instance_type = LoadInstanceType(receiver); + TNode receiver_instance_type = LoadInstanceType(CAST(receiver)); GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_DATE_TYPE), &receiver_not_date); + TNode 
date_receiver = CAST(receiver); // Load the specified date field, falling back to the runtime as necessary. if (field_index == JSDate::kDateValue) { - Return(LoadObjectField(receiver, JSDate::kValueOffset)); + Return(LoadObjectField(date_receiver, JSDate::kValueOffset)); } else { if (field_index < JSDate::kFirstUncachedField) { Label stamp_mismatch(this, Label::kDeferred); @@ -42,9 +42,9 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, ExternalConstant(ExternalReference::date_cache_stamp(isolate()))); TNode cache_stamp = - LoadObjectField(receiver, JSDate::kCacheStampOffset); + LoadObjectField(date_receiver, JSDate::kCacheStampOffset); GotoIf(TaggedNotEqual(date_cache_stamp, cache_stamp), &stamp_mismatch); - Return(LoadObjectField(receiver, + Return(LoadObjectField(date_receiver, JSDate::kValueOffset + field_index * kTaggedSize)); BIND(&stamp_mismatch); @@ -53,10 +53,10 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, TNode field_index_smi = SmiConstant(field_index); TNode function = ExternalConstant(ExternalReference::get_date_field_function()); - Node* result = CallCFunction( + TNode result = CAST(CallCFunction( function, MachineType::AnyTagged(), - std::make_pair(MachineType::AnyTagged(), receiver), - std::make_pair(MachineType::AnyTagged(), field_index_smi)); + std::make_pair(MachineType::AnyTagged(), date_receiver), + std::make_pair(MachineType::AnyTagged(), field_index_smi))); Return(result); } @@ -66,128 +66,128 @@ void DateBuiltinsAssembler::Generate_DatePrototype_GetField(Node* context, } TF_BUILTIN(DatePrototypeGetDate, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDay); } TF_BUILTIN(DatePrototypeGetDay, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekday); } TF_BUILTIN(DatePrototypeGetFullYear, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kYear); } TF_BUILTIN(DatePrototypeGetHours, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kHour); } TF_BUILTIN(DatePrototypeGetMilliseconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecond); } TF_BUILTIN(DatePrototypeGetMinutes, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = 
CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMinute); } TF_BUILTIN(DatePrototypeGetMonth, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMonth); } TF_BUILTIN(DatePrototypeGetSeconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kSecond); } TF_BUILTIN(DatePrototypeGetTime, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue); } TF_BUILTIN(DatePrototypeGetTimezoneOffset, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kTimezoneOffset); } TF_BUILTIN(DatePrototypeGetUTCDate, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDayUTC); } TF_BUILTIN(DatePrototypeGetUTCDay, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kWeekdayUTC); } TF_BUILTIN(DatePrototypeGetUTCFullYear, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kYearUTC); } TF_BUILTIN(DatePrototypeGetUTCHours, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kHourUTC); } TF_BUILTIN(DatePrototypeGetUTCMilliseconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMillisecondUTC); } TF_BUILTIN(DatePrototypeGetUTCMinutes, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode 
context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMinuteUTC); } TF_BUILTIN(DatePrototypeGetUTCMonth, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kMonthUTC); } TF_BUILTIN(DatePrototypeGetUTCSeconds, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kSecondUTC); } TF_BUILTIN(DatePrototypeValueOf, DateBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Generate_DatePrototype_GetField(context, receiver, JSDate::kDateValue); } TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); TNode hint = CAST(Parameter(Descriptor::kHint)); // Check if the {receiver} is actually a JSReceiver. Label receiver_is_invalid(this, Label::kDeferred); GotoIf(TaggedIsSmi(receiver), &receiver_is_invalid); - GotoIfNot(IsJSReceiver(receiver), &receiver_is_invalid); + GotoIfNot(IsJSReceiver(CAST(receiver)), &receiver_is_invalid); // Dispatch to the appropriate OrdinaryToPrimitive builtin. 
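All of the Date getters above funnel into Generate_DatePrototype_GetField, whose fast path compares a per-object cache stamp against the isolate-wide date cache stamp before trusting a cached field. A minimal sketch of that stamp check, with invented names (DateCache, CachedDate, recompute) standing in for the real runtime pieces:

    #include <cstdint>

    struct DateCache { uint64_t stamp = 0; };  // bumped whenever time settings change

    struct CachedDate {
      uint64_t stamp = 0;                      // stamp at the time the fields were computed
      int fields[8] = {0};                     // cached year, month, day, ...
    };

    int GetDateField(const DateCache& cache, CachedDate& date, int field_index,
                     int (*recompute)(CachedDate&, int)) {
      if (date.stamp == cache.stamp) {
        return date.fields[field_index];       // fast path: cached value is still valid
      }
      return recompute(date, field_index);     // slow path: stands in for the C call
    }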
Label hint_is_number(this), hint_is_string(this), diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc index c3e7601832148f..258b1022da3c60 100644 --- a/deps/v8/src/builtins/builtins-date.cc +++ b/deps/v8/src/builtins/builtins-date.cc @@ -854,16 +854,18 @@ BUILTIN(DatePrototypeToLocaleDateString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleDateString"); + const char* method = "Date.prototype.toLocaleDateString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kDate, // required - JSDateTimeFormat::DefaultsOption::kDate)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kDate, // required + JSDateTimeFormat::DefaultsOption::kDate, // defaults + method)); // method } // ecma402 #sup-date.prototype.tolocalestring @@ -872,16 +874,18 @@ BUILTIN(DatePrototypeToLocaleString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleString"); + const char* method = "Date.prototype.toLocaleString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kAny, // required - JSDateTimeFormat::DefaultsOption::kAll)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kAny, // required + JSDateTimeFormat::DefaultsOption::kAll, // defaults + method)); // method } // ecma402 #sup-date.prototype.tolocaletimestring @@ -890,16 +894,18 @@ BUILTIN(DatePrototypeToLocaleTimeString) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString); - CHECK_RECEIVER(JSDate, date, "Date.prototype.toLocaleTimeString"); + const char* method = "Date.prototype.toLocaleTimeString"; + CHECK_RECEIVER(JSDate, date, method); RETURN_RESULT_OR_FAILURE( isolate, JSDateTimeFormat::ToLocaleDateTime( isolate, - date, // date - args.atOrUndefined(isolate, 1), // locales - args.atOrUndefined(isolate, 2), // options - JSDateTimeFormat::RequiredOption::kTime, // required - JSDateTimeFormat::DefaultsOption::kTime)); // defaults + date, // date + args.atOrUndefined(isolate, 1), // locales + args.atOrUndefined(isolate, 2), // options + JSDateTimeFormat::RequiredOption::kTime, // required + JSDateTimeFormat::DefaultsOption::kTime, // defaults + method)); // method } #endif // V8_INTL_SUPPORT diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 95f5273f14f7fc..2489538d192826 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -102,7 +102,6 @@ namespace internal { ASM(ResumeGeneratorTrampoline, ResumeGenerator) \ \ /* String helpers */ \ - TFC(StringCharAt, StringAt) \ TFC(StringCodePointAt, StringAt) \ TFC(StringFromCodePointAt, StringAtAsString) \ TFC(StringEqual, Compare) \ @@ -219,9 +218,7 @@ namespace internal { TFH(KeyedLoadIC_Slow, LoadWithVector) \ TFH(KeyedStoreIC_Megamorphic, Store) \ 
TFH(KeyedStoreIC_Slow, StoreWithVector) \ - TFH(LoadGlobalIC_Slow, LoadWithVector) \ TFH(LoadIC_FunctionPrototype, LoadWithVector) \ - TFH(LoadIC_Slow, LoadWithVector) \ TFH(LoadIC_StringLength, LoadWithVector) \ TFH(LoadIC_StringWrapperLength, LoadWithVector) \ TFH(LoadIC_NoFeedback, Load) \ @@ -230,7 +227,6 @@ namespace internal { TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \ TFH(KeyedLoadIC_SloppyArguments, LoadWithVector) \ TFH(LoadIndexedInterceptorIC, LoadWithVector) \ - TFH(StoreInterceptorIC, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW, StoreWithVector) \ TFH(KeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB, StoreWithVector) \ @@ -568,6 +564,9 @@ namespace internal { SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ CPP(AsyncFunctionConstructor) \ \ + /* Iterator Protocol */ \ + TFC(GetIteratorWithFeedbackLazyDeoptContinuation, GetIteratorStackParameter) \ + \ /* Global object */ \ CPP(GlobalDecodeURI) \ CPP(GlobalDecodeURIComponent) \ @@ -616,6 +615,10 @@ namespace internal { TFS(IterableToList, kIterable, kIteratorFn) \ TFS(IterableToListWithSymbolLookup, kIterable) \ TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \ + TFS(IterableToFixedArrayForWasm, kIterable, kExpectedLength) \ + \ + /* #sec-createstringlistfromiterable */ \ + TFS(StringListFromIterable, kIterable) \ \ /* Map */ \ TFS(FindOrderedHashMapEntry, kTable, kKey) \ @@ -845,28 +848,13 @@ namespace internal { CPP(RegExpLeftContextGetter) \ /* ES #sec-regexp.prototype.compile */ \ TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \ - /* ES #sec-regexp.prototype.exec */ \ - TFJ(RegExpPrototypeExec, 1, kReceiver, kString) \ - /* https://tc39.github.io/proposal-string-matchall/ */ \ - TFJ(RegExpPrototypeMatchAll, 1, kReceiver, kString) \ - /* ES #sec-regexp.prototype-@@search */ \ - TFJ(RegExpPrototypeSearch, 1, kReceiver, kString) \ CPP(RegExpPrototypeToString) \ CPP(RegExpRightContextGetter) \ \ - /* ES #sec-regexp.prototype-@@split */ \ - TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* RegExp helpers */ \ TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \ TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \ ASM(RegExpInterpreterTrampoline, CCall) \ - TFS(RegExpPrototypeExecSlow, kReceiver, kString) \ - TFS(RegExpSearchFast, kReceiver, kPattern) \ - TFS(RegExpSplit, kRegExp, kString, kLimit) \ - \ - /* RegExp String Iterator */ \ - /* https://tc39.github.io/proposal-string-matchall/ */ \ - TFJ(RegExpStringIteratorPrototypeNext, 0, kReceiver) \ \ /* Set */ \ TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ @@ -1117,7 +1105,6 @@ namespace internal { TFS(SetProperty, kReceiver, kKey, kValue) \ TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \ ASM(MemCopyUint8Uint8, CCall) \ - ASM(MemCopyUint16Uint8, CCall) \ ASM(MemMove, CCall) \ \ /* Trace */ \ @@ -1131,7 +1118,14 @@ namespace internal { CPP(FinalizationGroupRegister) \ CPP(FinalizationGroupUnregister) \ CPP(WeakRefConstructor) \ - CPP(WeakRefDeref) + CPP(WeakRefDeref) \ + \ + /* Async modules */ \ + TFJ(AsyncModuleEvaluate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + \ + /* CallAsyncModule* are spec anonymyous functions */ \ + CPP(CallAsyncModuleFulfilled) \ + CPP(CallAsyncModuleRejected) #ifdef V8_INTL_SUPPORT #define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \ diff --git a/deps/v8/src/builtins/builtins-function-gen.cc 
b/deps/v8/src/builtins/builtins-function-gen.cc index ee1f67d43428b6..f0853e9bd979dc 100644 --- a/deps/v8/src/builtins/builtins-function-gen.cc +++ b/deps/v8/src/builtins/builtins-function-gen.cc @@ -15,14 +15,12 @@ namespace internal { TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { Label slow(this); - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. TNode argc = UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); - Node* context = Parameter(Descriptor::kContext); - Node* new_target = Parameter(Descriptor::kJSNewTarget); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); // Check that receiver has instance type of JS_FUNCTION_TYPE TNode receiver = args.GetReceiver(); @@ -85,21 +83,20 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Choose the right bound function map based on whether the target is // constructable. Comment("Choose the right bound function map"); - VARIABLE(bound_function_map, MachineRepresentation::kTagged); + TVARIABLE(Map, bound_function_map); { Label with_constructor(this); - VariableList vars({&bound_function_map}, zone()); TNode native_context = LoadNativeContext(context); - Label map_done(this, vars); + Label map_done(this, &bound_function_map); GotoIf(IsConstructorMap(receiver_map), &with_constructor); - bound_function_map.Bind(LoadContextElement( + bound_function_map = CAST(LoadContextElement( native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX)); Goto(&map_done); BIND(&with_constructor); - bound_function_map.Bind(LoadContextElement( + bound_function_map = CAST(LoadContextElement( native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX)); Goto(&map_done); @@ -115,30 +112,28 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Allocate the arguments array. Comment("Allocate the arguments array"); - VARIABLE(argument_array, MachineRepresentation::kTagged); + TVARIABLE(FixedArray, argument_array); { Label empty_arguments(this); Label arguments_done(this, &argument_array); GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments); TNode elements_length = Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))))); - TNode elements = CAST(AllocateFixedArray( - PACKED_ELEMENTS, elements_length, kAllowLargeObjectAllocation)); - VARIABLE(index, MachineType::PointerRepresentation()); - index.Bind(IntPtrConstant(0)); + argument_array = CAST(AllocateFixedArray(PACKED_ELEMENTS, elements_length, + kAllowLargeObjectAllocation)); + TVARIABLE(IntPtrT, index, IntPtrConstant(0)); VariableList foreach_vars({&index}, zone()); args.ForEach( foreach_vars, - [this, elements, &index](Node* arg) { - StoreFixedArrayElement(elements, index.value(), arg); + [&](TNode arg) { + StoreFixedArrayElement(argument_array.value(), index.value(), arg); Increment(&index); }, IntPtrConstant(1)); - argument_array.Bind(elements); Goto(&arguments_done); BIND(&empty_arguments); - argument_array.Bind(EmptyFixedArrayConstant()); + argument_array = EmptyFixedArrayConstant(); Goto(&arguments_done); BIND(&arguments_done); @@ -146,16 +141,16 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // Determine bound receiver. 
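The FastFunctionPrototypeBind hunk above copies arguments 1..argc-1 into the bound-arguments FixedArray, and the hunk that resumes right after this sketch picks the bound receiver. Taken together, the argument handling amounts to the following (std::string is a placeholder for a tagged JS value; not V8 code):

    #include <optional>
    #include <string>
    #include <vector>

    using Value = std::string;  // placeholder for a tagged JS value

    struct BoundParts {
      std::optional<Value> receiver;  // empty models `undefined`
      std::vector<Value> bound_args;
    };

    // Argument 0 (if present) becomes the bound receiver; the remaining
    // arguments are captured as the bound-arguments array.
    BoundParts SplitBindArguments(const std::vector<Value>& args) {
      BoundParts parts;
      if (!args.empty()) {
        parts.receiver = args[0];
        parts.bound_args.assign(args.begin() + 1, args.end());
      }
      return parts;
    }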
Comment("Determine bound receiver"); - VARIABLE(bound_receiver, MachineRepresentation::kTagged); + TVARIABLE(Object, bound_receiver); { Label has_receiver(this); Label receiver_done(this, &bound_receiver); GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver); - bound_receiver.Bind(UndefinedConstant()); + bound_receiver = UndefinedConstant(); Goto(&receiver_done); BIND(&has_receiver); - bound_receiver.Bind(args.AtIndex(0)); + bound_receiver = args.AtIndex(0); Goto(&receiver_done); BIND(&receiver_done); @@ -196,10 +191,10 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { // ES6 #sec-function.prototype-@@hasinstance TF_BUILTIN(FunctionPrototypeHasInstance, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* f = Parameter(Descriptor::kReceiver); - Node* v = Parameter(Descriptor::kV); - Node* result = OrdinaryHasInstance(context, f, v); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode f = CAST(Parameter(Descriptor::kReceiver)); + TNode v = CAST(Parameter(Descriptor::kV)); + TNode result = OrdinaryHasInstance(context, f, v); Return(result); } diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc index d884c417fc04a0..0a4b3b205b9503 100644 --- a/deps/v8/src/builtins/builtins-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-generator-gen.cc @@ -19,19 +19,25 @@ class GeneratorBuiltinsAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: + // Currently, AsyncModules in V8 are built on top of JSAsyncFunctionObjects + // with an initial yield. Thus, we need some way to 'resume' the + // underlying JSAsyncFunctionObject owned by an AsyncModule. To support this + // the body of resume is factored out below, and shared by JSGeneratorObject + // prototype methods as well as AsyncModuleEvaluate. The only difference + // between AsyncModuleEvaluate and JSGeneratorObject::PrototypeNext is + // the expected reciever. + void InnerResume(CodeStubArguments* args, Node* receiver, Node* value, + Node* context, JSGeneratorObject::ResumeMode resume_mode, + char const* const method_name); void GeneratorPrototypeResume(CodeStubArguments* args, Node* receiver, Node* value, Node* context, JSGeneratorObject::ResumeMode resume_mode, char const* const method_name); }; -void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( +void GeneratorBuiltinsAssembler::InnerResume( CodeStubArguments* args, Node* receiver, Node* value, Node* context, JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) { - // Check if the {receiver} is actually a JSGeneratorObject. - ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE, - method_name); - // Check if the {receiver} is running or already closed. TNode receiver_continuation = CAST(LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset)); @@ -111,17 +117,46 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( } } +void GeneratorBuiltinsAssembler::GeneratorPrototypeResume( + CodeStubArguments* args, Node* receiver, Node* value, Node* context, + JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) { + // Check if the {receiver} is actually a JSGeneratorObject. 
+ ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE, + method_name); + InnerResume(args, receiver, value, context, resume_mode, method_name); +} + +TF_BUILTIN(AsyncModuleEvaluate, GeneratorBuiltinsAssembler) { + const int kValueArg = 0; + + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); + CodeStubArguments args(this, argc); + + TNode receiver = args.GetReceiver(); + TNode value = args.GetOptionalArgumentValue(kValueArg); + TNode context = Cast(Parameter(Descriptor::kContext)); + + // AsyncModules act like JSAsyncFunctions. Thus we check here + // that the {receiver} is a JSAsyncFunction. + char const* const method_name = "[AsyncModule].evaluate"; + ThrowIfNotInstanceType(context, receiver, JS_ASYNC_FUNCTION_OBJECT_TYPE, + method_name); + InnerResume(&args, receiver, value, context, JSGeneratorObject::kNext, + method_name); +} + // ES6 #sec-generator.prototype.next TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) { const int kValueArg = 0; - TNode argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode receiver = args.GetReceiver(); TNode value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, value, context, JSGeneratorObject::kNext, @@ -132,13 +167,13 @@ TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) { TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) { const int kValueArg = 0; - TNode argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode receiver = args.GetReceiver(); TNode value = args.GetOptionalArgumentValue(kValueArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, value, context, JSGeneratorObject::kReturn, @@ -149,13 +184,13 @@ TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) { TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) { const int kExceptionArg = 0; - TNode argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); TNode receiver = args.GetReceiver(); TNode exception = args.GetOptionalArgumentValue(kExceptionArg); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); GeneratorPrototypeResume(&args, receiver, exception, context, JSGeneratorObject::kThrow, diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index eae8690f1facd0..ef912eabf1fcf6 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -48,8 +48,8 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) { } TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) { - Node* value = Parameter(Descriptor::kReceiver); - Node* string = LoadJSPrimitiveWrapperValue(value); + TNode value = CAST(Parameter(Descriptor::kReceiver)); + TNode string = CAST(LoadJSPrimitiveWrapperValue(value)); Return(LoadStringLengthAsSmi(string)); } @@ -388,15 +388,6 @@ 
TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) { Generate_StoreFastElementIC(STORE_HANDLE_COW); } -TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) { - Node* name = Parameter(Descriptor::kName); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector); -} - TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -411,14 +402,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) { TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector); } -TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kGetProperty, context, receiver, name); -} - TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -491,17 +474,6 @@ TF_BUILTIN(KeyedStoreIC_SloppyArguments_NoTransitionHandleCOW, Generate_KeyedStoreIC_SloppyArguments(); } -TF_BUILTIN(StoreInterceptorIC, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context, value, slot, - vector, receiver, name); -} - TF_BUILTIN(LoadIndexedInterceptorIC, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* key = Parameter(Descriptor::kName); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 445c8c951732c8..0625b8affcd23a 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -18,9 +18,6 @@ namespace v8 { namespace internal { -template -using TNode = compiler::TNode; - // ----------------------------------------------------------------------------- // Stack checks. @@ -32,12 +29,14 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) { // TurboFan support builtins. TF_BUILTIN(CopyFastSmiOrObjectElements, CodeStubAssembler) { - Node* object = Parameter(Descriptor::kObject); + TNode js_object = CAST(Parameter(Descriptor::kObject)); // Load the {object}s elements. 
- TNode source = LoadObjectField(object, JSObject::kElementsOffset); - Node* target = CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays); - StoreObjectField(object, JSObject::kElementsOffset, target); + TNode source = + CAST(LoadObjectField(js_object, JSObject::kElementsOffset)); + TNode target = + CloneFixedArray(source, ExtractFixedArrayFlag::kFixedArrays); + StoreObjectField(js_object, JSObject::kElementsOffset, target); Return(target); } @@ -47,7 +46,7 @@ TF_BUILTIN(GrowFastDoubleElements, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Label runtime(this, Label::kDeferred); - Node* elements = LoadElements(object); + TNode elements = LoadElements(object); elements = TryGrowElementsCapacity(object, elements, PACKED_DOUBLE_ELEMENTS, key, &runtime); Return(elements); @@ -62,7 +61,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Label runtime(this, Label::kDeferred); - Node* elements = LoadElements(object); + TNode elements = LoadElements(object); elements = TryGrowElementsCapacity(object, elements, PACKED_ELEMENTS, key, &runtime); Return(elements); @@ -274,25 +273,24 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET)); } - void CallCFunction1WithCallerSavedRegistersMode(MachineType return_type, - MachineType arg0_type, - Node* function, Node* arg0, - Node* mode, Label* next) { + void CallCFunction2WithCallerSavedRegistersMode( + MachineType return_type, MachineType arg0_type, MachineType arg1_type, + Node* function, Node* arg0, Node* arg1, Node* mode, Label* next) { Label dont_save_fp(this), save_fp(this); Branch(ShouldSkipFPRegs(mode), &dont_save_fp, &save_fp); BIND(&dont_save_fp); { - CallCFunctionWithCallerSavedRegisters(function, return_type, - kDontSaveFPRegs, - std::make_pair(arg0_type, arg0)); + CallCFunctionWithCallerSavedRegisters( + function, return_type, kDontSaveFPRegs, + std::make_pair(arg0_type, arg0), std::make_pair(arg1_type, arg1)); Goto(next); } BIND(&save_fp); { - CallCFunctionWithCallerSavedRegisters(function, return_type, - kSaveFPRegs, - std::make_pair(arg0_type, arg0)); + CallCFunctionWithCallerSavedRegisters(function, return_type, kSaveFPRegs, + std::make_pair(arg0_type, arg0), + std::make_pair(arg1_type, arg1)); Goto(next); } } @@ -321,34 +319,82 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { } } - void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Node* mode, - Label* next) { - TNode store_buffer_top_addr = - ExternalConstant(ExternalReference::store_buffer_top(this->isolate())); - Node* store_buffer_top = - Load(MachineType::Pointer(), store_buffer_top_addr); - StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top, - slot); - TNode new_store_buffer_top = - IntPtrAdd(store_buffer_top, IntPtrConstant(kSystemPointerSize)); - StoreNoWriteBarrier(MachineType::PointerRepresentation(), - store_buffer_top_addr, new_store_buffer_top); - - TNode test = - WordAnd(new_store_buffer_top, - IntPtrConstant(Heap::store_buffer_mask_constant())); - - Label overflow(this); - Branch(IntPtrEqual(test, IntPtrConstant(0)), &overflow, next); - - BIND(&overflow); - { - TNode function = - ExternalConstant(ExternalReference::store_buffer_overflow_function()); - CallCFunction1WithCallerSavedRegistersMode(MachineType::Int32(), - MachineType::Pointer(), - function, isolate, mode, next); - } + void InsertIntoRememberedSetAndGotoSlow(Node* isolate, TNode object, + 
TNode slot, Node* mode, + Label* next) { + TNode page = PageFromAddress(object); + TNode function = + ExternalConstant(ExternalReference::insert_remembered_set_function()); + CallCFunction2WithCallerSavedRegistersMode( + MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(), + function, page, slot, mode, next); + } + + void InsertIntoRememberedSetAndGoto(Node* isolate, TNode object, + TNode slot, Node* mode, + Label* next) { + Label slow_path(this); + TNode page = PageFromAddress(object); + + // Load address of SlotSet + TNode slot_set_array = LoadSlotSetArray(page, &slow_path); + TNode slot_offset = IntPtrSub(slot, page); + + // Load bucket + TNode bucket = LoadBucket(slot_set_array, slot_offset, &slow_path); + + // Update cell + SetBitInCell(bucket, slot_offset); + + Goto(next); + + BIND(&slow_path); + InsertIntoRememberedSetAndGotoSlow(isolate, object, slot, mode, next); + } + + TNode LoadSlotSetArray(TNode page, Label* slow_path) { + TNode slot_set_array = UncheckedCast( + Load(MachineType::Pointer(), page, + IntPtrConstant(MemoryChunk::kOldToNewSlotSetOffset))); + GotoIf(WordEqual(slot_set_array, IntPtrConstant(0)), slow_path); + + return slot_set_array; + } + + TNode LoadBucket(TNode slot_set_array, + TNode slot_offset, Label* slow_path) { + // Assume here that SlotSet only contains of buckets + DCHECK_EQ(SlotSet::kSize, SlotSet::kBuckets * sizeof(SlotSet::Bucket)); + TNode bucket_index = + WordShr(slot_offset, SlotSet::kBitsPerBucketLog2 + kTaggedSizeLog2); + TNode bucket = UncheckedCast( + Load(MachineType::Pointer(), slot_set_array, + WordShl(bucket_index, kSystemPointerSizeLog2))); + GotoIf(WordEqual(bucket, IntPtrConstant(0)), slow_path); + return bucket; + } + + void SetBitInCell(TNode bucket, TNode slot_offset) { + // Load cell value + TNode cell_offset = WordAnd( + WordShr(slot_offset, SlotSet::kBitsPerCellLog2 + kTaggedSizeLog2 - + SlotSet::kCellSizeBytesLog2), + IntPtrConstant((SlotSet::kCellsPerBucket - 1) + << SlotSet::kCellSizeBytesLog2)); + TNode cell_address = + UncheckedCast(IntPtrAdd(bucket, cell_offset)); + TNode old_cell_value = + ChangeInt32ToIntPtr(Load(cell_address)); + + // Calculate new cell value + TNode bit_index = WordAnd(WordShr(slot_offset, kTaggedSizeLog2), + IntPtrConstant(SlotSet::kBitsPerCell - 1)); + TNode new_cell_value = UncheckedCast( + WordOr(old_cell_value, WordShl(IntPtrConstant(1), bit_index))); + + // Update cell value + StoreNoWriteBarrier(MachineRepresentation::kWord32, cell_address, + TruncateIntPtrToInt32(new_cell_value)); } }; @@ -399,7 +445,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { TNode isolate_constant = ExternalConstant(ExternalReference::isolate_address(isolate())); Node* fp_mode = Parameter(Descriptor::kFPMode); - InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, &exit); + TNode object = + BitcastTaggedToWord(Parameter(Descriptor::kObject)); + InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode, + &exit); } BIND(&store_buffer_incremental_wb); @@ -407,8 +456,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { TNode isolate_constant = ExternalConstant(ExternalReference::isolate_address(isolate())); Node* fp_mode = Parameter(Descriptor::kFPMode); - InsertToStoreBufferAndGoto(isolate_constant, slot, fp_mode, - &incremental_wb); + TNode object = + BitcastTaggedToWord(Parameter(Descriptor::kObject)); + InsertIntoRememberedSetAndGoto(isolate_constant, object, slot, fp_mode, + &incremental_wb); } } @@ -532,8 +583,8 @@ TF_BUILTIN(DeleteProperty, 
DeletePropertyBaseAssembler) { TNode language_mode = CAST(Parameter(Descriptor::kLanguageMode)); TNode context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged, key); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), if_notunique(this), if_notfound(this), slow(this), if_proxy(this); @@ -554,8 +605,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { BIND(&if_unique_name); { Comment("key is unique name"); - TNode unique = CAST(var_unique.value()); - CheckForAssociatedProtector(unique, &slow); + CheckForAssociatedProtector(var_unique.value(), &slow); Label dictionary(this), dont_delete(this); GotoIf(IsDictionaryMap(receiver_map), &dictionary); @@ -570,8 +620,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { TNode properties = CAST(LoadSlowProperties(CAST(receiver))); - DeleteDictionaryProperty(receiver, properties, unique, context, - &dont_delete, &if_notfound); + DeleteDictionaryProperty(receiver, properties, var_unique.value(), + context, &dont_delete, &if_notfound); } BIND(&dont_delete); @@ -587,7 +637,7 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) { { // If the string was not found in the string table, then no object can // have a property with that name. - TryInternalizeString(key, &if_index, &var_index, &if_unique_name, + TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name, &var_unique, &if_notfound, &slow); } @@ -719,11 +769,11 @@ TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) { } TF_BUILTIN(ForInEnumerate, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode context = CAST(Parameter(Descriptor::kContext)); Label if_empty(this), if_runtime(this, Label::kDeferred); - Node* receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); + TNode receiver_map = CheckEnumCache(receiver, &if_empty, &if_runtime); Return(receiver_map); BIND(&if_empty); @@ -934,12 +984,6 @@ void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { } #endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) -#ifndef V8_TARGET_ARCH_ARM -void Builtins::Generate_MemCopyUint16Uint8(MacroAssembler* masm) { - masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); -} -#endif // V8_TARGET_ARCH_ARM - #ifndef V8_TARGET_ARCH_IA32 void Builtins::Generate_MemMove(MacroAssembler* masm) { masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc index 1a9a3b7fd9a822..23305537210fee 100644 --- a/deps/v8/src/builtins/builtins-intl-gen.cc +++ b/deps/v8/src/builtins/builtins-intl-gen.cc @@ -17,9 +17,6 @@ namespace v8 { namespace internal { -template -using TNode = compiler::TNode; - class IntlBuiltinsAssembler : public CodeStubAssembler { public: explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -30,6 +27,16 @@ class IntlBuiltinsAssembler : public CodeStubAssembler { const char* method_name); TNode AllocateEmptyJSArray(TNode context); + + TNode PointerToSeqStringData(TNode seq_string) { + CSA_ASSERT(this, + IsSequentialStringInstanceType(LoadInstanceType(seq_string))); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == + SeqTwoByteString::kHeaderSize); + return IntPtrAdd( + 
BitcastTaggedToWord(seq_string), + IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + } }; TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { @@ -61,35 +68,35 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { &call_c); { - Node* const dst_ptr = PointerToSeqStringData(dst); - VARIABLE(var_cursor, MachineType::PointerRepresentation(), - IntPtrConstant(0)); + const TNode dst_ptr = PointerToSeqStringData(dst); + TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0)); - TNode const start_address = to_direct.PointerToData(&call_c); + TNode const start_address = + ReinterpretCast(to_direct.PointerToData(&call_c)); TNode const end_address = Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length))); TNode const to_lower_table_addr = ExternalConstant(ExternalReference::intl_to_latin1_lower_table()); - VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0)); + TVARIABLE(Word32T, var_did_change, Int32Constant(0)); VariableList push_vars({&var_cursor, &var_did_change}, zone()); - BuildFastLoop( + BuildFastLoop( push_vars, start_address, end_address, - [=, &var_cursor, &var_did_change](Node* current) { + [&](TNode current) { TNode c = Load(current); TNode lower = Load(to_lower_table_addr, ChangeInt32ToIntPtr(c)); StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr, var_cursor.value(), lower); - var_did_change.Bind( - Word32Or(Word32NotEqual(c, lower), var_did_change.value())); + var_did_change = + Word32Or(Word32NotEqual(c, lower), var_did_change.value()); Increment(&var_cursor); }, - kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kCharSize, IndexAdvanceMode::kPost); // Return the original string if it remained unchanged in order to preserve // e.g. internalization and private symbols (such as the preserved object @@ -110,9 +117,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) { MachineType type_tagged = MachineType::AnyTagged(); - Node* const result = CallCFunction(function_addr, type_tagged, - std::make_pair(type_tagged, src), - std::make_pair(type_tagged, dst)); + const TNode result = CAST(CallCFunction( + function_addr, type_tagged, std::make_pair(type_tagged, src), + std::make_pair(type_tagged, dst))); Return(result); } @@ -142,7 +149,7 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode context, TNode argc, Runtime::FunctionId format_func_id, const char* method_name) { - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); // Label has_list(this); // 1. Let lf be this value. @@ -151,32 +158,18 @@ void IntlBuiltinsAssembler::ListFormatCommon(TNode context, // 3. If lf does not have an [[InitializedListFormat]] internal slot, throw a // TypeError exception. - ThrowIfNotInstanceType(context, receiver, JS_INTL_LIST_FORMAT_TYPE, - method_name); + ThrowIfNotInstanceType(context, receiver, JS_LIST_FORMAT_TYPE, method_name); TNode list_format = CAST(receiver); - // 4. If list is not provided or is undefined, then TNode list = args.GetOptionalArgumentValue(0); - Label has_list(this); - { - GotoIfNot(IsUndefined(list), &has_list); - if (format_func_id == Runtime::kFormatList) { - // a. Return an empty String. - args.PopAndReturn(EmptyStringConstant()); - } else { - DCHECK_EQ(format_func_id, Runtime::kFormatListToParts); - // a. Return an empty Array. - args.PopAndReturn(AllocateEmptyJSArray(context)); - } - } - BIND(&has_list); { - // 5. Let x be ? IterableToList(list). - TNode x = - CallBuiltin(Builtins::kIterableToListWithSymbolLookup, context, list); + // 4. Let stringList be ? 
StringListFromIterable(list). + TNode string_list = + CallBuiltin(Builtins::kStringListFromIterable, context, list); - // 6. Return ? FormatList(lf, x). - args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x)); + // 6. Return ? FormatList(lf, stringList). + args.PopAndReturn( + CallRuntime(format_func_id, context, list_format, string_list)); } } diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index ff8e96f4f512f0..81954a481f0a44 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -83,13 +83,8 @@ BUILTIN(NumberFormatPrototypeFormatToParts) { Handle x; if (args.length() >= 2) { - if (FLAG_harmony_intl_bigint) { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, x, Object::ToNumeric(isolate, args.at(1))); - } else { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, - Object::ToNumber(isolate, args.at(1))); - } + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, + Object::ToNumeric(isolate, args.at(1))); } else { x = isolate->factory()->nan_value(); } @@ -282,8 +277,8 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate, // 3. Perform ? Initialize(Format, locales, options). Handle format; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format, - T::New(isolate, map, locales, options)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, format, T::New(isolate, map, locales, options, method)); // 4. Let this be the this value. Handle receiver = args.receiver(); @@ -367,7 +362,8 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate, * Common code shared by Collator and V8BreakIterator */ template -Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { +Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate, + const char* method) { Handle new_target; if (args.new_target()->IsUndefined(isolate)) { @@ -386,7 +382,8 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); - RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); + RETURN_RESULT_OR_FAILURE(isolate, + T::New(isolate, map, locales, options, method)); } } // namespace @@ -466,13 +463,8 @@ BUILTIN(NumberFormatInternalFormatNumber) { // 4. Let x be ? ToNumeric(value). 
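A couple of hunks back, StringToLowerCaseIntl's fast path walks the one-byte string, maps each character through a Latin-1 lowercase table, and keeps a did-change flag so an unchanged input can be returned as-is (preserving internalized strings). A standalone approximation of that loop, with std::tolower standing in for V8's intl_to_latin1_lower_table:

    #include <cctype>
    #include <string>

    std::string ToLowerOneByte(const std::string& src, bool* changed) {
      std::string dst(src.size(), '\0');
      *changed = false;
      for (size_t i = 0; i < src.size(); ++i) {
        unsigned char c = static_cast<unsigned char>(src[i]);
        unsigned char lower = static_cast<unsigned char>(std::tolower(c));
        dst[i] = static_cast<char>(lower);
        if (lower != c) *changed = true;  // remember whether anything actually changed
      }
      return *changed ? dst : src;        // hand back the original when nothing changed
    }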
Handle numeric_obj; - if (FLAG_harmony_intl_bigint) { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumeric(isolate, value)); - } else { - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, - Object::ToNumber(isolate, value)); - } + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, numeric_obj, + Object::ToNumeric(isolate, value)); icu::number::LocalizedNumberFormatter* icu_localized_number_formatter = number_format->icu_number_formatter().raw(); @@ -884,7 +876,7 @@ BUILTIN(CollatorConstructor) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kCollator); - return CallOrConstructConstructor(args, isolate); + return CallOrConstructConstructor(args, isolate, "Intl.Collator"); } BUILTIN(CollatorPrototypeResolvedOptions) { @@ -1069,7 +1061,8 @@ BUILTIN(SegmenterPrototypeSegment) { BUILTIN(V8BreakIteratorConstructor) { HandleScope scope(isolate); - return CallOrConstructConstructor(args, isolate); + return CallOrConstructConstructor(args, isolate, + "Intl.v8BreakIterator"); } BUILTIN(V8BreakIteratorPrototypeResolvedOptions) { diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 7bd5acfdcda845..2f8761902b5553 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -241,6 +241,104 @@ TF_BUILTIN(IterableToList, IteratorBuiltinsAssembler) { Return(IterableToList(context, iterable, iterator_fn)); } +TF_BUILTIN(IterableToFixedArrayForWasm, IteratorBuiltinsAssembler) { + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode iterable = CAST(Parameter(Descriptor::kIterable)); + TNode expected_length = CAST(Parameter(Descriptor::kExpectedLength)); + + TNode iterator_fn = GetIteratorMethod(context, iterable); + + IteratorRecord iterator_record = GetIterator(context, iterable, iterator_fn); + + GrowableFixedArray values(state()); + + Variable* vars[] = {values.var_array(), values.var_length(), + values.var_capacity()}; + Label loop_start(this, 3, vars), compare_length(this), done(this); + Goto(&loop_start); + BIND(&loop_start); + { + TNode next = + IteratorStep(context, iterator_record, &compare_length); + TNode next_value = IteratorValue(context, next); + values.Push(next_value); + Goto(&loop_start); + } + + BIND(&compare_length); + GotoIf(WordEqual(SmiUntag(expected_length), values.var_length()->value()), + &done); + Return(CallRuntime( + Runtime::kThrowTypeError, context, + SmiConstant(MessageTemplate::kWasmTrapMultiReturnLengthMismatch))); + + BIND(&done); + Return(values.var_array()->value()); +} + +TNode IteratorBuiltinsAssembler::StringListFromIterable( + TNode context, TNode iterable) { + Label done(this); + GrowableFixedArray list(state()); + // 1. If iterable is undefined, then + // a. Return a new empty List. + GotoIf(IsUndefined(iterable), &done); + + // 2. Let iteratorRecord be ? GetIterator(items). + IteratorRecord iterator_record = GetIterator(context, iterable); + + // 3. Let list be a new empty List. + + Variable* vars[] = {list.var_array(), list.var_length(), list.var_capacity()}; + Label loop_start(this, 3, vars); + Goto(&loop_start); + // 4. Let next be true. + // 5. Repeat, while next is not false + Label if_isnotstringtype(this, Label::kDeferred), + if_exception(this, Label::kDeferred); + BIND(&loop_start); + { + // a. Set next to ? IteratorStep(iteratorRecord). + TNode next = IteratorStep(context, iterator_record, &done); + // b. If next is not false, then + // i. Let nextValue be ? IteratorValue(next). 
+ TNode next_value = IteratorValue(context, next); + // ii. If Type(nextValue) is not String, then + GotoIf(TaggedIsSmi(next_value), &if_isnotstringtype); + TNode next_value_type = LoadInstanceType(CAST(next_value)); + GotoIfNot(IsStringInstanceType(next_value_type), &if_isnotstringtype); + // iii. Append nextValue to the end of the List list. + list.Push(next_value); + Goto(&loop_start); + // 5.b.ii + BIND(&if_isnotstringtype); + { + // 1. Let error be ThrowCompletion(a newly created TypeError object). + TVARIABLE(Object, var_exception); + TNode ret = CallRuntime( + Runtime::kThrowTypeError, context, + SmiConstant(MessageTemplate::kIterableYieldedNonString), next_value); + GotoIfException(ret, &if_exception, &var_exception); + Unreachable(); + + // 2. Return ? IteratorClose(iteratorRecord, error). + BIND(&if_exception); + IteratorCloseOnException(context, iterator_record, var_exception.value()); + } + } + + BIND(&done); + // 6. Return list. + return list.ToJSArray(context); +} + +TF_BUILTIN(StringListFromIterable, IteratorBuiltinsAssembler) { + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode iterable = CAST(Parameter(Descriptor::kIterable)); + + Return(StringListFromIterable(context, iterable)); +} + // This builtin always returns a new JSArray and is thus safe to use even in the // presence of code that may call back into user-JS. This builtin will take the // fast path if the iterable is a fast array and the Array prototype and the @@ -354,5 +452,19 @@ TF_BUILTIN(IterableToListWithSymbolLookup, IteratorBuiltinsAssembler) { } } +TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation, + IteratorBuiltinsAssembler) { + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode callSlot = CAST(Parameter(Descriptor::kCallSlot)); + TNode feedback = CAST(Parameter(Descriptor::kFeedback)); + TNode iteratorMethod = CAST(Parameter(Descriptor::kResult)); + + TNode result = + CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver, + iteratorMethod, callSlot, feedback); + Return(result); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h index 2a0a510f738782..7d6e7d5b811c1a 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.h +++ b/deps/v8/src/builtins/builtins-iterator-gen.h @@ -68,6 +68,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { TNode IterableToList(TNode context, TNode iterable, TNode iterator_fn); + // Currently at https://tc39.github.io/proposal-intl-list-format/ + // #sec-createstringlistfromiterable + TNode StringListFromIterable(TNode context, + TNode iterable); + void FastIterableToList(TNode context, TNode iterable, TVariable* var_result, Label* slow); }; diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc index 42d0162f388d33..3bae7c06c35b46 100644 --- a/deps/v8/src/builtins/builtins-math-gen.cc +++ b/deps/v8/src/builtins/builtins-math-gen.cc @@ -143,20 +143,18 @@ void MathBuiltinsAssembler::MathRoundingOperation( } void MathBuiltinsAssembler::MathMaxMin( - Node* context, Node* argc, + TNode context, TNode argc, TNode (CodeStubAssembler::*float64op)(SloppyTNode, SloppyTNode), double default_val) { - CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); - argc = arguments.GetLength(INTPTR_PARAMETERS); + CodeStubArguments arguments(this, argc); - VARIABLE(result, MachineRepresentation::kFloat64); - 
result.Bind(Float64Constant(default_val)); + TVARIABLE(Float64T, result, Float64Constant(default_val)); CodeStubAssembler::VariableList vars({&result}, zone()); - arguments.ForEach(vars, [=, &result](Node* arg) { - Node* float_value = TruncateTaggedToFloat64(context, arg); - result.Bind((this->*float64op)(result.value(), float_value)); + arguments.ForEach(vars, [&](TNode arg) { + TNode float_value = TruncateTaggedToFloat64(context, arg); + result = (this->*float64op)(result.value(), float_value); }); arguments.PopAndReturn(ChangeFloat64ToTagged(result.value())); @@ -181,8 +179,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* x = Parameter(Descriptor::kX); Node* y = Parameter(Descriptor::kY); - Node* x_value = TruncateTaggedToWord32(context, x); - Node* y_value = TruncateTaggedToWord32(context, y); + TNode x_value = TruncateTaggedToWord32(context, x); + TNode y_value = TruncateTaggedToWord32(context, y); TNode value = Signed(Int32Mul(x_value, y_value)); TNode result = ChangeInt32ToTagged(value); Return(result); @@ -191,8 +189,8 @@ TF_BUILTIN(MathImul, CodeStubAssembler) { CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context, Node* base, Node* exponent) { - Node* base_value = TruncateTaggedToFloat64(context, base); - Node* exponent_value = TruncateTaggedToFloat64(context, exponent); + TNode base_value = TruncateTaggedToFloat64(context, base); + TNode exponent_value = TruncateTaggedToFloat64(context, exponent); TNode value = Float64Pow(base_value, exponent_value); return ChangeFloat64ToTagged(value); } @@ -260,19 +258,17 @@ TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) { // ES6 #sec-math.max TF_BUILTIN(MathMax, MathBuiltinsAssembler) { - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. - Node* context = Parameter(Descriptor::kContext); - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); MathMaxMin(context, argc, &CodeStubAssembler::Float64Max, -1.0 * V8_INFINITY); } // ES6 #sec-math.min TF_BUILTIN(MathMin, MathBuiltinsAssembler) { - // TODO(ishell): use constants from Descriptor once the JSFunction linkage - // arguments are reordered. 
- Node* context = Parameter(Descriptor::kContext); - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); MathMaxMin(context, argc, &CodeStubAssembler::Float64Min, V8_INFINITY); } diff --git a/deps/v8/src/builtins/builtins-math-gen.h b/deps/v8/src/builtins/builtins-math-gen.h index 4bb76d96922681..4de654fa201609 100644 --- a/deps/v8/src/builtins/builtins-math-gen.h +++ b/deps/v8/src/builtins/builtins-math-gen.h @@ -21,7 +21,7 @@ class MathBuiltinsAssembler : public CodeStubAssembler { void MathRoundingOperation( Node* context, Node* x, TNode (CodeStubAssembler::*float64op)(SloppyTNode)); - void MathMaxMin(Node* context, Node* argc, + void MathMaxMin(TNode context, TNode argc, TNode (CodeStubAssembler::*float64op)( SloppyTNode, SloppyTNode), double default_val); diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index 427fd6edb65f71..62aee3b300b371 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -14,9 +14,6 @@ namespace v8 { namespace internal { -template -using TNode = compiler::TNode; - class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler { public: explicit MicrotaskQueueBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -60,23 +57,20 @@ TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue( TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskRingBuffer( TNode microtask_queue) { - return UncheckedCast( - Load(MachineType::Pointer(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kRingBufferOffset))); + return Load(microtask_queue, + IntPtrConstant(MicrotaskQueue::kRingBufferOffset)); } TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueCapacity( TNode microtask_queue) { - return UncheckedCast( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kCapacityOffset))); + return Load(microtask_queue, + IntPtrConstant(MicrotaskQueue::kCapacityOffset)); } TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueSize( TNode microtask_queue) { - return UncheckedCast( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kSizeOffset))); + return Load(microtask_queue, + IntPtrConstant(MicrotaskQueue::kSizeOffset)); } void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize( @@ -87,9 +81,8 @@ void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueSize( TNode MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueueStart( TNode microtask_queue) { - return UncheckedCast( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kStartOffset))); + return Load(microtask_queue, + IntPtrConstant(MicrotaskQueue::kStartOffset)); } void MicrotaskQueueBuiltinsAssembler::SetMicrotaskQueueStart( @@ -125,7 +118,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( TNode microtask_map = LoadMap(microtask); TNode microtask_type = LoadMapInstanceType(microtask_map); - VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant()); + TVARIABLE(HeapObject, var_exception, TheHoleConstant()); Label if_exception(this, Label::kDeferred); Label is_callable(this), is_callback(this), is_promise_fulfill_reaction_job(this), @@ -295,9 +288,9 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount( TNode microtask_queue) { - TNode count = 
UncheckedCast( - Load(MachineType::IntPtr(), microtask_queue, - IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset))); + TNode count = Load( + microtask_queue, + IntPtrConstant(MicrotaskQueue::kFinishedMicrotaskCountOffset)); TNode new_count = IntPtrAdd(count, IntPtrConstant(1)); StoreNoWriteBarrier( MachineType::PointerRepresentation(), microtask_queue, @@ -306,6 +299,8 @@ void MicrotaskQueueBuiltinsAssembler::IncrementFinishedMicrotaskCount( TNode MicrotaskQueueBuiltinsAssembler::GetCurrentContext() { auto ref = ExternalReference::Create(kContextAddress, isolate()); + // TODO(delphick): Add a checked cast. For now this is not possible as context + // can actually be Smi(0). return TNode::UncheckedCast(LoadFullTagged(ExternalConstant(ref))); } @@ -317,15 +312,13 @@ void MicrotaskQueueBuiltinsAssembler::SetCurrentContext( TNode MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() { auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode hsi = Load(ExternalConstant(ref)); using ContextStack = DetachableVector; TNode size_offset = IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kSizeOffset); - TNode size = - UncheckedCast(Load(MachineType::IntPtr(), hsi, size_offset)); - return size; + return Load(hsi, size_offset); } void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( @@ -333,7 +326,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( CSA_ASSERT(this, IsNativeContext(native_context)); auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode hsi = Load(ExternalConstant(ref)); using ContextStack = DetachableVector; TNode capacity_offset = @@ -343,10 +336,8 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kSizeOffset); - TNode capacity = - UncheckedCast(Load(MachineType::IntPtr(), hsi, capacity_offset)); - TNode size = - UncheckedCast(Load(MachineType::IntPtr(), hsi, size_offset)); + TNode capacity = Load(hsi, capacity_offset); + TNode size = Load(hsi, size_offset); Label if_append(this), if_grow(this, Label::kDeferred), done(this); Branch(WordEqual(size, capacity), &if_grow, &if_append); @@ -355,7 +346,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( TNode data_offset = IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kDataOffset); - Node* data = Load(MachineType::Pointer(), hsi, data_offset); + TNode data = Load(hsi, data_offset); StoreFullTaggedNoWriteBarrier(data, TimesSystemPointerSize(size), native_context); @@ -367,7 +358,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( TNode flag_data_offset = IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + FlagStack::kDataOffset); - Node* flag_data = Load(MachineType::Pointer(), hsi, flag_data_offset); + TNode flag_data = Load(hsi, flag_data_offset); StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size, BoolConstant(true)); StoreNoWriteBarrier( @@ -396,7 +387,7 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext( TNode saved_entered_context_count) { auto ref = ExternalReference::handle_scope_implementer_address(isolate()); - Node* hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + TNode hsi = Load(ExternalConstant(ref)); using 
ContextStack = DetachableVector; TNode size_offset = @@ -404,8 +395,7 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext( ContextStack::kSizeOffset); #ifdef ENABLE_VERIFY_CSA - TNode size = - UncheckedCast(Load(MachineType::IntPtr(), hsi, size_offset)); + TNode size = Load(hsi, size_offset); CSA_ASSERT(this, IntPtrLessThan(IntPtrConstant(0), size)); CSA_ASSERT(this, IntPtrLessThanOrEqual(saved_entered_context_count, size)); #endif @@ -446,8 +436,7 @@ void MicrotaskQueueBuiltinsAssembler::RunPromiseHook( } TF_BUILTIN(EnqueueMicrotask, MicrotaskQueueBuiltinsAssembler) { - TNode microtask = - UncheckedCast(Parameter(Descriptor::kMicrotask)); + TNode microtask = CAST(Parameter(Descriptor::kMicrotask)); TNode context = CAST(Parameter(Descriptor::kContext)); TNode native_context = LoadNativeContext(context); TNode microtask_queue = GetMicrotaskQueue(native_context); @@ -517,8 +506,7 @@ TF_BUILTIN(RunMicrotasks, MicrotaskQueueBuiltinsAssembler) { TNode offset = CalculateRingBufferOffset(capacity, start, IntPtrConstant(0)); - TNode microtask_pointer = - UncheckedCast(Load(MachineType::Pointer(), ring_buffer, offset)); + TNode microtask_pointer = Load(ring_buffer, offset); TNode microtask = CAST(BitcastWordToTagged(microtask_pointer)); TNode new_size = IntPtrSub(size, IntPtrConstant(1)); diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 2aa996eba0dc2f..fc737b793bec26 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -22,57 +22,58 @@ class NumberBuiltinsAssembler : public CodeStubAssembler { protected: template void EmitBitwiseOp(Operation op) { - Node* left = Parameter(Descriptor::kLeft); - Node* right = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); - - VARIABLE(var_left_word32, MachineRepresentation::kWord32); - VARIABLE(var_right_word32, MachineRepresentation::kWord32); - VARIABLE(var_left_bigint, MachineRepresentation::kTagged, left); - VARIABLE(var_right_bigint, MachineRepresentation::kTagged); + TNode left = CAST(Parameter(Descriptor::kLeft)); + TNode right = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); + + TVARIABLE(Word32T, var_left_word32); + TVARIABLE(Word32T, var_right_word32); + TVARIABLE(Object, var_left_maybe_bigint, left); + TVARIABLE(Object, var_right_maybe_bigint); Label if_left_number(this), do_number_op(this); Label if_left_bigint(this), do_bigint_op(this); TaggedToWord32OrBigInt(context, left, &if_left_number, &var_left_word32, - &if_left_bigint, &var_left_bigint); + &if_left_bigint, &var_left_maybe_bigint); BIND(&if_left_number); TaggedToWord32OrBigInt(context, right, &do_number_op, &var_right_word32, - &do_bigint_op, &var_right_bigint); + &do_bigint_op, &var_right_maybe_bigint); BIND(&do_number_op); Return(BitwiseOp(var_left_word32.value(), var_right_word32.value(), op)); // BigInt cases. 
BIND(&if_left_bigint); - TaggedToNumeric(context, right, &do_bigint_op, &var_right_bigint); + TaggedToNumeric(context, right, &do_bigint_op, &var_right_maybe_bigint); BIND(&do_bigint_op); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, - var_left_bigint.value(), var_right_bigint.value(), - SmiConstant(op))); + var_left_maybe_bigint.value(), + var_right_maybe_bigint.value(), SmiConstant(op))); } template void RelationalComparisonBuiltin(Operation op) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode lhs = CAST(Parameter(Descriptor::kLeft)); + TNode rhs = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(RelationalComparison(op, lhs, rhs, context)); } template - void UnaryOp(Variable* var_input, Label* do_smi, Label* do_double, - Variable* var_input_double, Label* do_bigint); + void UnaryOp(TVariable* var_input, Label* do_smi, Label* do_double, + TVariable* var_input_double, Label* do_bigint); template - void BinaryOp(Label* smis, Variable* var_left, Variable* var_right, - Label* doubles, Variable* var_left_double, - Variable* var_right_double, Label* bigints); + void BinaryOp(Label* smis, TVariable* var_left, + TVariable* var_right, Label* doubles, + TVariable* var_left_double, + TVariable* var_right_double, Label* bigints); }; // ES6 #sec-number.isfinite TF_BUILTIN(NumberIsFinite, CodeStubAssembler) { - Node* number = Parameter(Descriptor::kNumber); + TNode number = CAST(Parameter(Descriptor::kNumber)); Label return_true(this), return_false(this); @@ -80,10 +81,11 @@ TF_BUILTIN(NumberIsFinite, CodeStubAssembler) { GotoIf(TaggedIsSmi(number), &return_true); // Check if {number} is a HeapNumber. - GotoIfNot(IsHeapNumber(number), &return_false); + TNode number_heap_object = CAST(number); + GotoIfNot(IsHeapNumber(number_heap_object), &return_false); // Check if {number} contains a finite, non-NaN value. - TNode number_value = LoadHeapNumberValue(number); + TNode number_value = LoadHeapNumberValue(number_heap_object); BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false, &return_true); @@ -107,7 +109,7 @@ TF_BUILTIN(NumberIsInteger, CodeStubAssembler) { // ES6 #sec-number.isnan TF_BUILTIN(NumberIsNaN, CodeStubAssembler) { - Node* number = Parameter(Descriptor::kNumber); + TNode number = CAST(Parameter(Descriptor::kNumber)); Label return_true(this), return_false(this); @@ -115,10 +117,11 @@ TF_BUILTIN(NumberIsNaN, CodeStubAssembler) { GotoIf(TaggedIsSmi(number), &return_false); // Check if {number} is a HeapNumber. - GotoIfNot(IsHeapNumber(number), &return_false); + TNode number_heap_object = CAST(number); + GotoIfNot(IsHeapNumber(number_heap_object), &return_false); // Check if {number} contains a NaN value. - TNode number_value = LoadHeapNumberValue(number); + TNode number_value = LoadHeapNumberValue(number_heap_object); BranchIfFloat64IsNaN(number_value, &return_true, &return_false); BIND(&return_true); @@ -136,17 +139,16 @@ TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) { // ES6 #sec-number.parsefloat TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); // We might need to loop once for ToString conversion. 
- VARIABLE(var_input, MachineRepresentation::kTagged, - Parameter(Descriptor::kString)); + TVARIABLE(Object, var_input, CAST(Parameter(Descriptor::kString))); Label loop(this, &var_input); Goto(&loop); BIND(&loop); { // Load the current {input} value. - Node* input = var_input.value(); + TNode input = var_input.value(); // Check if the {input} is a HeapObject or a Smi. Label if_inputissmi(this), if_inputisnotsmi(this); @@ -161,8 +163,9 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { BIND(&if_inputisnotsmi); { // The {input} is a HeapObject, check if it's already a String. + TNode input_heap_object = CAST(input); Label if_inputisstring(this), if_inputisnotstring(this); - TNode input_map = LoadMap(input); + TNode input_map = LoadMap(input_heap_object); TNode input_instance_type = LoadMapInstanceType(input_map); Branch(IsStringInstanceType(input_instance_type), &if_inputisstring, &if_inputisnotstring); @@ -172,7 +175,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { // The {input} is already a String, check if {input} contains // a cached array index. Label if_inputcached(this), if_inputnotcached(this); - TNode input_hash = LoadNameHashField(input); + TNode input_hash = LoadNameHashField(CAST(input)); Branch(IsClearWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask), &if_inputcached, &if_inputnotcached); @@ -204,7 +207,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { { // The {input} is already a Number, take care of -0. Label if_inputiszero(this), if_inputisnotzero(this); - TNode input_value = LoadHeapNumberValue(input); + TNode input_value = LoadHeapNumberValue(input_heap_object); Branch(Float64Equal(input_value, Float64Constant(0.0)), &if_inputiszero, &if_inputisnotzero); @@ -219,7 +222,7 @@ TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { { // Need to convert the {input} to String first. // TODO(bmeurer): This could be more efficient if necessary. - var_input.Bind(CallBuiltin(Builtins::kToString, context, input)); + var_input = CallBuiltin(Builtins::kToString, context, input); Goto(&loop); } } @@ -309,9 +312,9 @@ TF_BUILTIN(ParseInt, CodeStubAssembler) { // ES6 #sec-number.parseint TF_BUILTIN(NumberParseInt, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* input = Parameter(Descriptor::kString); - Node* radix = Parameter(Descriptor::kRadix); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode input = CAST(Parameter(Descriptor::kString)); + TNode radix = CAST(Parameter(Descriptor::kRadix)); Return(CallBuiltin(Builtins::kParseInt, context, input, radix)); } @@ -331,27 +334,29 @@ class AddStubAssembler : public CodeStubAssembler { : CodeStubAssembler(state) {} protected: - void ConvertReceiverAndLoop(Variable* var_value, Label* loop, Node* context) { + TNode ConvertReceiver(TNode js_receiver, + TNode context) { // Call ToPrimitive explicitly without hint (whereas ToNumber // would pass a "number" hint). 
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); - var_value->Bind(CallStub(callable, context, var_value->value())); - Goto(loop); + return CallStub(callable, context, js_receiver); } - void ConvertNonReceiverAndLoop(Variable* var_value, Label* loop, - Node* context) { - var_value->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_value->value())); + void ConvertNonReceiverAndLoop(TVariable* var_value, Label* loop, + TNode context) { + *var_value = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_value->value()); Goto(loop); } - void ConvertAndLoop(Variable* var_value, Node* instance_type, Label* loop, - Node* context) { + void ConvertAndLoop(TVariable* var_value, + TNode instance_type, Label* loop, + TNode context) { Label is_not_receiver(this, Label::kDeferred); GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver); - ConvertReceiverAndLoop(var_value, loop, context); + *var_value = ConvertReceiver(CAST(var_value->value()), context); + Goto(loop); BIND(&is_not_receiver); ConvertNonReceiverAndLoop(var_value, loop, context); @@ -359,30 +364,26 @@ class AddStubAssembler : public CodeStubAssembler { }; TF_BUILTIN(Add, AddStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_left, MachineRepresentation::kTagged, - Parameter(Descriptor::kLeft)); - VARIABLE(var_right, MachineRepresentation::kTagged, - Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_left, CAST(Parameter(Descriptor::kLeft))); + TVARIABLE(Object, var_right, CAST(Parameter(Descriptor::kRight))); // Shared entry for floating point addition. Label do_double_add(this); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); // We might need to loop several times due to ToPrimitive, ToString and/or // ToNumeric conversions. - VARIABLE(var_result, MachineRepresentation::kTagged); - Variable* loop_vars[2] = {&var_left, &var_right}; - Label loop(this, 2, loop_vars), + Label loop(this, {&var_left, &var_right}), string_add_convert_left(this, Label::kDeferred), string_add_convert_right(this, Label::kDeferred), do_bigint_add(this, Label::kDeferred); Goto(&loop); BIND(&loop); { - Node* left = var_left.value(); - Node* right = var_right.value(); + TNode left = var_left.value(); + TNode right = var_right.value(); Label if_left_smi(this), if_left_heapobject(this); Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject); @@ -395,27 +396,30 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_right_smi); { Label if_overflow(this); - TNode result = TrySmiAdd(CAST(left), CAST(right), &if_overflow); + TNode left_smi = CAST(left); + TNode right_smi = CAST(right); + TNode result = TrySmiAdd(left_smi, right_smi, &if_overflow); Return(result); BIND(&if_overflow); { - var_left_double.Bind(SmiToFloat64(left)); - var_right_double.Bind(SmiToFloat64(right)); + var_left_double = SmiToFloat64(left_smi); + var_right_double = SmiToFloat64(right_smi); Goto(&do_double_add); } } // if_right_smi BIND(&if_right_heapobject); { - TNode right_map = LoadMap(right); + TNode right_heap_object = CAST(right); + TNode right_map = LoadMap(right_heap_object); Label if_right_not_number(this, Label::kDeferred); GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); // {right} is a HeapNumber. 
- var_left_double.Bind(SmiToFloat64(left)); - var_right_double.Bind(LoadHeapNumberValue(right)); + var_left_double = SmiToFloat64(CAST(left)); + var_right_double = LoadHeapNumberValue(right_heap_object); Goto(&do_double_add); BIND(&if_right_not_number); @@ -431,7 +435,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_left_heapobject); { - TNode left_map = LoadMap(left); + TNode left_heap_object = CAST(left); + TNode left_map = LoadMap(left_heap_object); Label if_right_smi(this), if_right_heapobject(this); Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject); @@ -441,8 +446,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number); // {left} is a HeapNumber, {right} is a Smi. - var_left_double.Bind(LoadHeapNumberValue(left)); - var_right_double.Bind(SmiToFloat64(right)); + var_left_double = LoadHeapNumberValue(left_heap_object); + var_right_double = SmiToFloat64(CAST(right)); Goto(&do_double_add); BIND(&if_left_not_number); @@ -458,7 +463,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&if_right_heapobject); { - TNode right_map = LoadMap(right); + TNode right_heap_object = CAST(right); + TNode right_map = LoadMap(right_heap_object); Label if_left_number(this), if_left_not_number(this, Label::kDeferred); Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number); @@ -469,8 +475,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); // Both {left} and {right} are HeapNumbers. - var_left_double.Bind(LoadHeapNumberValue(left)); - var_right_double.Bind(LoadHeapNumberValue(right)); + var_left_double = LoadHeapNumberValue(CAST(left)); + var_right_double = LoadHeapNumberValue(right_heap_object); Goto(&do_double_add); BIND(&if_right_not_number); @@ -499,7 +505,8 @@ TF_BUILTIN(Add, AddStubAssembler) { GotoIfNot(IsJSReceiverInstanceType(left_instance_type), &if_left_not_receiver); // {left} is a JSReceiver, convert it first. - ConvertReceiverAndLoop(&var_left, &loop, context); + var_left = ConvertReceiver(CAST(var_left.value()), context); + Goto(&loop); BIND(&if_left_bigint); { @@ -515,7 +522,8 @@ TF_BUILTIN(Add, AddStubAssembler) { &if_right_not_receiver); // {left} is a Primitive, but {right} is a JSReceiver, so convert // {right} with priority. - ConvertReceiverAndLoop(&var_right, &loop, context); + var_right = ConvertReceiver(CAST(var_right.value()), context); + Goto(&loop); BIND(&if_right_not_receiver); // Neither {left} nor {right} are JSReceivers. @@ -553,54 +561,46 @@ TF_BUILTIN(Add, AddStubAssembler) { } template -void NumberBuiltinsAssembler::UnaryOp(Variable* var_input, Label* do_smi, - Label* do_double, - Variable* var_input_double, +void NumberBuiltinsAssembler::UnaryOp(TVariable* var_input, + Label* do_smi, Label* do_double, + TVariable* var_input_double, Label* do_bigint) { - DCHECK_EQ(var_input->rep(), MachineRepresentation::kTagged); - DCHECK_IMPLIES(var_input_double != nullptr, - var_input_double->rep() == MachineRepresentation::kFloat64); - - Node* context = Parameter(Descriptor::kContext); - var_input->Bind(Parameter(Descriptor::kValue)); + TNode context = CAST(Parameter(Descriptor::kContext)); + *var_input = CAST(Parameter(Descriptor::kValue)); // We might need to loop for ToNumeric conversion. 
Label loop(this, {var_input}); Goto(&loop); BIND(&loop); - Node* input = var_input->value(); + TNode input = var_input->value(); Label not_number(this); GotoIf(TaggedIsSmi(input), do_smi); - GotoIfNot(IsHeapNumber(input), ¬_number); + TNode input_heap_object = CAST(input); + GotoIfNot(IsHeapNumber(input_heap_object), ¬_number); if (var_input_double != nullptr) { - var_input_double->Bind(LoadHeapNumberValue(input)); + *var_input_double = LoadHeapNumberValue(input_heap_object); } Goto(do_double); BIND(¬_number); - GotoIf(IsBigInt(input), do_bigint); - var_input->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, input)); + GotoIf(IsBigInt(input_heap_object), do_bigint); + *var_input = CallBuiltin(Builtins::kNonNumberToNumeric, context, input); Goto(&loop); } template -void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, - Variable* var_right, Label* doubles, - Variable* var_left_double, - Variable* var_right_double, +void NumberBuiltinsAssembler::BinaryOp(Label* smis, TVariable* var_left, + TVariable* var_right, + Label* doubles, + TVariable* var_left_double, + TVariable* var_right_double, Label* bigints) { - DCHECK_EQ(var_left->rep(), MachineRepresentation::kTagged); - DCHECK_EQ(var_right->rep(), MachineRepresentation::kTagged); - DCHECK_IMPLIES(var_left_double != nullptr, - var_left_double->rep() == MachineRepresentation::kFloat64); - DCHECK_IMPLIES(var_right_double != nullptr, - var_right_double->rep() == MachineRepresentation::kFloat64); DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr); - Node* context = Parameter(Descriptor::kContext); - var_left->Bind(Parameter(Descriptor::kLeft)); - var_right->Bind(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); + *var_left = CAST(Parameter(Descriptor::kLeft)); + *var_right = CAST(Parameter(Descriptor::kRight)); // We might need to loop for ToNumeric conversions. Label loop(this, {var_left, var_right}); @@ -613,32 +613,36 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, GotoIf(TaggedIsSmi(var_right->value()), smis); // At this point, var_left is a Smi but var_right is not. - GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number); + TNode var_left_smi = CAST(var_left->value()); + TNode var_right_heap_object = CAST(var_right->value()); + GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); if (var_left_double != nullptr) { - var_left_double->Bind(SmiToFloat64(var_left->value())); - var_right_double->Bind(LoadHeapNumberValue(var_right->value())); + *var_left_double = SmiToFloat64(var_left_smi); + *var_right_double = LoadHeapNumberValue(var_right_heap_object); } Goto(doubles); BIND(&left_not_smi); { - GotoIfNot(IsHeapNumber(var_left->value()), &left_not_number); + TNode var_left_heap_object = CAST(var_left->value()); + GotoIfNot(IsHeapNumber(var_left_heap_object), &left_not_number); GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi); // At this point, var_left is a HeapNumber and var_right is a Smi. 
if (var_left_double != nullptr) { - var_left_double->Bind(LoadHeapNumberValue(var_left->value())); - var_right_double->Bind(SmiToFloat64(var_right->value())); + *var_left_double = LoadHeapNumberValue(var_left_heap_object); + *var_right_double = SmiToFloat64(CAST(var_right->value())); } Goto(doubles); } BIND(&right_not_smi); { - GotoIfNot(IsHeapNumber(var_right->value()), &right_not_number); + TNode var_right_heap_object = CAST(var_right->value()); + GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); if (var_left_double != nullptr) { - var_left_double->Bind(LoadHeapNumberValue(var_left->value())); - var_right_double->Bind(LoadHeapNumberValue(var_right->value())); + *var_left_double = LoadHeapNumberValue(CAST(var_left->value())); + *var_right_double = LoadHeapNumberValue(var_right_heap_object); } Goto(doubles); } @@ -646,37 +650,38 @@ void NumberBuiltinsAssembler::BinaryOp(Label* smis, Variable* var_left, BIND(&left_not_number); { Label left_bigint(this); - GotoIf(IsBigInt(var_left->value()), &left_bigint); - var_left->Bind( - CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value())); + GotoIf(IsBigInt(CAST(var_left->value())), &left_bigint); + *var_left = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value()); Goto(&loop); BIND(&left_bigint); { // Jump to {bigints} if {var_right} is already a Numeric. GotoIf(TaggedIsSmi(var_right->value()), bigints); - GotoIf(IsBigInt(var_right->value()), bigints); - GotoIf(IsHeapNumber(var_right->value()), bigints); - var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_right->value())); + TNode var_right_heap_object = CAST(var_right->value()); + GotoIf(IsBigInt(var_right_heap_object), bigints); + GotoIf(IsHeapNumber(var_right_heap_object), bigints); + *var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, + var_right->value()); Goto(&loop); } } BIND(&right_not_number); { - GotoIf(IsBigInt(var_right->value()), bigints); - var_right->Bind(CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_right->value())); + GotoIf(IsBigInt(CAST(var_right->value())), bigints); + *var_right = + CallBuiltin(Builtins::kNonNumberToNumeric, context, var_right->value()); Goto(&loop); } } TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this); BinaryOp(&do_smi_sub, &var_left, &var_right, &do_double_sub, @@ -685,14 +690,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { BIND(&do_smi_sub); { Label if_overflow(this); - TNode result = TrySmiSub(CAST(var_left.value()), - CAST(var_right.value()), &if_overflow); + TNode var_left_smi = CAST(var_left.value()); + TNode var_right_smi = CAST(var_right.value()); + TNode result = TrySmiSub(var_left_smi, var_right_smi, &if_overflow); Return(result); BIND(&if_overflow); { - var_left_double.Bind(SmiToFloat64(var_left.value())); - var_right_double.Bind(SmiToFloat64(var_right.value())); + var_left_double = SmiToFloat64(var_left_smi); + var_right_double = SmiToFloat64(var_right_smi); Goto(&do_double_sub); } } @@ -706,15 +712,15 @@ TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { BIND(&do_bigint_sub); { - Node* context = 
Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kSubtract))); } } TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -733,8 +739,8 @@ TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) { } TF_BUILTIN(Decrement, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -753,8 +759,8 @@ TF_BUILTIN(Decrement, NumberBuiltinsAssembler) { } TF_BUILTIN(Increment, NumberBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - VARIABLE(var_input, MachineRepresentation::kTagged); + TNode context = CAST(Parameter(Descriptor::kContext)); + TVARIABLE(Object, var_input); Label do_number(this), do_bigint(this); UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); @@ -772,8 +778,8 @@ TF_BUILTIN(Increment, NumberBuiltinsAssembler) { } TF_BUILTIN(Negate, NumberBuiltinsAssembler) { - VARIABLE(var_input, MachineRepresentation::kTagged); - VARIABLE(var_input_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_input); + TVARIABLE(Float64T, var_input_double); Label do_smi(this), do_double(this), do_bigint(this); UnaryOp(&var_input, &do_smi, &do_double, &var_input_double, @@ -791,17 +797,17 @@ TF_BUILTIN(Negate, NumberBuiltinsAssembler) { BIND(&do_bigint); { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), SmiConstant(Operation::kNegate))); } } TF_BUILTIN(Multiply, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this); BinaryOp(&do_smi_mul, &var_left, &var_right, &do_double_mul, @@ -818,17 +824,17 @@ TF_BUILTIN(Multiply, NumberBuiltinsAssembler) { BIND(&do_bigint_mul); { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kMultiply))); } } TF_BUILTIN(Divide, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_div(this), do_double_div(this), do_bigint_div(this); BinaryOp(&do_smi_div, &var_left, 
&var_right, &do_double_div, @@ -889,8 +895,8 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) { // division. BIND(&bailout); { - var_left_double.Bind(SmiToFloat64(dividend)); - var_right_double.Bind(SmiToFloat64(divisor)); + var_left_double = SmiToFloat64(dividend); + var_right_double = SmiToFloat64(divisor); Goto(&do_double_div); } } @@ -904,17 +910,17 @@ TF_BUILTIN(Divide, NumberBuiltinsAssembler) { BIND(&do_bigint_div); { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kDivide))); } } TF_BUILTIN(Modulus, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); - VARIABLE(var_left_double, MachineRepresentation::kFloat64); - VARIABLE(var_right_double, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); + TVARIABLE(Float64T, var_left_double); + TVARIABLE(Float64T, var_right_double); Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this); BinaryOp(&do_smi_mod, &var_left, &var_right, &do_double_mod, @@ -930,17 +936,17 @@ TF_BUILTIN(Modulus, NumberBuiltinsAssembler) { BIND(&do_bigint_mod); { - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), var_right.value(), SmiConstant(Operation::kModulus))); } } TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) { - VARIABLE(var_left, MachineRepresentation::kTagged); - VARIABLE(var_right, MachineRepresentation::kTagged); + TVARIABLE(Object, var_left); + TVARIABLE(Object, var_right); Label do_number_exp(this), do_bigint_exp(this); - Node* context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); BinaryOp(&do_number_exp, &var_left, &var_right, &do_number_exp, nullptr, nullptr, &do_bigint_exp); @@ -997,9 +1003,9 @@ TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) { } TF_BUILTIN(Equal, CodeStubAssembler) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode lhs = CAST(Parameter(Descriptor::kLeft)); + TNode rhs = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(Equal(lhs, rhs, context)); } diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc index d2fb0ff74c3a02..49e7ff27b850d2 100644 --- a/deps/v8/src/builtins/builtins-number.cc +++ b/deps/v8/src/builtins/builtins-number.cc @@ -111,6 +111,7 @@ BUILTIN(NumberPrototypeToFixed) { // ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] ) BUILTIN(NumberPrototypeToLocaleString) { HandleScope scope(isolate); + const char* method = "Number.prototype.toLocaleString"; isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString); @@ -123,17 +124,17 @@ BUILTIN(NumberPrototypeToLocaleString) { // 1. Let x be ? 
thisNumberValue(this value) if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kNotGeneric, - isolate->factory()->NewStringFromAsciiChecked( - "Number.prototype.toLocaleString"), - isolate->factory()->Number_string())); + isolate, + NewTypeError(MessageTemplate::kNotGeneric, + isolate->factory()->NewStringFromAsciiChecked(method), + isolate->factory()->Number_string())); } #ifdef V8_INTL_SUPPORT RETURN_RESULT_OR_FAILURE( isolate, Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1), - args.atOrUndefined(isolate, 2))); + args.atOrUndefined(isolate, 2), method)); #else // Turn the {value} into a String. return *isolate->factory()->NumberToString(value); diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index db9d4ed6579244..a35990e2f57801 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -22,29 +22,35 @@ namespace internal { // ----------------------------------------------------------------------------- // ES6 section 19.1 Object Objects -using Node = compiler::Node; -template -using TNode = CodeStubAssembler::TNode; - class ObjectBuiltinsAssembler : public CodeStubAssembler { public: explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} protected: - void ReturnToStringFormat(Node* context, Node* string); + void ReturnToStringFormat(TNode context, TNode string); void AddToDictionaryIf(TNode condition, TNode name_dictionary, Handle name, TNode value, Label* bailout); - Node* FromPropertyDescriptor(Node* context, Node* desc); - Node* FromPropertyDetails(Node* context, Node* raw_value, Node* details, - Label* if_bailout); - Node* ConstructAccessorDescriptor(Node* context, Node* getter, Node* setter, - Node* enumerable, Node* configurable); - Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable, - Node* enumerable, Node* configurable); - Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout); + TNode FromPropertyDescriptor(TNode context, + TNode desc); + TNode FromPropertyDetails(TNode context, + TNode raw_value, + TNode details, + Label* if_bailout); + TNode ConstructAccessorDescriptor(TNode context, + TNode getter, + TNode setter, + TNode enumerable, + TNode configurable); + TNode ConstructDataDescriptor(TNode context, + TNode value, + TNode writable, + TNode enumerable, + TNode configurable); + TNode GetAccessorOrUndefined(TNode accessor, + Label* if_bailout); }; class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { @@ -79,8 +85,8 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { TNode size, TNode array_map, Label* if_empty); }; -void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context, - Node* string) { +void ObjectBuiltinsAssembler::ReturnToStringFormat(TNode context, + TNode string) { TNode lhs = StringConstant("[object "); TNode rhs = StringConstant("]"); @@ -90,11 +96,9 @@ void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context, rhs)); } -Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context, - Node* getter, - Node* setter, - Node* enumerable, - Node* configurable) { +TNode ObjectBuiltinsAssembler::ConstructAccessorDescriptor( + TNode context, TNode getter, TNode setter, + TNode enumerable, TNode configurable) { TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement( native_context, 
Context::ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX)); @@ -114,11 +118,9 @@ Node* ObjectBuiltinsAssembler::ConstructAccessorDescriptor(Node* context, return js_desc; } -Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context, - Node* value, - Node* writable, - Node* enumerable, - Node* configurable) { +TNode ObjectBuiltinsAssembler::ConstructDataDescriptor( + TNode context, TNode value, TNode writable, + TNode enumerable, TNode configurable) { TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement( native_context, Context::DATA_PROPERTY_DESCRIPTOR_MAP_INDEX)); @@ -260,10 +262,10 @@ TNode ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0)); TVARIABLE(IntPtrT, var_descriptor_number, IntPtrConstant(0)); - Variable* vars[] = {&var_descriptor_number, &var_result_index}; + VariableList vars({&var_descriptor_number, &var_result_index}, zone()); // Let desc be ? O.[[GetOwnProperty]](key). TNode descriptors = LoadMapDescriptors(map); - Label loop(this, 2, vars), after_loop(this), next_descriptor(this); + Label loop(this, vars), after_loop(this), next_descriptor(this); Branch(IntPtrEqual(var_descriptor_number.value(), object_enum_length), &after_loop, &loop); @@ -309,11 +311,10 @@ TNode ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( if (collect_type == CollectType::kEntries) { // Let entry be CreateArrayFromList(« key, value »). - Node* array = nullptr; - Node* elements = nullptr; + TNode array; + TNode elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr, - IntPtrConstant(2)); + PACKED_ELEMENTS, array_map, SmiConstant(2), {}, IntPtrConstant(2)); StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER); StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER); value = TNode::UncheckedCast(array); @@ -321,12 +322,12 @@ TNode ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( StoreFixedArrayElement(values_or_entries, var_result_index.value(), value); - Increment(&var_result_index, 1); + Increment(&var_result_index); Goto(&next_descriptor); BIND(&next_descriptor); { - Increment(&var_descriptor_number, 1); + Increment(&var_descriptor_number); Branch(IntPtrEqual(var_result_index.value(), object_enum_length), &after_loop, &loop); } @@ -366,9 +367,9 @@ TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) { } TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kReceiver); - Node* key = Parameter(Descriptor::kKey); - Node* context = Parameter(Descriptor::kContext); + TNode object = CAST(Parameter(Descriptor::kReceiver)); + TNode key = CAST(Parameter(Descriptor::kKey)); + TNode context = CAST(Parameter(Descriptor::kContext)); Label call_runtime(this), return_true(this), return_false(this), to_primitive(this); @@ -379,12 +380,12 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi); BIND(&if_objectisnotsmi); - TNode map = LoadMap(object); + TNode map = LoadMap(CAST(object)); TNode instance_type = LoadMapInstanceType(map); { - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_index(this), if_unique_name(this), if_notunique_name(this); TryToName(key, &if_index, &var_index, &if_unique_name, 
&var_unique, @@ -407,7 +408,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { BIND(&if_notunique_name); { Label not_in_string_table(this); - TryInternalizeString(key, &if_index, &var_index, &if_unique_name, + TryInternalizeString(CAST(key), &if_index, &var_index, &if_unique_name, &var_unique, ¬_in_string_table, &call_runtime); BIND(¬_in_string_table); @@ -422,7 +423,7 @@ TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { } BIND(&to_primitive); GotoIf(IsNumber(key), &return_false); - Branch(IsName(key), &return_false, &call_runtime); + Branch(IsName(CAST(key)), &return_false, &call_runtime); BIND(&return_true); Return(TrueConstant()); @@ -454,7 +455,7 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { // second argument. // 4. For each element nextSource of sources, in ascending index order, args.ForEach( - [=](Node* next_source) { + [=](TNode next_source) { CallBuiltin(Builtins::kSetDataProperties, context, to, next_source); }, IntPtrConstant(1)); @@ -467,17 +468,18 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) { // ES #sec-object.keys TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode object = CAST(Parameter(Descriptor::kObject)); + TNode context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_length, MachineRepresentation::kTagged); - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_length); + TVARIABLE(FixedArrayBase, var_elements); Label if_empty(this, Label::kDeferred), if_empty_elements(this), if_fast(this), if_slow(this, Label::kDeferred), if_join(this); // Check if the {object} has a usable enum cache. GotoIf(TaggedIsSmi(object), &if_slow); - TNode object_map = LoadMap(object); + + TNode object_map = LoadMap(CAST(object)); TNode object_bit_field3 = LoadMapBitField3(object_map); TNode object_enum_length = DecodeWordFromWord32(object_bit_field3); @@ -487,7 +489,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { // Ensure that the {object} doesn't have any elements. CSA_ASSERT(this, IsJSObjectMap(object_map)); - TNode object_elements = LoadElements(object); + TNode object_elements = LoadElements(CAST(object)); GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, &if_slow); @@ -500,20 +502,20 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { { // The {object} has a usable enum cache, use that. TNode object_descriptors = LoadMapDescriptors(object_map); - TNode object_enum_cache = CAST( - LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset)); + TNode object_enum_cache = LoadObjectField( + object_descriptors, DescriptorArray::kEnumCacheOffset); TNode object_enum_keys = LoadObjectField(object_enum_cache, EnumCache::kKeysOffset); // Allocate a JSArray and copy the elements from the {object_enum_keys}. 
- Node* array = nullptr; - Node* elements = nullptr; + TNode array; + TNode elements; TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); TNode array_length = SmiTag(Signed(object_enum_length)); std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length, + PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length, INTPTR_PARAMETERS); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, object_enum_length, SKIP_WRITE_BARRIER); @@ -523,8 +525,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { BIND(&if_empty); { // The {object} doesn't have any enumerable keys. - var_length.Bind(SmiConstant(0)); - var_elements.Bind(EmptyFixedArrayConstant()); + var_length = SmiConstant(0); + var_elements = EmptyFixedArrayConstant(); Goto(&if_join); } @@ -533,8 +535,8 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { // Let the runtime compute the elements. TNode elements = CAST(CallRuntime(Runtime::kObjectKeys, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } @@ -544,19 +546,19 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode array = AllocateJSArray( - array_map, CAST(var_elements.value()), CAST(var_length.value())); + TNode array = + AllocateJSArray(array_map, var_elements.value(), var_length.value()); Return(array); } } // ES #sec-object.getOwnPropertyNames TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode object = CAST(Parameter(Descriptor::kObject)); + TNode context = CAST(Parameter(Descriptor::kContext)); - VARIABLE(var_length, MachineRepresentation::kTagged); - VARIABLE(var_elements, MachineRepresentation::kTagged); + TVARIABLE(Smi, var_length); + TVARIABLE(FixedArrayBase, var_elements); Label if_empty(this, Label::kDeferred), if_empty_elements(this), if_fast(this), try_fast(this, Label::kDeferred), if_slow(this, Label::kDeferred), if_join(this); @@ -564,10 +566,11 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or // has any elements. GotoIf(TaggedIsSmi(object), &if_slow); - TNode object_map = LoadMap(object); + + TNode object_map = LoadMap(CAST(object)); TNode instance_type = LoadMapInstanceType(object_map); GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow); - TNode object_elements = LoadElements(object); + TNode object_elements = LoadElements(CAST(object)); GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, &if_slow); @@ -600,14 +603,14 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { LoadObjectField(object_enum_cache, EnumCache::kKeysOffset); // Allocate a JSArray and copy the elements from the {object_enum_keys}. 
- Node* array = nullptr; - Node* elements = nullptr; TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); TNode array_length = SmiTag(Signed(object_enum_length)); + TNode array; + TNode elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, nullptr, object_enum_length, + PACKED_ELEMENTS, array_map, array_length, {}, object_enum_length, INTPTR_PARAMETERS); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, object_enum_length, SKIP_WRITE_BARRIER); @@ -619,16 +622,16 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Let the runtime compute the elements and try initializing enum cache. TNode elements = CAST(CallRuntime( Runtime::kObjectGetOwnPropertyNamesTryFast, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } BIND(&if_empty); { // The {object} doesn't have any enumerable keys. - var_length.Bind(SmiConstant(0)); - var_elements.Bind(EmptyFixedArrayConstant()); + var_length = SmiConstant(0); + var_elements = EmptyFixedArrayConstant(); Goto(&if_join); } @@ -637,8 +640,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Let the runtime compute the elements. TNode elements = CAST(CallRuntime(Runtime::kObjectGetOwnPropertyNames, context, object)); - var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset)); - var_elements.Bind(elements); + var_length = LoadObjectField(elements, FixedArray::kLengthOffset); + var_elements = elements; Goto(&if_join); } @@ -648,8 +651,8 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode array = AllocateJSArray( - array_map, CAST(var_elements.value()), CAST(var_length.value())); + TNode array = + AllocateJSArray(array_map, var_elements.value(), var_length.value()); Return(array); } } @@ -672,9 +675,9 @@ TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) { // ES #sec-object.prototype.isprototypeof TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* value = Parameter(Descriptor::kValue); - Node* context = Parameter(Descriptor::kContext); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode value = CAST(Parameter(Descriptor::kValue)); + TNode context = CAST(Parameter(Descriptor::kContext)); Label if_receiverisnullorundefined(this, Label::kDeferred), if_valueisnotreceiver(this, Label::kDeferred); @@ -685,31 +688,35 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { // immediately aborts and returns false anyways. GotoIf(TaggedIsSmi(value), &if_valueisnotreceiver); - // Check if {receiver} is either null or undefined and in that case, - // invoke the ToObject builtin, which raises the appropriate error. 
- // Otherwise we don't need to invoke ToObject, since {receiver} is - // either already a JSReceiver, in which case ToObject is a no-op, - // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper - // wrapper, which wouldn't be identical to any existing JSReceiver - // found in the prototype chain of {value}, hence it will return - // false no matter if we search for the Primitive {receiver} or - // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. - GotoIf(IsNull(receiver), &if_receiverisnullorundefined); - GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); - - // Loop through the prototype chain looking for the {receiver}. - Return(HasInPrototypeChain(context, value, receiver)); - - BIND(&if_receiverisnullorundefined); { - // If {value} is a primitive HeapObject, we need to return - // false instead of throwing an exception per order of the - // steps in the specification, so check that first here. - GotoIfNot(IsJSReceiver(value), &if_valueisnotreceiver); - - // Simulate the ToObject invocation on {receiver}. - ToObject(context, receiver); - Unreachable(); + TNode value_heap_object = CAST(value); + + // Check if {receiver} is either null or undefined and in that case, + // invoke the ToObject builtin, which raises the appropriate error. + // Otherwise we don't need to invoke ToObject, since {receiver} is + // either already a JSReceiver, in which case ToObject is a no-op, + // or it's a Primitive and ToObject would allocate a fresh + // JSPrimitiveWrapper wrapper, which wouldn't be identical to any existing + // JSReceiver found in the prototype chain of {value}, hence it will return + // false no matter if we search for the Primitive {receiver} or + // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. + GotoIf(IsNull(receiver), &if_receiverisnullorundefined); + GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); + + // Loop through the prototype chain looking for the {receiver}. + Return(HasInPrototypeChain(context, value_heap_object, receiver)); + + BIND(&if_receiverisnullorundefined); + { + // If {value} is a primitive HeapObject, we need to return + // false instead of throwing an exception per order of the + // steps in the specification, so check that first here. + GotoIfNot(IsJSReceiver(value_heap_object), &if_valueisnotreceiver); + + // Simulate the ToObject invocation on {receiver}. + ToObject(context, receiver); + Unreachable(); + } } BIND(&if_valueisnotreceiver); @@ -731,14 +738,18 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_regexp(this), if_string(this), if_symbol(this, Label::kDeferred), if_value(this), if_bigint(this, Label::kDeferred); - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode context = CAST(Parameter(Descriptor::kContext)); + + TVARIABLE(String, var_default); + TVARIABLE(HeapObject, var_holder); // This is arranged to check the likely cases first. 
- VARIABLE(var_default, MachineRepresentation::kTagged); - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); GotoIf(TaggedIsSmi(receiver), &if_number); - TNode receiver_map = LoadMap(receiver); + + TNode reciever_heap_object = CAST(receiver); + TNode receiver_map = LoadMap(reciever_heap_object); + var_holder = reciever_heap_object; TNode receiver_instance_type = LoadMapInstanceType(receiver_map); GotoIf(IsPrimitiveInstanceType(receiver_instance_type), &if_primitive); const struct { @@ -747,8 +758,8 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { } kJumpTable[] = {{JS_OBJECT_TYPE, &if_object}, {JS_ARRAY_TYPE, &if_array}, {JS_FUNCTION_TYPE, &if_function}, - {JS_REGEXP_TYPE, &if_regexp}, - {JS_ARGUMENTS_TYPE, &if_arguments}, + {JS_REG_EXP_TYPE, &if_regexp}, + {JS_ARGUMENTS_OBJECT_TYPE, &if_arguments}, {JS_DATE_TYPE, &if_date}, {JS_BOUND_FUNCTION_TYPE, &if_function}, {JS_API_OBJECT_TYPE, &if_apiobject}, @@ -769,30 +780,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { BIND(&if_apiobject); { // Lookup the @@toStringTag property on the {receiver}. - VARIABLE(var_tag, MachineRepresentation::kTagged, - GetProperty(context, receiver, - isolate()->factory()->to_string_tag_symbol())); + TVARIABLE(Object, var_tag, + GetProperty(context, receiver, + isolate()->factory()->to_string_tag_symbol())); Label if_tagisnotstring(this), if_tagisstring(this); GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring); - Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring); + Branch(IsString(CAST(var_tag.value())), &if_tagisstring, + &if_tagisnotstring); BIND(&if_tagisnotstring); { - var_tag.Bind(CallRuntime(Runtime::kClassOf, context, receiver)); + var_tag = CallRuntime(Runtime::kClassOf, context, receiver); Goto(&if_tagisstring); } BIND(&if_tagisstring); - ReturnToStringFormat(context, var_tag.value()); + ReturnToStringFormat(context, CAST(var_tag.value())); } BIND(&if_arguments); { - var_default.Bind(ArgumentsToStringConstant()); + var_default = ArgumentsToStringConstant(); Goto(&checkstringtag); } BIND(&if_array); { - var_default.Bind(ArrayToStringConstant()); + var_default = ArrayToStringConstant(); Goto(&checkstringtag); } @@ -801,30 +813,30 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode boolean_constructor = CAST( LoadContextElement(native_context, Context::BOOLEAN_FUNCTION_INDEX)); - TNode boolean_initial_map = CAST(LoadObjectField( - boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode boolean_prototype = - LoadObjectField(boolean_initial_map, Map::kPrototypeOffset); - var_default.Bind(BooleanToStringConstant()); - var_holder.Bind(boolean_prototype); + TNode boolean_initial_map = LoadObjectField( + boolean_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode boolean_prototype = + LoadObjectField(boolean_initial_map, Map::kPrototypeOffset); + var_default = BooleanToStringConstant(); + var_holder = boolean_prototype; Goto(&checkstringtag); } BIND(&if_date); { - var_default.Bind(DateToStringConstant()); + var_default = DateToStringConstant(); Goto(&checkstringtag); } BIND(&if_error); { - var_default.Bind(ErrorToStringConstant()); + var_default = ErrorToStringConstant(); Goto(&checkstringtag); } BIND(&if_function); { - var_default.Bind(FunctionToStringConstant()); + var_default = FunctionToStringConstant(); Goto(&checkstringtag); } @@ -833,19 +845,19 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode 
number_constructor = CAST( LoadContextElement(native_context, Context::NUMBER_FUNCTION_INDEX)); - TNode number_initial_map = CAST(LoadObjectField( - number_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode number_prototype = - LoadObjectField(number_initial_map, Map::kPrototypeOffset); - var_default.Bind(NumberToStringConstant()); - var_holder.Bind(number_prototype); + TNode number_initial_map = LoadObjectField( + number_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode number_prototype = + LoadObjectField(number_initial_map, Map::kPrototypeOffset); + var_default = NumberToStringConstant(); + var_holder = number_prototype; Goto(&checkstringtag); } BIND(&if_object); { - CSA_ASSERT(this, IsJSReceiver(receiver)); - var_default.Bind(ObjectToStringConstant()); + CSA_ASSERT(this, IsJSReceiver(CAST(receiver))); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } @@ -885,24 +897,25 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { }); // Lookup the @@toStringTag property on the {receiver}. - VARIABLE(var_tag, MachineRepresentation::kTagged, - GetProperty(context, receiver, - isolate()->factory()->to_string_tag_symbol())); + TVARIABLE(Object, var_tag, + GetProperty(context, receiver, + isolate()->factory()->to_string_tag_symbol())); Label if_tagisnotstring(this), if_tagisstring(this); GotoIf(TaggedIsSmi(var_tag.value()), &if_tagisnotstring); - Branch(IsString(var_tag.value()), &if_tagisstring, &if_tagisnotstring); + Branch(IsString(CAST(var_tag.value())), &if_tagisstring, + &if_tagisnotstring); BIND(&if_tagisnotstring); { - var_tag.Bind(builtin_tag); + var_tag = builtin_tag; Goto(&if_tagisstring); } BIND(&if_tagisstring); - ReturnToStringFormat(context, var_tag.value()); + ReturnToStringFormat(context, CAST(var_tag.value())); } BIND(&if_regexp); { - var_default.Bind(RegexpToStringConstant()); + var_default = RegexpToStringConstant(); Goto(&checkstringtag); } @@ -911,12 +924,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode string_constructor = CAST( LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX)); - TNode string_initial_map = CAST(LoadObjectField( - string_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode string_prototype = - LoadObjectField(string_initial_map, Map::kPrototypeOffset); - var_default.Bind(StringToStringConstant()); - var_holder.Bind(string_prototype); + TNode string_initial_map = LoadObjectField( + string_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode string_prototype = + LoadObjectField(string_initial_map, Map::kPrototypeOffset); + var_default = StringToStringConstant(); + var_holder = string_prototype; Goto(&checkstringtag); } @@ -925,12 +938,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode symbol_constructor = CAST( LoadContextElement(native_context, Context::SYMBOL_FUNCTION_INDEX)); - TNode symbol_initial_map = CAST(LoadObjectField( - symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode symbol_prototype = - LoadObjectField(symbol_initial_map, Map::kPrototypeOffset); - var_default.Bind(ObjectToStringConstant()); - var_holder.Bind(symbol_prototype); + TNode symbol_initial_map = LoadObjectField( + symbol_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode symbol_prototype = + LoadObjectField(symbol_initial_map, Map::kPrototypeOffset); + var_default = ObjectToStringConstant(); + var_holder = symbol_prototype; 
Goto(&checkstringtag); } @@ -939,12 +952,12 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode bigint_constructor = CAST( LoadContextElement(native_context, Context::BIGINT_FUNCTION_INDEX)); - TNode bigint_initial_map = CAST(LoadObjectField( - bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset)); - TNode bigint_prototype = - LoadObjectField(bigint_initial_map, Map::kPrototypeOffset); - var_default.Bind(ObjectToStringConstant()); - var_holder.Bind(bigint_prototype); + TNode bigint_initial_map = LoadObjectField( + bigint_constructor, JSFunction::kPrototypeOrInitialMapOffset); + TNode bigint_prototype = + LoadObjectField(bigint_initial_map, Map::kPrototypeOffset); + var_default = ObjectToStringConstant(); + var_holder = bigint_prototype; Goto(&checkstringtag); } @@ -956,12 +969,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_value_is_bigint(this, Label::kDeferred), if_value_is_string(this, Label::kDeferred); - Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); + TNode receiver_value = + LoadJSPrimitiveWrapperValue(CAST(reciever_heap_object)); // We need to start with the object to see if the value was a subclass // which might have interesting properties. - var_holder.Bind(receiver); + var_holder = reciever_heap_object; GotoIf(TaggedIsSmi(receiver_value), &if_value_is_number); - TNode receiver_value_map = LoadMap(receiver_value); + TNode receiver_value_map = LoadMap(CAST(receiver_value)); GotoIf(IsHeapNumberMap(receiver_value_map), &if_value_is_number); GotoIf(IsBooleanMap(receiver_value_map), &if_value_is_boolean); GotoIf(IsSymbolMap(receiver_value_map), &if_value_is_symbol); @@ -974,31 +988,31 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { BIND(&if_value_is_number); { - var_default.Bind(NumberToStringConstant()); + var_default = NumberToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_boolean); { - var_default.Bind(BooleanToStringConstant()); + var_default = BooleanToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_string); { - var_default.Bind(StringToStringConstant()); + var_default = StringToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_bigint); { - var_default.Bind(ObjectToStringConstant()); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } BIND(&if_value_is_symbol); { - var_default.Bind(ObjectToStringConstant()); + var_default = ObjectToStringConstant(); Goto(&checkstringtag); } } @@ -1013,13 +1027,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { Goto(&loop); BIND(&loop); { - Node* holder = var_holder.value(); + TNode holder = var_holder.value(); GotoIf(IsNull(holder), &return_default); TNode holder_map = LoadMap(holder); TNode holder_bit_field3 = LoadMapBitField3(holder_map); GotoIf(IsSetWord32(holder_bit_field3), &return_generic); - var_holder.Bind(LoadMapPrototype(holder_map)); + var_holder = LoadMapPrototype(holder_map); Goto(&loop); } @@ -1029,7 +1043,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { ToStringTagSymbolConstant()); GotoIf(TaggedIsSmi(tag), &return_default); GotoIfNot(IsString(CAST(tag)), &return_default); - ReturnToStringFormat(context, tag); + ReturnToStringFormat(context, CAST(tag)); } BIND(&return_default); @@ -1058,28 +1072,28 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) { BranchIfJSReceiver(prototype, &prototype_jsreceiver, &call_runtime); } - VARIABLE(map, MachineRepresentation::kTagged); - VARIABLE(properties, MachineRepresentation::kTagged); + 
TVARIABLE(Map, map); + TVARIABLE(HeapObject, properties); Label instantiate_map(this); BIND(&prototype_null); { Comment("Prototype is null"); - map.Bind(LoadContextElement(native_context, - Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + map = CAST(LoadContextElement( + native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } BIND(&prototype_jsreceiver); { Comment("Prototype is JSReceiver"); - properties.Bind(EmptyFixedArrayConstant()); + properties = EmptyFixedArrayConstant(); TNode object_function = CAST( LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); - TNode object_function_map = LoadObjectField( + TNode object_function_map = LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset); - map.Bind(object_function_map); + map = object_function_map; GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())), &instantiate_map); Comment("Try loading the prototype info"); @@ -1087,8 +1101,8 @@ TF_BUILTIN(CreateObjectWithoutProperties, ObjectBuiltinsAssembler) { LoadMapPrototypeInfo(LoadMap(CAST(prototype)), &call_runtime); TNode maybe_map = LoadMaybeWeakObjectField( prototype_info, PrototypeInfo::kObjectCreateMapOffset); - GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), &call_runtime); - map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); + GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime); + map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); Goto(&instantiate_map); } @@ -1153,28 +1167,28 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { // Create a new object with the given prototype. BIND(&no_properties); { - VARIABLE(map, MachineRepresentation::kTagged); - VARIABLE(properties, MachineRepresentation::kTagged); + TVARIABLE(Map, map); + TVARIABLE(HeapObject, properties); Label non_null_proto(this), instantiate_map(this), good(this); Branch(IsNull(prototype), &good, &non_null_proto); BIND(&good); { - map.Bind(LoadContextElement( + map = CAST(LoadContextElement( context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity)); + properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); Goto(&instantiate_map); } BIND(&non_null_proto); { - properties.Bind(EmptyFixedArrayConstant()); + properties = EmptyFixedArrayConstant(); TNode object_function = CAST(LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX)); - TNode object_function_map = LoadObjectField( + TNode object_function_map = LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset); - map.Bind(object_function_map); + map = object_function_map; GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())), &instantiate_map); // Try loading the prototype info. 
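As context for the hunks above and below: the recurring change in this patch is replacing untyped compiler::Node* values and VARIABLE(..., MachineRepresentation::kTagged) slots with statically typed TNode values and TVARIABLE slots, adding CAST(...) wherever an untyped value is narrowed and using plain `=` assignment where Bind() was used. The short standalone C++ sketch below only models that general shape; the TypedNode, TypedVariable and checked_cast names are hypothetical stand-ins invented for illustration, not V8's actual TNode/TVARIABLE/CAST machinery, and the sketch is not part of the patch itself.

// Standalone illustrative sketch of the untyped -> typed node migration.
// All names here (TypedNode, TypedVariable, checked_cast) are hypothetical.
#include <cassert>
#include <type_traits>

struct Object { virtual ~Object() = default; };   // stand-in for a tagged value
struct HeapObject : Object {};
struct Map : HeapObject {};

using Node = Object;  // "untyped" handle, analogous in spirit to compiler::Node*

// Thin typed wrapper: narrows the static type the C++ compiler sees without
// changing the runtime value, roughly the role TNode plays in the patch.
template <typename T>
class TypedNode {
 public:
  explicit TypedNode(T* ptr) : ptr_(ptr) {}
  // Allow widening conversions (e.g. TypedNode<Map> -> TypedNode<HeapObject>).
  template <typename U,
            typename = std::enable_if_t<std::is_base_of<T, U>::value>>
  TypedNode(TypedNode<U> other) : ptr_(other.value()) {}
  T* value() const { return ptr_; }
 private:
  T* ptr_;
};

// Checked narrowing from an untyped handle, analogous in spirit to CAST(x).
template <typename T>
TypedNode<T> checked_cast(Node* node) {
  T* typed = dynamic_cast<T*>(node);
  assert(typed != nullptr && "narrowing to the requested type failed");
  return TypedNode<T>(typed);
}

// Rebindable slot with a fixed static type, analogous in spirit to a
// TVARIABLE: assignment with `=` replaces the old untyped Bind() calls.
template <typename T>
class TypedVariable {
 public:
  TypedVariable& operator=(TypedNode<T> node) {
    ptr_ = node.value();
    return *this;
  }
  TypedNode<T> value() const { return TypedNode<T>(ptr_); }
 private:
  T* ptr_ = nullptr;
};

int main() {
  Map concrete_map;
  Node* untyped = &concrete_map;                    // old style: plain Node*
  TypedNode<Map> map = checked_cast<Map>(untyped);  // new style: narrow once
  TypedVariable<HeapObject> var_holder;             // typed variable slot
  var_holder = map;                                 // widening assignment, no Bind()
  return var_holder.value().value() == &concrete_map ? 0 : 1;
}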
@@ -1183,9 +1197,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { Comment("Load ObjectCreateMap from PrototypeInfo"); TNode maybe_map = LoadMaybeWeakObjectField( prototype_info, PrototypeInfo::kObjectCreateMapOffset); - GotoIf(IsStrongReferenceTo(maybe_map, UndefinedConstant()), - &call_runtime); - map.Bind(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); + GotoIf(TaggedEqual(maybe_map, UndefinedConstant()), &call_runtime); + map = CAST(GetHeapObjectAssumeWeak(maybe_map, &call_runtime)); Goto(&instantiate_map); } @@ -1207,8 +1220,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { // ES #sec-object.is TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) { - Node* const left = Parameter(Descriptor::kLeft); - Node* const right = Parameter(Descriptor::kRight); + TNode const left = CAST(Parameter(Descriptor::kLeft)); + TNode const right = CAST(Parameter(Descriptor::kRight)); Label return_true(this), return_false(this); BranchIfSameValue(left, right, &return_true, &return_false); @@ -1221,9 +1234,9 @@ TF_BUILTIN(ObjectIs, ObjectBuiltinsAssembler) { } TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) { - Node* const value = Parameter(Descriptor::kValue); - Node* const done = Parameter(Descriptor::kDone); - Node* const context = Parameter(Descriptor::kContext); + TNode const value = CAST(Parameter(Descriptor::kValue)); + TNode const done = CAST(Parameter(Descriptor::kDone)); + TNode const context = CAST(Parameter(Descriptor::kContext)); TNode const native_context = LoadNativeContext(context); TNode const map = CAST( @@ -1238,53 +1251,53 @@ TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) { } TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) { - Node* key = Parameter(Descriptor::kKey); - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode key = CAST(Parameter(Descriptor::kKey)); + TNode object = CAST(Parameter(Descriptor::kObject)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(HasProperty(context, object, key, kHasProperty)); } TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kLeft); - Node* callable = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode object = CAST(Parameter(Descriptor::kLeft)); + TNode callable = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(InstanceOf(object, callable, context)); } // ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) { - Node* constructor = Parameter(Descriptor::kLeft); - Node* object = Parameter(Descriptor::kRight); - Node* context = Parameter(Descriptor::kContext); + TNode constructor = CAST(Parameter(Descriptor::kLeft)); + TNode object = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(OrdinaryHasInstance(context, constructor, object)); } TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) { - Node* object = Parameter(Descriptor::kObject); - Node* context = Parameter(Descriptor::kContext); + TNode object = CAST(Parameter(Descriptor::kObject)); + TNode context = CAST(Parameter(Descriptor::kContext)); Return(GetSuperConstructor(context, object)); } TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { - Node* closure = Parameter(Descriptor::kClosure); - Node* receiver = Parameter(Descriptor::kReceiver); - Node* context = Parameter(Descriptor::kContext); + TNode closure = CAST(Parameter(Descriptor::kClosure)); + 
TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode context = CAST(Parameter(Descriptor::kContext)); // Get the initial map from the function, jumping to the runtime if we don't // have one. Label done(this), runtime(this); GotoIfNot(IsFunctionWithPrototypeSlotMap(LoadMap(closure)), &runtime); - TNode maybe_map = - CAST(LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset)); + TNode maybe_map = LoadObjectField( + closure, JSFunction::kPrototypeOrInitialMapOffset); GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime); TNode map = CAST(maybe_map); - TNode shared = - CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset)); + TNode shared = LoadObjectField( + closure, JSFunction::kSharedFunctionInfoOffset); TNode bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared); @@ -1293,7 +1306,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { MachineType::Uint16())); TNode frame_size = ChangeInt32ToIntPtr(LoadObjectField( bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32())); - TNode size = + TNode size = IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)), formal_parameter_count); TNode parameters_and_registers = @@ -1337,16 +1350,17 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { // ES6 section 19.1.2.7 Object.getOwnPropertyDescriptor ( O, P ) TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { - Node* argc = Parameter(Descriptor::kJSActualArgumentsCount); - Node* context = Parameter(Descriptor::kContext); + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget))); - CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments args(this, argc); TNode object_input = args.GetOptionalArgumentValue(0); TNode key = args.GetOptionalArgumentValue(1); // 1. Let obj be ? ToObject(O). - TNode object = ToObject_Inline(CAST(context), object_input); + TNode object = ToObject_Inline(context, object_input); // 2. Let key be ? ToPropertyKey(P). 
key = CallBuiltin(Builtins::kToName, context, key); @@ -1359,9 +1373,8 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { TNode instance_type = LoadMapInstanceType(map); GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime); { - VARIABLE(var_index, MachineType::PointerRepresentation(), - IntPtrConstant(0)); - VARIABLE(var_name, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); + TVARIABLE(Name, var_name); TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_name, &call_runtime, &if_notunique_name); @@ -1369,8 +1382,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { BIND(&if_notunique_name); { Label not_in_string_table(this); - TryInternalizeString(key, &if_keyisindex, &var_index, &if_iskeyunique, - &var_name, ¬_in_string_table, &call_runtime); + TryInternalizeString(CAST(key), &if_keyisindex, &var_index, + &if_iskeyunique, &var_name, ¬_in_string_table, + &call_runtime); BIND(¬_in_string_table); { @@ -1384,9 +1398,9 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { { Label if_found_value(this), return_empty(this), if_not_found(this); - VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_details, MachineRepresentation::kWord32); - VARIABLE(var_raw_value, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); + TVARIABLE(Word32T, var_details); + TVARIABLE(Object, var_raw_value); TryGetOwnProperty(context, object, object, map, instance_type, var_name.value(), &if_found_value, &var_value, @@ -1394,13 +1408,13 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { &if_not_found, kReturnAccessorPair); BIND(&if_found_value); - // 4. Return FromPropertyDescriptor(desc). - Node* js_desc = FromPropertyDetails(context, var_value.value(), - var_details.value(), &call_runtime); + // 4. Return FromPropertyDetails(desc). + TNode js_desc = FromPropertyDetails( + context, var_value.value(), var_details.value(), &call_runtime); args.PopAndReturn(js_desc); BIND(&return_empty); - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); args.PopAndReturn(UndefinedConstant()); BIND(&if_not_found); @@ -1421,7 +1435,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) { TNode desc_array = CAST(desc); // 4. Return FromPropertyDescriptor(desc). 
- Node* js_desc = FromPropertyDescriptor(context, desc_array); + TNode js_desc = FromPropertyDescriptor(context, desc_array); args.PopAndReturn(js_desc); } BIND(&return_undefined); @@ -1440,14 +1454,14 @@ void ObjectBuiltinsAssembler::AddToDictionaryIf( BIND(&done); } -Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, - Node* desc) { - VARIABLE(js_descriptor, MachineRepresentation::kTagged); +TNode ObjectBuiltinsAssembler::FromPropertyDescriptor( + TNode context, TNode desc) { + TVARIABLE(JSObject, js_descriptor); TNode flags = LoadAndUntagToWord32ObjectField( desc, PropertyDescriptorObject::kFlagsOffset); - TNode has_flags = + TNode has_flags = Word32And(flags, Int32Constant(PropertyDescriptorObject::kHasMask)); Label if_accessor_desc(this), if_data_desc(this), if_generic_desc(this), @@ -1465,21 +1479,21 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, BIND(&if_accessor_desc); { - js_descriptor.Bind(ConstructAccessorDescriptor( + js_descriptor = ConstructAccessorDescriptor( context, LoadObjectField(desc, PropertyDescriptorObject::kGetOffset), LoadObjectField(desc, PropertyDescriptorObject::kSetOffset), IsSetWord32(flags), - IsSetWord32(flags))); + IsSetWord32(flags)); Goto(&return_desc); } BIND(&if_data_desc); { - js_descriptor.Bind(ConstructDataDescriptor( + js_descriptor = ConstructDataDescriptor( context, LoadObjectField(desc, PropertyDescriptorObject::kValueOffset), IsSetWord32(flags), IsSetWord32(flags), - IsSetWord32(flags))); + IsSetWord32(flags)); Goto(&return_desc); } @@ -1529,7 +1543,7 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, IsSetWord32(flags)), &bailout); - js_descriptor.Bind(js_desc); + js_descriptor = js_desc; Goto(&return_desc); BIND(&bailout); @@ -1541,36 +1555,36 @@ Node* ObjectBuiltinsAssembler::FromPropertyDescriptor(Node* context, return js_descriptor.value(); } -Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context, - Node* raw_value, - Node* details, - Label* if_bailout) { - VARIABLE(js_descriptor, MachineRepresentation::kTagged); +TNode ObjectBuiltinsAssembler::FromPropertyDetails( + TNode context, TNode raw_value, TNode details, + Label* if_bailout) { + TVARIABLE(JSObject, js_descriptor); Label if_accessor_desc(this), if_data_desc(this), return_desc(this); BranchIfAccessorPair(raw_value, &if_accessor_desc, &if_data_desc); BIND(&if_accessor_desc); { - TNode getter = - LoadObjectField(raw_value, AccessorPair::kGetterOffset); - TNode setter = - LoadObjectField(raw_value, AccessorPair::kSetterOffset); - js_descriptor.Bind(ConstructAccessorDescriptor( + TNode accessor_pair_value = CAST(raw_value); + TNode getter = LoadObjectField( + accessor_pair_value, AccessorPair::kGetterOffset); + TNode setter = LoadObjectField( + accessor_pair_value, AccessorPair::kSetterOffset); + js_descriptor = ConstructAccessorDescriptor( context, GetAccessorOrUndefined(getter, if_bailout), GetAccessorOrUndefined(setter, if_bailout), IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask), - IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask))); + IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)); Goto(&return_desc); } BIND(&if_data_desc); { - js_descriptor.Bind(ConstructDataDescriptor( + js_descriptor = ConstructDataDescriptor( context, raw_value, IsNotSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), IsNotSetWord32(details, PropertyDetails::kAttributesDontEnumMask), - IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask))); + 
IsNotSetWord32(details, PropertyDetails::kAttributesDontDeleteMask)); Goto(&return_desc); } @@ -1578,20 +1592,20 @@ Node* ObjectBuiltinsAssembler::FromPropertyDetails(Node* context, return js_descriptor.value(); } -Node* ObjectBuiltinsAssembler::GetAccessorOrUndefined(Node* accessor, - Label* if_bailout) { +TNode ObjectBuiltinsAssembler::GetAccessorOrUndefined( + TNode accessor, Label* if_bailout) { Label bind_undefined(this, Label::kDeferred), return_result(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(HeapObject, result); GotoIf(IsNull(accessor), &bind_undefined); - result.Bind(accessor); + result = accessor; TNode map = LoadMap(accessor); // TODO(ishell): probe template instantiations cache. GotoIf(IsFunctionTemplateInfoMap(map), if_bailout); Goto(&return_result); BIND(&bind_undefined); - result.Bind(UndefinedConstant()); + result = UndefinedConstant(); Goto(&return_result); BIND(&return_result); diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc index a1da55e0d931e3..b20b288c3d63a3 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.cc +++ b/deps/v8/src/builtins/builtins-promise-gen.cc @@ -21,11 +21,10 @@ namespace v8 { namespace internal { using Node = compiler::Node; -template -using TNode = CodeStubAssembler::TNode; using IteratorRecord = TorqueStructIteratorRecord; -Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) { +TNode PromiseBuiltinsAssembler::AllocateJSPromise( + TNode context) { TNode const native_context = LoadNativeContext(context); TNode const promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); @@ -39,7 +38,7 @@ Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) { RootIndex::kEmptyFixedArray); StoreObjectFieldRoot(promise, JSPromise::kElementsOffset, RootIndex::kEmptyFixedArray); - return promise; + return CAST(promise); } void PromiseBuiltinsAssembler::PromiseInit(Node* promise) { @@ -54,13 +53,14 @@ void PromiseBuiltinsAssembler::PromiseInit(Node* promise) { } } -Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) { +TNode PromiseBuiltinsAssembler::AllocateAndInitJSPromise( + TNode context) { return AllocateAndInitJSPromise(context, UndefinedConstant()); } -Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context, - Node* parent) { - Node* const instance = AllocateJSPromise(context); +TNode PromiseBuiltinsAssembler::AllocateAndInitJSPromise( + TNode context, TNode parent) { + const TNode instance = AllocateJSPromise(context); PromiseInit(instance); Label out(this); @@ -72,11 +72,12 @@ Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context, return instance; } -Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise( - Node* context, v8::Promise::PromiseState status, Node* result) { +TNode PromiseBuiltinsAssembler::AllocateAndSetJSPromise( + TNode context, v8::Promise::PromiseState status, + TNode result) { DCHECK_NE(Promise::kPending, status); - Node* const instance = AllocateJSPromise(context); + const TNode instance = AllocateJSPromise(context); StoreObjectFieldNoWriteBarrier(instance, JSPromise::kReactionsOrResultOffset, result); STATIC_ASSERT(JSPromise::kStatusShift == 0); @@ -97,22 +98,23 @@ Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise( return instance; } -std::pair +std::pair, TNode> PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions( - Node* promise, Node* debug_event, Node* native_context) { - Node* const promise_context = 
CreatePromiseResolvingFunctionsContext( + TNode promise, TNode debug_event, + TNode native_context) { + const TNode promise_context = CreatePromiseResolvingFunctionsContext( promise, debug_event, native_context); - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const resolve_info = LoadContextElement( + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode resolve_info = CAST(LoadContextElement( native_context, - Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX); - Node* const resolve = + Context::PROMISE_CAPABILITY_DEFAULT_RESOLVE_SHARED_FUN_INDEX)); + const TNode resolve = AllocateFunctionWithMapAndContext(map, resolve_info, promise_context); - TNode const reject_info = LoadContextElement( + const TNode reject_info = CAST(LoadContextElement( native_context, - Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX); - Node* const reject = + Context::PROMISE_CAPABILITY_DEFAULT_REJECT_SHARED_FUN_INDEX)); + const TNode reject = AllocateFunctionWithMapAndContext(map, reject_info, promise_context); return std::make_pair(resolve, reject); } @@ -196,7 +198,7 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { BIND(&if_fast_promise_capability); { - Node* promise = + TNode promise = AllocateAndInitJSPromise(native_context, UndefinedConstant()); Node* resolve = nullptr; @@ -226,14 +228,15 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { StoreObjectFieldRoot(capability, PromiseCapability::kRejectOffset, RootIndex::kUndefinedValue); - Node* executor_context = - CreatePromiseGetCapabilitiesExecutorContext(capability, native_context); - TNode executor_info = LoadContextElement( - native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN); - TNode function_map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode executor = CAST(AllocateFunctionWithMapAndContext( - function_map, executor_info, executor_context)); + TNode executor_context = + CAST(CreatePromiseGetCapabilitiesExecutorContext(capability, + native_context)); + TNode executor_info = CAST(LoadContextElement( + native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN)); + TNode function_map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + TNode executor = AllocateFunctionWithMapAndContext( + function_map, executor_info, executor_context); TNode promise = Construct(native_context, CAST(constructor), executor); @@ -258,14 +261,14 @@ TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) { ThrowTypeError(context, MessageTemplate::kPromiseNonCallable); } -Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context, - int slots) { +TNode PromiseBuiltinsAssembler::CreatePromiseContext( + TNode native_context, int slots) { DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); TNode const context = AllocateInNewSpace(FixedArray::SizeFor(slots)); InitializeFunctionContext(native_context, context, slots); - return context; + return CAST(context); } Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext( @@ -278,8 +281,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext( TNode values_array = AllocateJSArray( PACKED_ELEMENTS, array_map, IntPtrConstant(0), SmiConstant(0)); - Node* const context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseAllResolveElementLength); + TNode 
const context = CreatePromiseContext( + CAST(native_context), PromiseBuiltins::kPromiseAllResolveElementLength); StoreContextElementNoWriteBarrier( context, PromiseBuiltins::kPromiseAllResolveElementRemainingSlot, SmiConstant(1)); @@ -301,12 +304,12 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction( index, SmiConstant(PropertyArray::HashField::kMax))); CSA_ASSERT(this, IsNativeContext(native_context)); - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const resolve_info = - LoadContextElement(native_context, slot_index); + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode resolve_info = + CAST(LoadContextElement(native_context, slot_index)); TNode resolve = - Cast(AllocateFunctionWithMapAndContext(map, resolve_info, context)); + AllocateFunctionWithMapAndContext(map, resolve_info, CAST(context)); STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0); StoreObjectFieldNoWriteBarrier(resolve, JSFunction::kPropertiesOrHashOffset, @@ -315,9 +318,10 @@ PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction( return resolve; } -Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( - Node* promise, Node* debug_event, Node* native_context) { - Node* const context = CreatePromiseContext( +TNode PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( + TNode promise, TNode debug_event, + TNode native_context) { + const TNode context = CreatePromiseContext( native_context, PromiseBuiltins::kPromiseContextLength); StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kPromiseSlot, promise); @@ -331,7 +335,8 @@ Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext( Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext( Node* promise_capability, Node* native_context) { int kContextLength = PromiseBuiltins::kCapabilitiesContextLength; - Node* context = CreatePromiseContext(native_context, kContextLength); + TNode context = + CreatePromiseContext(CAST(native_context), kContextLength); StoreContextElementNoWriteBarrier(context, PromiseBuiltins::kCapabilitySlot, promise_capability); return context; @@ -386,14 +391,12 @@ void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) { // ES #sec-performpromisethen void PromiseBuiltinsAssembler::PerformPromiseThen( - Node* context, Node* promise, Node* on_fulfilled, Node* on_rejected, - Node* result_promise_or_capability) { - CSA_ASSERT(this, TaggedIsNotSmi(promise)); - CSA_ASSERT(this, IsJSPromise(promise)); + TNode context, TNode promise, + TNode on_fulfilled, TNode on_rejected, + TNode result_promise_or_capability) { CSA_ASSERT(this, Word32Or(IsCallable(on_fulfilled), IsUndefined(on_fulfilled))); CSA_ASSERT(this, Word32Or(IsCallable(on_rejected), IsUndefined(on_rejected))); - CSA_ASSERT(this, TaggedIsNotSmi(result_promise_or_capability)); CSA_ASSERT( this, Word32Or(Word32Or(IsJSPromise(result_promise_or_capability), @@ -411,9 +414,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( // PromiseReaction holding both the onFulfilled and onRejected callbacks. // Once the {promise} is resolved we decide on the concrete handler to // push onto the microtask queue. 
- TNode const promise_reactions = + const TNode promise_reactions = LoadObjectField(promise, JSPromise::kReactionsOrResultOffset); - Node* const reaction = + const TNode reaction = AllocatePromiseReaction(promise_reactions, result_promise_or_capability, on_fulfilled, on_rejected); StoreObjectField(promise, JSPromise::kReactionsOrResultOffset, reaction); @@ -422,10 +425,9 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_notpending); { - VARIABLE(var_map, MachineRepresentation::kTagged); - VARIABLE(var_handler, MachineRepresentation::kTagged); - VARIABLE(var_handler_context, MachineRepresentation::kTagged, - UndefinedConstant()); + TVARIABLE(Map, var_map); + TVARIABLE(HeapObject, var_handler); + TVARIABLE(Object, var_handler_context, UndefinedConstant()); Label if_fulfilled(this), if_rejected(this, Label::kDeferred), enqueue(this); Branch(IsPromiseStatus(status, v8::Promise::kFulfilled), &if_fulfilled, @@ -433,15 +435,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_fulfilled); { - var_map.Bind(PromiseFulfillReactionJobTaskMapConstant()); - var_handler.Bind(on_fulfilled); + var_map = PromiseFulfillReactionJobTaskMapConstant(); + var_handler = on_fulfilled; Label use_fallback(this, Label::kDeferred), done(this); ExtractHandlerContext(on_fulfilled, &var_handler_context); Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done); BIND(&use_fallback); - var_handler_context.Bind(context); + var_handler_context = context; ExtractHandlerContext(on_rejected, &var_handler_context); Goto(&done); @@ -452,15 +454,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( BIND(&if_rejected); { CSA_ASSERT(this, IsPromiseStatus(status, v8::Promise::kRejected)); - var_map.Bind(PromiseRejectReactionJobTaskMapConstant()); - var_handler.Bind(on_rejected); + var_map = PromiseRejectReactionJobTaskMapConstant(); + var_handler = on_rejected; Label use_fallback(this, Label::kDeferred), done(this); ExtractHandlerContext(on_rejected, &var_handler_context); Branch(IsUndefined(var_handler_context.value()), &use_fallback, &done); BIND(&use_fallback); - var_handler_context.Bind(context); + var_handler_context = context; ExtractHandlerContext(on_fulfilled, &var_handler_context); Goto(&done); BIND(&done); @@ -474,8 +476,8 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( { TNode argument = LoadObjectField(promise, JSPromise::kReactionsOrResultOffset); - Node* microtask = AllocatePromiseReactionJobTask( - var_map.value(), var_handler_context.value(), argument, + TNode microtask = AllocatePromiseReactionJobTask( + var_map.value(), CAST(var_handler_context.value()), argument, var_handler.value(), result_promise_or_capability); CallBuiltin(Builtins::kEnqueueMicrotask, var_handler_context.value(), microtask); @@ -489,13 +491,15 @@ void PromiseBuiltinsAssembler::PerformPromiseThen( // ES #sec-performpromisethen TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) { - Node* const context = Parameter(Descriptor::kContext); - Node* const promise = Parameter(Descriptor::kPromise); - Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled); - Node* const on_rejected = Parameter(Descriptor::kOnRejected); - Node* const result_promise = Parameter(Descriptor::kResultPromise); + const TNode context = CAST(Parameter(Descriptor::kContext)); + const TNode promise = CAST(Parameter(Descriptor::kPromise)); + const TNode on_fulfilled = + CAST(Parameter(Descriptor::kOnFulfilled)); + const TNode on_rejected = + CAST(Parameter(Descriptor::kOnRejected)); + const TNode result_promise = + 
CAST(Parameter(Descriptor::kResultPromise)); - CSA_ASSERT(this, TaggedIsNotSmi(result_promise)); CSA_ASSERT( this, Word32Or(IsJSPromise(result_promise), IsUndefined(result_promise))); @@ -504,9 +508,9 @@ TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) { Return(result_promise); } -Node* PromiseBuiltinsAssembler::AllocatePromiseReaction( - Node* next, Node* promise_or_capability, Node* fulfill_handler, - Node* reject_handler) { +TNode PromiseBuiltinsAssembler::AllocatePromiseReaction( + TNode next, TNode promise_or_capability, + TNode fulfill_handler, TNode reject_handler) { TNode const reaction = Allocate(PromiseReaction::kSize); StoreMapNoWriteBarrier(reaction, RootIndex::kPromiseReactionMap); StoreObjectFieldNoWriteBarrier(reaction, PromiseReaction::kNextOffset, next); @@ -517,12 +521,13 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction( reaction, PromiseReaction::kFulfillHandlerOffset, fulfill_handler); StoreObjectFieldNoWriteBarrier( reaction, PromiseReaction::kRejectHandlerOffset, reject_handler); - return reaction; + return CAST(reaction); } -Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( - Node* map, Node* context, Node* argument, Node* handler, - Node* promise_or_capability) { +TNode +PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( + TNode map, TNode context, TNode argument, + TNode handler, TNode promise_or_capability) { TNode const microtask = Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks); StoreMapNoWriteBarrier(microtask, map); @@ -535,12 +540,14 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask( StoreObjectFieldNoWriteBarrier( microtask, PromiseReactionJobTask::kPromiseOrCapabilityOffset, promise_or_capability); - return microtask; + return CAST(microtask); } -Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( - Node* promise_to_resolve, Node* then, Node* thenable, Node* context) { - TNode const microtask = +TNode +PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( + TNode promise_to_resolve, TNode then, + TNode thenable, TNode context) { + const TNode microtask = Allocate(PromiseResolveThenableJobTask::kSize); StoreMapNoWriteBarrier(microtask, RootIndex::kPromiseResolveThenableJobTaskMap); @@ -553,7 +560,7 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobTask( microtask, PromiseResolveThenableJobTask::kThenOffset, then); StoreObjectFieldNoWriteBarrier( microtask, PromiseResolveThenableJobTask::kThenableOffset, thenable); - return microtask; + return CAST(microtask); } // ES #sec-triggerpromisereactions @@ -1003,7 +1010,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { BIND(&if_targetisnotmodified); { - Node* const instance = AllocateAndInitJSPromise(context); + TNode const instance = AllocateAndInitJSPromise(context); var_result.Bind(instance); Goto(&debug_push); } @@ -1035,7 +1042,7 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { Node *resolve, *reject; std::tie(resolve, reject) = CreatePromiseResolvingFunctions( - var_result.value(), TrueConstant(), native_context); + CAST(var_result.value()), TrueConstant(), native_context); Node* const maybe_exception = CallJS( CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), @@ -1080,8 +1087,8 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { // V8 Extras: v8.createPromise(parent) TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) { - Node* const parent = Parameter(Descriptor::kParent); - Node* const context = 
Parameter(Descriptor::kContext); + const TNode parent = CAST(Parameter(Descriptor::kParent)); + const TNode context = CAST(Parameter(Descriptor::kContext)); Return(AllocateAndInitJSPromise(context, parent)); } @@ -1127,14 +1134,15 @@ TF_BUILTIN(PromiseInternalResolve, PromiseBuiltinsAssembler) { // Promise.prototype.then ( onFulfilled, onRejected ) TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { // 1. Let promise be the this value. - Node* const promise = Parameter(Descriptor::kReceiver); - Node* const on_fulfilled = Parameter(Descriptor::kOnFulfilled); - Node* const on_rejected = Parameter(Descriptor::kOnRejected); - Node* const context = Parameter(Descriptor::kContext); + const TNode maybe_promise = CAST(Parameter(Descriptor::kReceiver)); + const TNode on_fulfilled = CAST(Parameter(Descriptor::kOnFulfilled)); + const TNode on_rejected = CAST(Parameter(Descriptor::kOnRejected)); + const TNode context = CAST(Parameter(Descriptor::kContext)); // 2. If IsPromise(promise) is false, throw a TypeError exception. - ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE, + ThrowIfNotInstanceType(context, maybe_promise, JS_PROMISE_TYPE, "Promise.prototype.then"); + TNode js_promise = CAST(maybe_promise); // 3. Let C be ? SpeciesConstructor(promise, %Promise%). Label fast_promise_capability(this), slow_constructor(this, Label::kDeferred), @@ -1142,26 +1150,27 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { TNode const native_context = LoadNativeContext(context); TNode promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); - TNode const promise_map = LoadMap(promise); + TNode const promise_map = LoadMap(js_promise); BranchIfPromiseSpeciesLookupChainIntact( native_context, promise_map, &fast_promise_capability, &slow_constructor); BIND(&slow_constructor); TNode constructor = - SpeciesConstructor(native_context, promise, promise_fun); + SpeciesConstructor(native_context, js_promise, promise_fun); Branch(TaggedEqual(constructor, promise_fun), &fast_promise_capability, &slow_promise_capability); // 4. Let resultCapability be ? NewPromiseCapability(C). Label perform_promise_then(this); - VARIABLE(var_result_promise, MachineRepresentation::kTagged); - VARIABLE(var_result_promise_or_capability, MachineRepresentation::kTagged); + TVARIABLE(Object, var_result_promise); + TVARIABLE(HeapObject, var_result_promise_or_capability); BIND(&fast_promise_capability); { - Node* const result_promise = AllocateAndInitJSPromise(context, promise); - var_result_promise_or_capability.Bind(result_promise); - var_result_promise.Bind(result_promise); + const TNode result_promise = + AllocateAndInitJSPromise(context, js_promise); + var_result_promise_or_capability = result_promise; + var_result_promise = result_promise; Goto(&perform_promise_then); } @@ -1170,9 +1179,9 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { TNode const debug_event = TrueConstant(); TNode const capability = CAST(CallBuiltin( Builtins::kNewPromiseCapability, context, constructor, debug_event)); - var_result_promise.Bind( - LoadObjectField(capability, PromiseCapability::kPromiseOffset)); - var_result_promise_or_capability.Bind(capability); + var_result_promise = + LoadObjectField(capability, PromiseCapability::kPromiseOffset); + var_result_promise_or_capability = capability; Goto(&perform_promise_then); } @@ -1187,30 +1196,30 @@ TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { // 3. If IsCallable(onFulfilled) is false, then // a. Set onFulfilled to undefined. 
- VARIABLE(var_on_fulfilled, MachineRepresentation::kTagged, on_fulfilled); + TVARIABLE(Object, var_on_fulfilled, on_fulfilled); Label if_fulfilled_done(this), if_fulfilled_notcallable(this); GotoIf(TaggedIsSmi(on_fulfilled), &if_fulfilled_notcallable); - Branch(IsCallable(on_fulfilled), &if_fulfilled_done, + Branch(IsCallable(CAST(on_fulfilled)), &if_fulfilled_done, &if_fulfilled_notcallable); BIND(&if_fulfilled_notcallable); - var_on_fulfilled.Bind(UndefinedConstant()); + var_on_fulfilled = UndefinedConstant(); Goto(&if_fulfilled_done); BIND(&if_fulfilled_done); // 4. If IsCallable(onRejected) is false, then // a. Set onRejected to undefined. - VARIABLE(var_on_rejected, MachineRepresentation::kTagged, on_rejected); + TVARIABLE(Object, var_on_rejected, on_rejected); Label if_rejected_done(this), if_rejected_notcallable(this); GotoIf(TaggedIsSmi(on_rejected), &if_rejected_notcallable); - Branch(IsCallable(on_rejected), &if_rejected_done, + Branch(IsCallable(CAST(on_rejected)), &if_rejected_done, &if_rejected_notcallable); BIND(&if_rejected_notcallable); - var_on_rejected.Bind(UndefinedConstant()); + var_on_rejected = UndefinedConstant(); Goto(&if_rejected_done); BIND(&if_rejected_done); - PerformPromiseThen(context, promise, var_on_fulfilled.value(), - var_on_rejected.value(), + PerformPromiseThen(context, js_promise, CAST(var_on_fulfilled.value()), + CAST(var_on_rejected.value()), var_result_promise_or_capability.value()); Return(var_result_promise.value()); } @@ -1522,7 +1531,7 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) { // create NewPromiseCapability. BIND(&if_nativepromise); { - Node* const result = AllocateAndInitJSPromise(context); + TNode const result = AllocateAndInitJSPromise(context); CallBuiltin(Builtins::kResolvePromise, context, result, value); Return(result); } @@ -1592,7 +1601,7 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { BIND(&if_nativepromise); { - Node* const promise = + TNode const promise = AllocateAndSetJSPromise(context, v8::Promise::kRejected, reason); CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise, reason); @@ -1621,21 +1630,21 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { std::pair PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions( Node* on_finally, Node* constructor, Node* native_context) { - Node* const promise_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseFinallyContextLength); + const TNode promise_context = CreatePromiseContext( + CAST(native_context), PromiseBuiltins::kPromiseFinallyContextLength); StoreContextElementNoWriteBarrier( promise_context, PromiseBuiltins::kOnFinallySlot, on_finally); StoreContextElementNoWriteBarrier( promise_context, PromiseBuiltins::kConstructorSlot, constructor); - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const then_finally_info = LoadContextElement( - native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN); - Node* const then_finally = AllocateFunctionWithMapAndContext( + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode then_finally_info = CAST(LoadContextElement( + native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN)); + TNode const then_finally = AllocateFunctionWithMapAndContext( map, then_finally_info, promise_context); - TNode const catch_finally_info = LoadContextElement( - native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN); - Node* const catch_finally = 
AllocateFunctionWithMapAndContext( + const TNode catch_finally_info = CAST(LoadContextElement( + native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN)); + TNode const catch_finally = AllocateFunctionWithMapAndContext( map, catch_finally_info, promise_context); return std::make_pair(then_finally, catch_finally); } @@ -1650,15 +1659,16 @@ TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) { Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value, Node* native_context) { - Node* const value_thunk_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); + const TNode value_thunk_context = CreatePromiseContext( + CAST(native_context), + PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); StoreContextElementNoWriteBarrier(value_thunk_context, PromiseBuiltins::kValueSlot, value); - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const value_thunk_info = LoadContextElement( - native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN); - Node* const value_thunk = AllocateFunctionWithMapAndContext( + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode value_thunk_info = CAST(LoadContextElement( + native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN)); + TNode const value_thunk = AllocateFunctionWithMapAndContext( map, value_thunk_info, value_thunk_context); return value_thunk; } @@ -1711,15 +1721,16 @@ TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) { Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason, Node* native_context) { - Node* const thrower_context = CreatePromiseContext( - native_context, PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); + const TNode thrower_context = CreatePromiseContext( + CAST(native_context), + PromiseBuiltins::kPromiseValueThunkOrReasonContextLength); StoreContextElementNoWriteBarrier(thrower_context, PromiseBuiltins::kValueSlot, reason); - TNode const map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const thrower_info = LoadContextElement( - native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN); - Node* const thrower = + const TNode map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode thrower_info = CAST(LoadContextElement( + native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN)); + TNode const thrower = AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context); return thrower; } @@ -1919,7 +1930,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { Label do_enqueue(this), if_fulfill(this), if_reject(this, Label::kDeferred), if_runtime(this, Label::kDeferred); TVARIABLE(Object, var_reason); - TVARIABLE(Object, var_then); + TVARIABLE(JSReceiver, var_then); // If promise hook is enabled or the debugger is active, let // the runtime handle this operation, which greatly reduces @@ -1955,7 +1966,8 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { BIND(&if_fast); { // The {resolution} is a native Promise in this case. 
- var_then = LoadContextElement(native_context, Context::PROMISE_THEN_INDEX); + var_then = + CAST(LoadContextElement(native_context, Context::PROMISE_THEN_INDEX)); Goto(&do_enqueue); } @@ -1987,7 +1999,7 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { GotoIf(TaggedIsSmi(then), &if_fulfill); TNode const then_map = LoadMap(CAST(then)); GotoIfNot(IsCallableMap(then_map), &if_fulfill); - var_then = then; + var_then = CAST(then); Goto(&do_enqueue); } @@ -1995,8 +2007,9 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) { { // 12. Perform EnqueueJob("PromiseJobs", PromiseResolveThenableJob, // «promise, resolution, thenAction»). - Node* const task = AllocatePromiseResolveThenableJobTask( - promise, var_then.value(), resolution, native_context); + const TNode task = + AllocatePromiseResolveThenableJobTask(promise, var_then.value(), + CAST(resolution), native_context); TailCallBuiltin(Builtins::kEnqueueMicrotask, native_context, task); } @@ -2150,8 +2163,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // Register the PromiseReaction immediately on the {next_value}, not // passing any chained promise since neither async_hooks nor DevTools // are enabled, so there's no use of the resulting promise. - PerformPromiseThen(native_context, next_value, resolve_element_fun, - reject_element_fun, UndefinedConstant()); + PerformPromiseThen(native_context, CAST(next_value), + CAST(resolve_element_fun), CAST(reject_element_fun), + UndefinedConstant()); Goto(&loop); } diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h index 633e3321aa17d3..b2ae8fe8765d8c 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.h +++ b/deps/v8/src/builtins/builtins-promise-gen.h @@ -22,29 +22,34 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { // // This uses undefined as the parent promise for the promise init // hook. - Node* AllocateAndInitJSPromise(Node* context); + TNode AllocateAndInitJSPromise(TNode context); // This uses the given parent as the parent promise for the promise // init hook. - Node* AllocateAndInitJSPromise(Node* context, Node* parent); + TNode AllocateAndInitJSPromise(TNode context, + TNode parent); // This allocates and initializes a promise with the given state and // fields. 
- Node* AllocateAndSetJSPromise(Node* context, v8::Promise::PromiseState status, - Node* result); + TNode AllocateAndSetJSPromise(TNode context, + v8::Promise::PromiseState status, + TNode result); - Node* AllocatePromiseReaction(Node* next, Node* promise_or_capability, - Node* fulfill_handler, Node* reject_handler); + TNode AllocatePromiseReaction( + TNode next, TNode promise_or_capability, + TNode fulfill_handler, TNode reject_handler); - Node* AllocatePromiseReactionJobTask(Node* map, Node* context, Node* argument, - Node* handler, - Node* promise_or_capability); - Node* AllocatePromiseResolveThenableJobTask(Node* promise_to_resolve, - Node* then, Node* thenable, - Node* context); + TNode AllocatePromiseReactionJobTask( + TNode map, TNode context, TNode argument, + TNode handler, TNode promise_or_capability); - std::pair CreatePromiseResolvingFunctions(Node* promise, - Node* debug_event, - Node* native_context); + TNode AllocatePromiseResolveThenableJobTask( + TNode promise_to_resolve, TNode then, + TNode thenable, TNode context); + + std::pair, TNode> + CreatePromiseResolvingFunctions(TNode promise, + TNode debug_event, + TNode native_context); Node* PromiseHasHandler(Node* promise); @@ -62,8 +67,9 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { Node* native_context, int slot_index); - Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event, - Node* native_context); + TNode CreatePromiseResolvingFunctionsContext( + TNode promise, TNode debug_event, + TNode native_context); Node* CreatePromiseGetCapabilitiesExecutorContext(Node* promise_capability, Node* native_context); @@ -74,11 +80,13 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { void PromiseSetHasHandler(Node* promise); void PromiseSetHandledHint(Node* promise); - void PerformPromiseThen(Node* context, Node* promise, Node* on_fulfilled, - Node* on_rejected, - Node* result_promise_or_capability); + void PerformPromiseThen(TNode context, TNode promise, + TNode on_fulfilled, + TNode on_rejected, + TNode result_promise_or_capability); - Node* CreatePromiseContext(Node* native_context, int slots); + TNode CreatePromiseContext(TNode native_context, + int slots); Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result, PromiseReaction::Type type); @@ -161,7 +169,7 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler { v8::Promise::PromiseState expected); void PromiseSetStatus(Node* promise, v8::Promise::PromiseState status); - Node* AllocateJSPromise(Node* context); + TNode AllocateJSPromise(TNode context); void ExtractHandlerContext(Node* handler, Variable* var_context); void Generate_PromiseAll( diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index bb1137735cdcf3..71d4e8226f595b 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -14,7 +14,7 @@ namespace v8 { namespace internal { -compiler::TNode ProxiesCodeStubAssembler::AllocateProxy( +TNode ProxiesCodeStubAssembler::AllocateProxy( TNode context, TNode target, TNode handler) { VARIABLE(map, MachineRepresentation::kTagged); @@ -59,7 +59,8 @@ compiler::TNode ProxiesCodeStubAssembler::AllocateProxy( } Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( - Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode) { + Node* context, const CodeStubArguments& args, Node* argc, + ParameterMode mode) { 
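In AllocateJSArrayForCodeStubArguments below, the two CodeStubArguments::ForEach callbacks likewise drop the default-capture lambda taking a raw Node* in favour of a reference capture and a typed argument. A sketch of the new callback shape, with the Object element type of the argument written in as an assumption:

// Copy each stub argument into the freshly allocated elements store,
// advancing the tagged offset after every write.
args.ForEach(list, [&](TNode<Object> arg) {
  StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements,
                      offset.value(), arg);
  Increment(&offset, kTaggedSize);
});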
Comment("AllocateJSArrayForCodeStubArguments"); Label if_empty_array(this), allocate_js_array(this); @@ -80,7 +81,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( GotoIf(SmiGreaterThan(length, SmiConstant(FixedArray::kMaxRegularLength)), &if_large_object); - args.ForEach(list, [=, &offset](Node* arg) { + args.ForEach(list, [&](TNode arg) { StoreNoWriteBarrier(MachineRepresentation::kTagged, allocated_elements, offset.value(), arg); Increment(&offset, kTaggedSize); @@ -89,7 +90,7 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( BIND(&if_large_object); { - args.ForEach(list, [=, &offset](Node* arg) { + args.ForEach(list, [&](TNode arg) { Store(allocated_elements, offset.value(), arg); Increment(&offset, kTaggedSize); }); @@ -124,20 +125,19 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( return context; } -compiler::TNode -ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode context, - TNode proxy) { +TNode ProxiesCodeStubAssembler::AllocateProxyRevokeFunction( + TNode context, TNode proxy) { TNode const native_context = LoadNativeContext(context); - Node* const proxy_context = - CreateProxyRevokeFunctionContext(proxy, native_context); - TNode const revoke_map = LoadContextElement( - native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); - TNode const revoke_info = - LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN); + const TNode proxy_context = + CAST(CreateProxyRevokeFunctionContext(proxy, native_context)); + const TNode revoke_map = CAST(LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX)); + const TNode revoke_info = CAST( + LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN)); - return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info, - proxy_context)); + return AllocateFunctionWithMapAndContext(revoke_map, revoke_info, + proxy_context); } TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h index cb51faf57553fd..03b3749bf5d44a 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.h +++ b/deps/v8/src/builtins/builtins-proxy-gen.h @@ -39,10 +39,9 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { kProxyContextLength, }; - Node* AllocateJSArrayForCodeStubArguments( - Node* context, - CodeStubArguments& args, // NOLINT(runtime/references) - Node* argc, ParameterMode mode); + Node* AllocateJSArrayForCodeStubArguments(Node* context, + const CodeStubArguments& args, + Node* argc, ParameterMode mode); private: Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context); diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc index 744a443ecc2dc1..6cffd6ed55ba39 100644 --- a/deps/v8/src/builtins/builtins-reflect-gen.cc +++ b/deps/v8/src/builtins/builtins-reflect-gen.cc @@ -11,12 +11,12 @@ namespace internal { // ES section #sec-reflect.has TF_BUILTIN(ReflectHas, CodeStubAssembler) { - Node* target = Parameter(Descriptor::kTarget); - Node* key = Parameter(Descriptor::kKey); + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode key = CAST(Parameter(Descriptor::kKey)); TNode context = CAST(Parameter(Descriptor::kContext)); - ThrowIfNotJSReceiver(context, CAST(target), - MessageTemplate::kCalledOnNonObject, "Reflect.has"); + ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject, + "Reflect.has"); 
Return(CallBuiltin(Builtins::kHasProperty, context, target, key)); } diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index f879d70c676329..b333f2a820c681 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -23,8 +23,6 @@ namespace v8 { namespace internal { using compiler::Node; -template -using TNode = compiler::TNode; // Tail calls the regular expression interpreter. // static @@ -80,7 +78,8 @@ TNode RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode code) { TNode RegExpBuiltinsAssembler::AllocateRegExpResult( TNode context, TNode length, TNode index, - TNode input, TNode* elements_out) { + TNode input, TNode match_info, + TNode* elements_out) { CSA_ASSERT(this, SmiLessThanOrEqual( length, SmiConstant(JSArray::kMaxFastArrayLength))); CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0))); @@ -90,9 +89,8 @@ TNode RegExpBuiltinsAssembler::AllocateRegExpResult( const ElementsKind elements_kind = PACKED_ELEMENTS; TNode map = CAST(LoadContextElement(LoadNativeContext(context), Context::REGEXP_RESULT_MAP_INDEX)); - Node* no_allocation_site = nullptr; + TNode no_allocation_site = {}; TNode length_intptr = SmiUntag(length); - TNode capacity = length_intptr; // Note: The returned `elements` may be in young large object space, but // `array` is guaranteed to be in new space so we could skip write barriers @@ -100,18 +98,29 @@ TNode RegExpBuiltinsAssembler::AllocateRegExpResult( TNode array; TNode elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - elements_kind, map, length, no_allocation_site, capacity, + elements_kind, map, length, no_allocation_site, length_intptr, INTPTR_PARAMETERS, kAllowLargeObjectAllocation, JSRegExpResult::kSize); // Finish result initialization. TNode result = CAST(array); + // Load undefined value once here to avoid multiple LoadRoots. + TNode undefined_value = UncheckedCast( + CodeAssembler::LoadRoot(RootIndex::kUndefinedValue)); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index); // TODO(jgruber,tebbi): Could skip barrier but the MemoryOptimizer complains. StoreObjectField(result, JSRegExpResult::kInputOffset, input); StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset, - UndefinedConstant()); + undefined_value); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kNamesOffset, + undefined_value); + + // Stash match_info in order to build JSRegExpResultIndices lazily when the + // 'indices' property is accessed. + StoreObjectField(result, JSRegExpResult::kCachedIndicesOrMatchInfoOffset, + match_info); // Finish elements initialization. 
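The AllocateRegExpResult hunk above threads match_info through to the freshly allocated JSRegExpResult and initializes the new names slot, so the 'indices' data can be built lazily on first access; it also loads the undefined root once and reuses it. A condensed sketch of the added initialization, with the Oddball element type of the undefined constant treated as an assumption:

// Load undefined once instead of emitting several LoadRoots.
TNode<Oddball> undefined_value = UncheckedCast<Oddball>(
    CodeAssembler::LoadRoot(RootIndex::kUndefinedValue));

StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
                               undefined_value);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kNamesOffset,
                               undefined_value);

// Keep the match info around so the result's 'indices' can be materialized
// lazily the first time the property is accessed.
StoreObjectField(result, JSRegExpResult::kCachedIndicesOrMatchInfoOffset,
                 match_info);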
@@ -213,7 +222,7 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode result_elements; TNode result = AllocateRegExpResult( - context, num_results, start, string, &result_elements); + context, num_results, start, string, match_info, &result_elements); UnsafeStoreFixedArrayElement(result_elements, 0, first); @@ -228,8 +237,7 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2)); TVARIABLE(IntPtrT, var_to_cursor, IntPtrConstant(1)); - Variable* vars[] = {&var_from_cursor, &var_to_cursor}; - Label loop(this, 2, vars); + Label loop(this, {&var_from_cursor, &var_to_cursor}); Goto(&loop); BIND(&loop); @@ -289,6 +297,9 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode names_length = LoadAndUntagFixedArrayBaseLength(names); CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero())); + // Stash names in case we need them to build the indices array later. + StoreObjectField(result, JSRegExpResult::kNamesOffset, names); + // Allocate a new object to store the named capture properties. // TODO(jgruber): Could be optimized by adding the object map to the heap // root list. @@ -305,9 +316,7 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TVARIABLE(IntPtrT, var_i, IntPtrZero()); - Variable* vars[] = {&var_i}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label loop(this, vars_count, vars); + Label loop(this, &var_i); Goto(&loop); BIND(&loop); @@ -355,9 +364,10 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( } void RegExpBuiltinsAssembler::GetStringPointers( - Node* const string_data, Node* const offset, Node* const last_index, - Node* const string_length, String::Encoding encoding, - Variable* var_string_start, Variable* var_string_end) { + TNode string_data, TNode offset, + TNode last_index, TNode string_length, + String::Encoding encoding, TVariable* var_string_start, + TVariable* var_string_end) { DCHECK_EQ(var_string_start->rep(), MachineType::PointerRepresentation()); DCHECK_EQ(var_string_end->rep(), MachineType::PointerRepresentation()); @@ -365,13 +375,14 @@ void RegExpBuiltinsAssembler::GetStringPointers( ? UINT8_ELEMENTS : UINT16_ELEMENTS; - TNode const from_offset = ElementOffsetFromIndex( - IntPtrAdd(offset, last_index), kind, INTPTR_PARAMETERS); - var_string_start->Bind(IntPtrAdd(string_data, from_offset)); + TNode from_offset = + ElementOffsetFromIndex(IntPtrAdd(offset, last_index), kind); + *var_string_start = + ReinterpretCast(IntPtrAdd(string_data, from_offset)); - TNode const to_offset = ElementOffsetFromIndex( - IntPtrAdd(offset, string_length), kind, INTPTR_PARAMETERS); - var_string_end->Bind(IntPtrAdd(string_data, to_offset)); + TNode to_offset = + ElementOffsetFromIndex(IntPtrAdd(offset, string_length), kind); + *var_string_end = ReinterpretCast(IntPtrAdd(string_data, to_offset)); } TNode RegExpBuiltinsAssembler::RegExpExecInternal( @@ -507,27 +518,18 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( GotoIf(TaggedIsSmi(var_code.value()), &runtime); TNode code = CAST(var_code.value()); - // Tier-up in runtime if ticks are non-zero and tier-up hasn't happened yet - // and ensure that a RegExp stack is allocated when using compiled Irregexp. + // Ensure that a RegExp stack is allocated when using compiled Irregexp. + // TODO(jgruber): Guarantee an allocated stack and remove this check. 
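The block that follows drops the Irregexp tier-up tick check that previously forced a bailout to the runtime and keeps only the guard that a RegExp stack has been allocated. A sketch of the reduced control flow (a fragment of RegExpExecInternal, not standalone; the IntPtrT element type is an assumption):

Label next(this);
GotoIfNot(TaggedIsSmi(var_bytecode.value()), &next);
CSA_ASSERT(this, SmiEqual(CAST(var_bytecode.value()),
                          SmiConstant(JSRegExp::kUninitializedValue)));

// Bail out to the runtime if no RegExp stack has been allocated yet.
TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
    Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
Branch(IntPtrEqual(stack_size, IntPtrZero()), &runtime, &next);

BIND(&next);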
{ - Label next(this), check_tier_up(this); - GotoIfNot(TaggedIsSmi(var_bytecode.value()), &check_tier_up); + Label next(this); + GotoIfNot(TaggedIsSmi(var_bytecode.value()), &next); CSA_ASSERT(this, SmiEqual(CAST(var_bytecode.value()), SmiConstant(JSRegExp::kUninitializedValue))); - // Ensure RegExp stack is allocated. TNode stack_size = UncheckedCast( Load(MachineType::IntPtr(), regexp_stack_memory_size_address)); - GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime); - Goto(&next); - - // Check if tier-up is requested. - BIND(&check_tier_up); - TNode ticks = CAST( - UnsafeLoadFixedArrayElement(data, JSRegExp::kIrregexpTierUpTicksIndex)); - GotoIf(SmiToInt32(ticks), &runtime); + Branch(IntPtrEqual(stack_size, IntPtrZero()), &runtime, &next); - Goto(&next); BIND(&next); } @@ -603,13 +605,18 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( TNode code_entry = LoadCodeObjectEntry(code); - TNode result = UncheckedCast(CallCFunction( - code_entry, retval_type, std::make_pair(arg0_type, arg0), - std::make_pair(arg1_type, arg1), std::make_pair(arg2_type, arg2), - std::make_pair(arg3_type, arg3), std::make_pair(arg4_type, arg4), - std::make_pair(arg5_type, arg5), std::make_pair(arg6_type, arg6), - std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8), - std::make_pair(arg9_type, arg9))); + // AIX uses function descriptors on CFunction calls. code_entry in this case + // may also point to a Regex interpreter entry trampoline which does not + // have a function descriptor. This method is ineffective on other platforms + // and is equivalent to CallCFunction. + TNode result = + UncheckedCast(CallCFunctionWithoutFunctionDescriptor( + code_entry, retval_type, std::make_pair(arg0_type, arg0), + std::make_pair(arg1_type, arg1), std::make_pair(arg2_type, arg2), + std::make_pair(arg3_type, arg3), std::make_pair(arg4_type, arg4), + std::make_pair(arg5_type, arg5), std::make_pair(arg6_type, arg6), + std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8), + std::make_pair(arg9_type, arg9))); // Check the result. // We expect exactly one result since we force the called regexp to behave @@ -656,18 +663,18 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( // Fill match and capture offsets in match_info. 
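The capture-copy loop that follows switches to the typed form of BuildFastLoop: the index type becomes a template argument, the body lambda receives a typed offset, and the separate ParameterMode argument disappears. A sketch of the new call shape, with IntPtrT assumed as the index type and the store into match_info summarized as a comment:

BuildFastLoop<IntPtrT>(
    vars, IntPtrZero(), limit_offset,
    [&](TNode<IntPtrT> offset) {
      // Read one int32 capture offset from the static offsets vector, tag it
      // as a Smi, write it into match_info at var_to_offset, then advance.
      TNode<Int32T> value = UncheckedCast<Int32T>(Load(
          MachineType::Int32(), static_offsets_vector_address, offset));
      TNode<Smi> smi_value = SmiFromInt32(value);
      USE(smi_value);  // Tagged store into match_info elided in this sketch.
      Increment(&var_to_offset, kTaggedSize);
    },
    kInt32Size, IndexAdvanceMode::kPost);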
{ - TNode limit_offset = ElementOffsetFromIndex( - register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0); + TNode limit_offset = + ElementOffsetFromIndex(register_count, INT32_ELEMENTS, 0); TNode to_offset = ElementOffsetFromIndex( IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS, - INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag); + RegExpMatchInfo::kHeaderSize - kHeapObjectTag); TVARIABLE(IntPtrT, var_to_offset, to_offset); VariableList vars({&var_to_offset}, zone()); - BuildFastLoop( + BuildFastLoop( vars, IntPtrZero(), limit_offset, - [=, &var_to_offset](Node* offset) { + [&](TNode offset) { TNode value = UncheckedCast(Load( MachineType::Int32(), static_offsets_vector_address, offset)); TNode smi_value = SmiFromInt32(value); @@ -675,7 +682,7 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( var_to_offset.value(), smi_value); Increment(&var_to_offset, kTaggedSize); }, - kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kInt32Size, IndexAdvanceMode::kPost); } var_result = match_info; @@ -733,7 +740,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult( TNode context, TNode maybe_regexp, TNode string, Label* if_didnotmatch, const bool is_fastpath) { if (!is_fastpath) { - ThrowIfNotInstanceType(context, maybe_regexp, JS_REGEXP_TYPE, + ThrowIfNotInstanceType(context, maybe_regexp, JS_REG_EXP_TYPE, "RegExp.prototype.exec"); } @@ -894,14 +901,13 @@ TNode RegExpBuiltinsAssembler::IsReceiverInitialRegExpPrototype( return TaggedEqual(receiver, initial_prototype); } -Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( - SloppyTNode context, SloppyTNode object, - SloppyTNode map) { +TNode RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + TNode context, TNode object, TNode map) { Label out(this); - VARIABLE(var_result, MachineRepresentation::kWord32); + TVARIABLE(BoolT, var_result); #ifdef V8_ENABLE_FORCE_SLOW_PATH - var_result.Bind(Int32Constant(0)); + var_result = Int32FalseConstant(); GotoIfForceSlowPath(&out); #endif @@ -912,13 +918,13 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset); TNode const has_initialmap = TaggedEqual(map, initial_map); - var_result.Bind(has_initialmap); + var_result = has_initialmap; GotoIfNot(has_initialmap, &out); // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. 
TNode last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); - var_result.Bind(TaggedIsPositiveSmi(last_index)); + var_result = TaggedIsPositiveSmi(last_index); Goto(&out); BIND(&out); @@ -939,7 +945,7 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( GotoIfForceSlowPath(&out); #endif - TNode is_regexp = HasInstanceType(object, JS_REGEXP_TYPE); + TNode is_regexp = HasInstanceType(object, JS_REG_EXP_TYPE); var_result = is_regexp; GotoIfNot(is_regexp, &out); @@ -970,8 +976,8 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( return var_result.value(); } -Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( - SloppyTNode context, SloppyTNode object) { +TNode RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + TNode context, TNode object) { CSA_ASSERT(this, TaggedIsNotSmi(object)); return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object))); } @@ -1046,10 +1052,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp_Permissive( if_isunmodified, if_ismodified); } -void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context, - Node* const object, - Label* if_isunmodified, - Label* if_ismodified) { +void RegExpBuiltinsAssembler::BranchIfFastRegExpResult( + const TNode context, const TNode object, + Label* if_isunmodified, Label* if_ismodified) { // Could be a Smi. TNode const map = LoadReceiverMap(object); @@ -1061,15 +1066,6 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context, if_ismodified); } -// Slow path stub for RegExpPrototypeExec to decrease code size. -TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) { - TNode regexp = CAST(Parameter(Descriptor::kReceiver)); - TNode string = CAST(Parameter(Descriptor::kString)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - Return(RegExpPrototypeExecBody(context, regexp, string, false)); -} - // Fast path stub for ATOM regexps. String matching is done by StringIndexOf, // and {match_info} is updated on success. // The slow path is implemented in RegExp::AtomExec. @@ -1149,33 +1145,6 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) { Return(RegExpExecInternal(context, regexp, string, last_index, match_info)); } -// ES#sec-regexp.prototype.exec -// RegExp.prototype.exec ( string ) -TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) { - TNode maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode maybe_string = CAST(Parameter(Descriptor::kString)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSRegExp. - ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE, - "RegExp.prototype.exec"); - TNode receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. 
- TNode string = ToString_Inline(context, maybe_string); - - Label if_isfastpath(this), if_isslowpath(this); - Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath, - &if_isslowpath); - - BIND(&if_isfastpath); - Return(RegExpPrototypeExecBody(context, receiver, string, true)); - - BIND(&if_isslowpath); - Return(CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, receiver, - string)); -} - TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, TNode regexp, bool is_fastpath) { @@ -1246,8 +1215,8 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, { TNode const result = AllocateSeqOneByteString(var_length.value()); - VARIABLE(var_offset, MachineType::PointerRepresentation(), - IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + TVARIABLE(IntPtrT, var_offset, + IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); #define CASE_FOR_FLAG(FLAG, CHAR) \ do { \ @@ -1256,7 +1225,7 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, TNode const value = Int32Constant(CHAR); \ StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \ var_offset.value(), value); \ - var_offset.Bind(IntPtrAdd(var_offset.value(), int_one)); \ + var_offset = IntPtrAdd(var_offset.value(), int_one); \ Goto(&next); \ BIND(&next); \ } while (false) @@ -1273,64 +1242,11 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, } } -// ES#sec-isregexp IsRegExp ( argument ) -TNode RegExpBuiltinsAssembler::IsRegExp(TNode context, - TNode maybe_receiver) { - Label out(this), if_isregexp(this); - - TVARIABLE(BoolT, var_result, Int32FalseConstant()); - - GotoIf(TaggedIsSmi(maybe_receiver), &out); - GotoIfNot(IsJSReceiver(CAST(maybe_receiver)), &out); - - TNode receiver = CAST(maybe_receiver); - - // Check @@match. - { - TNode value = - GetProperty(context, receiver, isolate()->factory()->match_symbol()); - - Label match_isundefined(this), match_isnotundefined(this); - Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined); - - BIND(&match_isundefined); - Branch(IsJSRegExp(receiver), &if_isregexp, &out); - - BIND(&match_isnotundefined); - Label match_istrueish(this), match_isfalseish(this); - BranchIfToBooleanIsTrue(value, &match_istrueish, &match_isfalseish); - - // The common path. Symbol.match exists, equals the RegExpPrototypeMatch - // function (and is thus trueish), and the receiver is a JSRegExp. - BIND(&match_istrueish); - GotoIf(IsJSRegExp(receiver), &if_isregexp); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp)); - Goto(&if_isregexp); - - BIND(&match_isfalseish); - GotoIfNot(IsJSRegExp(receiver), &out); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp)); - Goto(&out); - } - - BIND(&if_isregexp); - var_result = Int32TrueConstant(); - Goto(&out); - - BIND(&out); - return var_result.value(); -} - // ES#sec-regexpinitialize // Runtime Semantics: RegExpInitialize ( obj, pattern, flags ) -Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context, - Node* const regexp, - Node* const maybe_pattern, - Node* const maybe_flags) { - CSA_ASSERT(this, IsJSRegExp(regexp)); - +TNode RegExpBuiltinsAssembler::RegExpInitialize( + const TNode context, const TNode regexp, + const TNode maybe_pattern, const TNode maybe_flags) { // Normalize pattern. 
TNode const pattern = Select( IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); }, @@ -1437,7 +1353,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) { // Allocate. - VARIABLE(var_regexp, MachineRepresentation::kTagged); + TVARIABLE(JSRegExp, var_regexp); { Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred), next(this); @@ -1448,25 +1364,23 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) { { TNode const initial_map = CAST(LoadObjectField( regexp_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode const regexp = AllocateJSObjectFromMap(initial_map); - var_regexp.Bind(regexp); + var_regexp = CAST(AllocateJSObjectFromMap(initial_map)); Goto(&next); } BIND(&allocate_generic); { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - TNode const regexp = constructor_assembler.EmitFastNewObject( - context, regexp_function, CAST(var_new_target.value())); - var_regexp.Bind(regexp); + var_regexp = CAST(constructor_assembler.EmitFastNewObject( + context, regexp_function, CAST(var_new_target.value()))); Goto(&next); } BIND(&next); } - Node* const result = RegExpInitialize(context, var_regexp.value(), - var_pattern.value(), var_flags.value()); + const TNode result = RegExpInitialize( + context, var_regexp.value(), var_pattern.value(), var_flags.value()); Return(result); } @@ -1478,12 +1392,12 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { TNode maybe_flags = CAST(Parameter(Descriptor::kFlags)); TNode context = CAST(Parameter(Descriptor::kContext)); - ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE, + ThrowIfNotInstanceType(context, maybe_receiver, JS_REG_EXP_TYPE, "RegExp.prototype.compile"); - Node* const receiver = maybe_receiver; + const TNode receiver = CAST(maybe_receiver); - VARIABLE(var_flags, MachineRepresentation::kTagged, maybe_flags); - VARIABLE(var_pattern, MachineRepresentation::kTagged, maybe_pattern); + TVARIABLE(Object, var_flags, maybe_flags); + TVARIABLE(Object, var_pattern, maybe_pattern); // Handle a JSRegExp pattern. { @@ -1492,8 +1406,6 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { GotoIf(TaggedIsSmi(maybe_pattern), &next); GotoIfNot(IsJSRegExp(CAST(maybe_pattern)), &next); - Node* const pattern = maybe_pattern; - // {maybe_flags} must be undefined in this case, otherwise throw. { Label next(this); @@ -1504,19 +1416,20 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) { BIND(&next); } - TNode const new_flags = FlagsGetter(context, CAST(pattern), true); + const TNode pattern = CAST(maybe_pattern); + TNode const new_flags = FlagsGetter(context, pattern, true); TNode const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset); - var_flags.Bind(new_flags); - var_pattern.Bind(new_pattern); + var_flags = new_flags; + var_pattern = new_pattern; Goto(&next); BIND(&next); } - Node* const result = RegExpInitialize(context, receiver, var_pattern.value(), - var_flags.value()); + const TNode result = RegExpInitialize( + context, receiver, var_pattern.value(), var_flags.value()); Return(result); } @@ -1586,54 +1499,6 @@ TNode RegExpBuiltinsAssembler::FlagGetter(TNode context, : SlowFlagGetter(context, regexp, flag); } -// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S ) -TNode RegExpBuiltinsAssembler::RegExpExec(TNode context, - Node* regexp, Node* string) { - TVARIABLE(Object, var_result); - Label out(this); - - // Take the slow path of fetching the exec property, calling it, and - // verifying its return value. 
- - // Get the exec property. - TNode const exec = - GetProperty(context, regexp, isolate()->factory()->exec_string()); - - // Is {exec} callable? - Label if_iscallable(this), if_isnotcallable(this); - - GotoIf(TaggedIsSmi(exec), &if_isnotcallable); - - TNode const exec_map = LoadMap(CAST(exec)); - Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable); - - BIND(&if_iscallable); - { - Callable call_callable = CodeFactory::Call(isolate()); - var_result = CAST(CallJS(call_callable, context, exec, regexp, string)); - - GotoIf(IsNull(var_result.value()), &out); - - ThrowIfNotJSReceiver(context, var_result.value(), - MessageTemplate::kInvalidRegExpExecResult, ""); - - Goto(&out); - } - - BIND(&if_isnotcallable); - { - ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE, - "RegExp.prototype.exec"); - - var_result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, - regexp, string); - Goto(&out); - } - - BIND(&out); - return var_result.value(); -} - TNode RegExpBuiltinsAssembler::AdvanceStringIndex( SloppyTNode string, SloppyTNode index, SloppyTNode is_unicode, bool is_fastpath) { @@ -1717,7 +1582,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( { var_result = is_fastpath ? RegExpPrototypeExecBody(context, CAST(regexp), string, true) - : RegExpExec(context, regexp, string); + : RegExpExec(context, CAST(regexp), string); Goto(&done); } @@ -1735,9 +1600,9 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( // Loop preparations. Within the loop, collect results from RegExpExec // and store match strings in the array. - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity()}; - Label loop(this, 3, vars), out(this); + Label loop(this, + {array.var_array(), array.var_length(), array.var_capacity()}), + out(this); // Check if the regexp is an ATOM type. If then, keep the literal string to // search for so that we can avoid calling substring in the loop below. @@ -1758,7 +1623,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( BIND(&loop); { - VARIABLE(var_match, MachineRepresentation::kTagged); + TVARIABLE(String, var_match); Label if_didmatch(this), if_didnotmatch(this); if (is_fastpath) { @@ -1776,24 +1641,24 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( match_indices, RegExpMatchInfo::kFirstCaptureIndex); TNode const match_to = UnsafeLoadFixedArrayElement( match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1); - var_match.Bind(CallBuiltin(Builtins::kSubString, context, string, - match_from, match_to)); + var_match = CAST(CallBuiltin(Builtins::kSubString, context, string, + match_from, match_to)); Goto(&if_didmatch); } BIND(&donotsubstring); - var_match.Bind(var_search_string.value()); + var_match = var_search_string.value(); Goto(&if_didmatch); } else { DCHECK(!is_fastpath); - TNode const result = RegExpExec(context, regexp, string); + TNode const result = RegExpExec(context, CAST(regexp), string); Label load_match(this); Branch(IsNull(result), &if_didnotmatch, &load_match); BIND(&load_match); - var_match.Bind( - ToString_Inline(context, GetProperty(context, result, SmiZero()))); + var_match = + ToString_Inline(context, GetProperty(context, result, SmiZero())); Goto(&if_didmatch); } @@ -1807,11 +1672,11 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( BIND(&if_didmatch); { - Node* match = var_match.value(); + TNode match = var_match.value(); // Store the match, growing the fixed array if needed. 
- array.Push(CAST(match)); + array.Push(match); // Advance last index if the match is the empty string. @@ -1855,128 +1720,11 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeMatchBody( return var_result.value(); } -void RegExpMatchAllAssembler::Generate(TNode context, - TNode native_context, - TNode receiver, - TNode maybe_string) { - // 1. Let R be the this value. - // 2. If Type(R) is not Object, throw a TypeError exception. - ThrowIfNotJSReceiver(context, receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@matchAll"); - - // 3. Let S be ? ToString(O). - TNode string = ToString_Inline(context, maybe_string); - - TVARIABLE(Object, var_matcher); - TVARIABLE(BoolT, var_global); - TVARIABLE(BoolT, var_unicode); - Label create_iterator(this), if_fast_regexp(this), - if_slow_regexp(this, Label::kDeferred); - - // Strict, because following code uses the flags property. - // TODO(jgruber): Handle slow flag accesses on the fast path and make this - // permissive. - BranchIfFastRegExp_Strict(context, CAST(receiver), &if_fast_regexp, - &if_slow_regexp); - - BIND(&if_fast_regexp); - { - TNode fast_regexp = CAST(receiver); - TNode source = - LoadObjectField(fast_regexp, JSRegExp::kSourceOffset); - - // 4. Let C be ? SpeciesConstructor(R, %RegExp%). - // 5. Let flags be ? ToString(? Get(R, "flags")). - // 6. Let matcher be ? Construct(C, « R, flags »). - TNode flags = FlagsGetter(context, fast_regexp, true); - var_matcher = RegExpCreate(context, native_context, source, flags); - CSA_ASSERT(this, - IsFastRegExpPermissive(context, CAST(var_matcher.value()))); - - // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). - // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - FastStoreLastIndex(CAST(var_matcher.value()), - FastLoadLastIndex(fast_regexp)); - - // 9. If flags contains "g", let global be true. - // 10. Else, let global be false. - var_global = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kGlobal); - - // 11. If flags contains "u", let fullUnicode be true. - // 12. Else, let fullUnicode be false. - var_unicode = FastFlagGetter(CAST(var_matcher.value()), JSRegExp::kUnicode); - Goto(&create_iterator); - } - - BIND(&if_slow_regexp); - { - // 4. Let C be ? SpeciesConstructor(R, %RegExp%). - TNode regexp_fun = CAST( - LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX)); - TNode species_constructor = - SpeciesConstructor(native_context, receiver, regexp_fun); - - // 5. Let flags be ? ToString(? Get(R, "flags")). - TNode flags = - GetProperty(context, receiver, isolate()->factory()->flags_string()); - TNode flags_string = ToString_Inline(context, flags); - - // 6. Let matcher be ? Construct(C, « R, flags »). - var_matcher = - Construct(context, species_constructor, receiver, flags_string); - - // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). - TNode last_index = - ToLength_Inline(context, SlowLoadLastIndex(context, receiver)); - - // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - SlowStoreLastIndex(context, var_matcher.value(), last_index); - - // 9. If flags contains "g", let global be true. - // 10. Else, let global be false. - TNode global_char_string = StringConstant("g"); - TNode global_ix = - CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string, - global_char_string, SmiZero())); - var_global = SmiNotEqual(global_ix, SmiConstant(-1)); - - // 11. If flags contains "u", let fullUnicode be true. - // 12. Else, let fullUnicode be false. 
- TNode unicode_char_string = StringConstant("u"); - TNode unicode_ix = - CAST(CallBuiltin(Builtins::kStringIndexOf, context, flags_string, - unicode_char_string, SmiZero())); - var_unicode = SmiNotEqual(unicode_ix, SmiConstant(-1)); - Goto(&create_iterator); - } - - BIND(&create_iterator); - { - { - // UseCounter for matchAll with non-g RegExp. - // https://crbug.com/v8/9551 - Label next(this); - GotoIf(var_global.value(), &next); - CallRuntime(Runtime::kIncrementUseCounter, context, - SmiConstant(v8::Isolate::kRegExpMatchAllWithNonGlobalRegExp)); - Goto(&next); - BIND(&next); - } - - // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode). - TNode iterator = - CreateRegExpStringIterator(native_context, var_matcher.value(), string, - var_global.value(), var_unicode.value()); - Return(iterator); - } -} - // ES#sec-createregexpstringiterator // CreateRegExpStringIterator ( R, S, global, fullUnicode ) TNode RegExpMatchAllAssembler::CreateRegExpStringIterator( - TNode native_context, TNode regexp, TNode string, - TNode global, TNode full_unicode) { + TNode native_context, TNode regexp, + TNode string, TNode global, TNode full_unicode) { TNode map = CAST(LoadContextElement( native_context, Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX)); @@ -2016,164 +1764,11 @@ TNode RegExpMatchAllAssembler::CreateRegExpStringIterator( return iterator; } -// https://tc39.github.io/proposal-string-matchall/ -// RegExp.prototype [ @@matchAll ] ( string ) -TF_BUILTIN(RegExpPrototypeMatchAll, RegExpMatchAllAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode native_context = LoadNativeContext(context); - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode maybe_string = CAST(Parameter(Descriptor::kString)); - Generate(context, native_context, receiver, maybe_string); -} - -void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( - TNode context, TNode regexp, TNode string) { - CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp)); - - // Grab the initial value of last index. - TNode previous_last_index = FastLoadLastIndex(regexp); - - // Ensure last index is 0. - FastStoreLastIndex(regexp, SmiZero()); - - // Call exec. - Label if_didnotmatch(this); - TNode match_indices = RegExpPrototypeExecBodyWithoutResult( - context, regexp, string, &if_didnotmatch, true); - - // Successful match. - { - // Reset last index. - FastStoreLastIndex(regexp, previous_last_index); - - // Return the index of the match. - TNode const index = LoadFixedArrayElement( - match_indices, RegExpMatchInfo::kFirstCaptureIndex); - Return(index); - } - - BIND(&if_didnotmatch); - { - // Reset last index and return -1. - FastStoreLastIndex(regexp, previous_last_index); - Return(SmiConstant(-1)); - } -} - -void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow( - TNode context, Node* const regexp, Node* const string) { - CSA_ASSERT(this, IsJSReceiver(regexp)); - CSA_ASSERT(this, IsString(string)); - - Isolate* const isolate = this->isolate(); - - TNode const smi_zero = SmiZero(); - - // Grab the initial value of last index. - TNode const previous_last_index = - SlowLoadLastIndex(context, CAST(regexp)); - - // Ensure last index is 0. - { - Label next(this), slow(this, Label::kDeferred); - BranchIfSameValue(previous_last_index, smi_zero, &next, &slow); - - BIND(&slow); - SlowStoreLastIndex(context, regexp, smi_zero); - Goto(&next); - BIND(&next); - } - - // Call exec. - TNode const exec_result = RegExpExec(context, regexp, string); - - // Reset last index if necessary. 
- { - Label next(this), slow(this, Label::kDeferred); - TNode const current_last_index = - SlowLoadLastIndex(context, CAST(regexp)); - - BranchIfSameValue(current_last_index, previous_last_index, &next, &slow); - - BIND(&slow); - SlowStoreLastIndex(context, regexp, previous_last_index); - Goto(&next); - BIND(&next); - } - - // Return -1 if no match was found. - { - Label next(this); - GotoIfNot(IsNull(exec_result), &next); - Return(SmiConstant(-1)); - BIND(&next); - } - - // Return the index of the match. - { - Label fast_result(this), slow_result(this, Label::kDeferred); - BranchIfFastRegExpResult(context, exec_result, &fast_result, &slow_result); - - BIND(&fast_result); - { - TNode const index = - LoadObjectField(CAST(exec_result), JSRegExpResult::kIndexOffset); - Return(index); - } - - BIND(&slow_result); - { - Return(GetProperty(context, exec_result, - isolate->factory()->index_string())); - } - } -} - -// ES#sec-regexp.prototype-@@search -// RegExp.prototype [ @@search ] ( string ) -TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) { - TNode maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode maybe_string = CAST(Parameter(Descriptor::kString)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSReceiver. - ThrowIfNotJSReceiver(context, maybe_receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@search"); - TNode receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. - TNode const string = ToString_Inline(context, maybe_string); - - Label fast_path(this), slow_path(this); - BranchIfFastRegExp_Permissive(context, receiver, &fast_path, &slow_path); - - BIND(&fast_path); - // TODO(pwong): Could be optimized to remove the overhead of calling the - // builtin (at the cost of a larger builtin). - Return(CallBuiltin(Builtins::kRegExpSearchFast, context, receiver, string)); - - BIND(&slow_path); - RegExpPrototypeSearchBodySlow(context, receiver, string); -} - -// Helper that skips a few initial checks. and assumes... -// 1) receiver is a "fast" RegExp -// 2) pattern is a string -TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) { - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode string = CAST(Parameter(Descriptor::kPattern)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - RegExpPrototypeSearchBodyFast(context, receiver, string); -} - // Generates the fast path for @@split. {regexp} is an unmodified, non-sticky // JSRegExp, {string} is a String, and {limit} is a Smi. -void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, - TNode regexp, - TNode string, - TNode const limit) { +TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( + TNode context, TNode regexp, TNode string, + TNode const limit) { CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp)); CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky))); @@ -2182,11 +1777,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, const ElementsKind kind = PACKED_ELEMENTS; const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS; - Node* const allocation_site = nullptr; + TNode allocation_site = {}; TNode const native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(kind, native_context); Label return_empty_array(this, Label::kDeferred); + TVARIABLE(JSArray, var_result); + Label done(this); // If limit is zero, return an empty array. 
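RegExpPrototypeSplitBody above changes from a void helper that called Return() on every path into a function that returns the result array: each exit now assigns var_result and jumps to a shared done label. A condensed sketch of the new shape, with the element types (JSArray, Context, JSRegExp, String, Smi) written in as assumptions and the loop body elided:

TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
    TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
    TNode<Smi> const limit) {
  TVARIABLE(JSArray, var_result);
  Label done(this);

  // ... every former Return(some_array) becomes:
  //   var_result = some_array;
  //   Goto(&done);

  BIND(&done);
  return var_result.value();
}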
{ @@ -2220,13 +1817,13 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, { TNode length = SmiConstant(1); TNode capacity = IntPtrConstant(1); - TNode result = AllocateJSArray(kind, array_map, capacity, - length, allocation_site, mode); + var_result = AllocateJSArray(kind, array_map, capacity, length, + allocation_site, mode); - TNode fixed_array = CAST(LoadElements(result)); + TNode fixed_array = CAST(LoadElements(var_result.value())); UnsafeStoreFixedArrayElement(fixed_array, 0, string); - Return(result); + Goto(&done); } } @@ -2240,11 +1837,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, TVARIABLE(Smi, var_last_matched_until, SmiZero()); TVARIABLE(Smi, var_next_search_from, SmiZero()); - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity(), &var_last_matched_until, - &var_next_search_from}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this); + Label loop(this, {array.var_array(), array.var_length(), array.var_capacity(), + &var_last_matched_until, &var_next_search_from}), + push_suffix_and_out(this), out(this); Goto(&loop); BIND(&loop); @@ -2321,19 +1916,17 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, match_indices, RegExpMatchInfo::kNumberOfCapturesIndex)); TNode const int_num_registers = SmiUntag(num_registers); - VARIABLE(var_reg, MachineType::PointerRepresentation()); - var_reg.Bind(IntPtrConstant(2)); + TVARIABLE(IntPtrT, var_reg, IntPtrConstant(2)); - Variable* vars[] = {array.var_array(), array.var_length(), - array.var_capacity(), &var_reg}; - const int vars_count = sizeof(vars) / sizeof(vars[0]); - Label nested_loop(this, vars_count, vars), nested_loop_out(this); + Label nested_loop(this, {array.var_array(), array.var_length(), + array.var_capacity(), &var_reg}), + nested_loop_out(this); Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop, &nested_loop_out); BIND(&nested_loop); { - Node* const reg = var_reg.value(); + const TNode reg = var_reg.value(); TNode const from = LoadFixedArrayElement( match_indices, reg, RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode); @@ -2342,30 +1935,30 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode)); Label select_capture(this), select_undefined(this), store_value(this); - VARIABLE(var_value, MachineRepresentation::kTagged); + TVARIABLE(Object, var_value); Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined, &select_capture); BIND(&select_capture); { - var_value.Bind( - CallBuiltin(Builtins::kSubString, context, string, from, to)); + var_value = + CallBuiltin(Builtins::kSubString, context, string, from, to); Goto(&store_value); } BIND(&select_undefined); { - var_value.Bind(UndefinedConstant()); + var_value = UndefinedConstant(); Goto(&store_value); } BIND(&store_value); { - array.Push(CAST(var_value.value())); + array.Push(var_value.value()); GotoIf(WordEqual(array.length(), int_limit), &out); - TNode const new_reg = IntPtrAdd(reg, IntPtrConstant(2)); - var_reg.Bind(new_reg); + const TNode new_reg = IntPtrAdd(reg, IntPtrConstant(2)); + var_reg = new_reg; Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop, &nested_loop_out); @@ -2382,316 +1975,29 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(TNode context, BIND(&push_suffix_and_out); { - TNode const from = var_last_matched_until.value(); - Node* const to = string_length; + 
const TNode from = var_last_matched_until.value(); + const TNode to = string_length; array.Push(CallBuiltin(Builtins::kSubString, context, string, from, to)); Goto(&out); } BIND(&out); { - TNode const result = array.ToJSArray(context); - Return(result); + var_result = array.ToJSArray(context); + Goto(&done); } BIND(&return_empty_array); { TNode length = SmiZero(); TNode capacity = IntPtrZero(); - TNode result = AllocateJSArray(kind, array_map, capacity, length, - allocation_site, mode); - Return(result); - } -} - -// Helper that skips a few initial checks. -TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) { - TNode regexp = CAST(Parameter(Descriptor::kRegExp)); - TNode string = CAST(Parameter(Descriptor::kString)); - TNode maybe_limit = CAST(Parameter(Descriptor::kLimit)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) { - BranchIfFastRegExp_Strict(context, regexp, ok, not_ok); - }); - - // Verify {maybe_limit}. - - VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit); - Label if_limitissmimax(this), runtime(this, Label::kDeferred); - - { - Label next(this); - - GotoIf(IsUndefined(maybe_limit), &if_limitissmimax); - Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime); - - // We need to be extra-strict and require the given limit to be either - // undefined or a positive smi. We can't call ToUint32(maybe_limit) since - // that might move us onto the slow path, resulting in ordering spec - // violations (see https://crbug.com/801171). - - BIND(&if_limitissmimax); - { - // TODO(jgruber): In this case, we can probably avoid generation of limit - // checks in Generate_RegExpPrototypeSplitBody. - var_limit.Bind(SmiConstant(Smi::kMaxValue)); - Goto(&next); - } - - BIND(&next); - } - - // Due to specific shortcuts we take on the fast path (specifically, we don't - // allocate a new regexp instance as specced), we need to ensure that the - // given regexp is non-sticky to avoid invalid results. See crbug.com/v8/6706. - - GotoIf(FastFlagGetter(regexp, JSRegExp::kSticky), &runtime); - - // We're good to go on the fast path, which is inlined here. - - RegExpPrototypeSplitBody(context, regexp, string, CAST(var_limit.value())); - - BIND(&runtime); - Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string, - var_limit.value())); -} - -// ES#sec-regexp.prototype-@@split -// RegExp.prototype [ @@split ] ( string, limit ) -TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) { - const int kStringArg = 0; - const int kLimitArg = 1; - - TNode argc = - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); - CodeStubArguments args(this, argc); - - TNode maybe_receiver = args.GetReceiver(); - TNode maybe_string = args.GetOptionalArgumentValue(kStringArg); - TNode maybe_limit = args.GetOptionalArgumentValue(kLimitArg); - TNode context = CAST(Parameter(Descriptor::kContext)); - - // Ensure {maybe_receiver} is a JSReceiver. - ThrowIfNotJSReceiver(context, maybe_receiver, - MessageTemplate::kIncompatibleMethodReceiver, - "RegExp.prototype.@@split"); - TNode receiver = CAST(maybe_receiver); - - // Convert {maybe_string} to a String. - TNode string = ToString_Inline(context, maybe_string); - - // Strict: Reads the flags property. - // TODO(jgruber): Handle slow flag accesses on the fast path and make this - // permissive. 
- Label stub(this), runtime(this, Label::kDeferred); - BranchIfFastRegExp_Strict(context, receiver, &stub, &runtime); - - BIND(&stub); - args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context, receiver, - string, maybe_limit)); - - BIND(&runtime); - args.PopAndReturn(CallRuntime(Runtime::kRegExpSplit, context, receiver, - string, maybe_limit)); -} - -class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler { - public: - explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state) - : RegExpBuiltinsAssembler(state) {} - - protected: - TNode LoadFlags(TNode iterator) { - return LoadObjectField(iterator, JSRegExpStringIterator::kFlagsOffset); - } - - TNode HasDoneFlag(TNode flags) { - return UncheckedCast( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kDoneBit)); - } - - TNode HasGlobalFlag(TNode flags) { - return UncheckedCast( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kGlobalBit)); - } - - TNode HasUnicodeFlag(TNode flags) { - return UncheckedCast( - IsSetSmi(flags, 1 << JSRegExpStringIterator::kUnicodeBit)); - } - - void SetDoneFlag(TNode iterator, TNode flags) { - TNode new_flags = - SmiOr(flags, SmiConstant(1 << JSRegExpStringIterator::kDoneBit)); - StoreObjectFieldNoWriteBarrier( - iterator, JSRegExpStringIterator::kFlagsOffset, new_flags); - } -}; - -// https://tc39.github.io/proposal-string-matchall/ -// %RegExpStringIteratorPrototype%.next ( ) -TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { - const char* method_name = "%RegExpStringIterator%.prototype.next"; - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode maybe_receiver = CAST(Parameter(Descriptor::kReceiver)); - - Label if_match(this), if_no_match(this, Label::kDeferred), - return_empty_done_result(this, Label::kDeferred); - - // 1. Let O be the this value. - // 2. If Type(O) is not Object, throw a TypeError exception. - // 3. If O does not have all of the internal slots of a RegExp String Iterator - // Object Instance (see 5.3), throw a TypeError exception. - ThrowIfNotInstanceType(context, maybe_receiver, - JS_REGEXP_STRING_ITERATOR_TYPE, method_name); - TNode receiver = CAST(maybe_receiver); - - // 4. If O.[[Done]] is true, then - // a. Return ! CreateIterResultObject(undefined, true). - TNode flags = LoadFlags(receiver); - GotoIf(HasDoneFlag(flags), &return_empty_done_result); - - // 5. Let R be O.[[IteratingRegExp]]. - TNode iterating_regexp = CAST(LoadObjectField( - receiver, JSRegExpStringIterator::kIteratingRegExpOffset)); - - // For extra safety, also check the type in release mode. - CSA_CHECK(this, IsJSReceiver(iterating_regexp)); - - // 6. Let S be O.[[IteratedString]]. - TNode iterating_string = CAST( - LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset)); - - // 7. Let global be O.[[Global]]. - // See if_match. - - // 8. Let fullUnicode be O.[[Unicode]]. - // See if_global. - - // 9. Let match be ? RegExpExec(R, S). 
- TVARIABLE(Object, var_match); - TVARIABLE(BoolT, var_is_fast_regexp); - { - Label if_fast(this), if_slow(this, Label::kDeferred); - BranchIfFastRegExp_Permissive(context, iterating_regexp, &if_fast, - &if_slow); - - BIND(&if_fast); - { - TNode match_indices = - RegExpPrototypeExecBodyWithoutResult( - context, iterating_regexp, iterating_string, &if_no_match, true); - var_match = ConstructNewResultFromMatchInfo( - context, iterating_regexp, match_indices, iterating_string); - var_is_fast_regexp = Int32TrueConstant(); - Goto(&if_match); - } - - BIND(&if_slow); - { - var_match = RegExpExec(context, iterating_regexp, iterating_string); - var_is_fast_regexp = Int32FalseConstant(); - Branch(IsNull(var_match.value()), &if_no_match, &if_match); - } - } - - // 10. If match is null, then - BIND(&if_no_match); - { - // a. Set O.[[Done]] to true. - SetDoneFlag(receiver, flags); - - // b. Return ! CreateIterResultObject(undefined, true). - Goto(&return_empty_done_result); + var_result = AllocateJSArray(kind, array_map, capacity, length, + allocation_site, mode); + Goto(&done); } - // 11. Else, - BIND(&if_match); - { - Label if_global(this), if_not_global(this, Label::kDeferred), - return_result(this); - - // a. If global is true, - Branch(HasGlobalFlag(flags), &if_global, &if_not_global); - BIND(&if_global); - { - Label if_fast(this), if_slow(this, Label::kDeferred); - // ii. If matchStr is the empty string, - Branch(var_is_fast_regexp.value(), &if_fast, &if_slow); - BIND(&if_fast); - { - // i. Let matchStr be ? ToString(? Get(match, "0")). - CSA_ASSERT_BRANCH(this, [&](Label* ok, Label* not_ok) { - BranchIfFastRegExpResult(context, var_match.value(), ok, not_ok); - }); - CSA_ASSERT(this, - SmiNotEqual(LoadFastJSArrayLength(CAST(var_match.value())), - SmiZero())); - TNode result_fixed_array = - CAST(LoadElements(CAST(var_match.value()))); - TNode match_str = - CAST(LoadFixedArrayElement(result_fixed_array, 0)); - - // When iterating_regexp is fast, we assume it stays fast even after - // accessing the first match from the RegExp result. - CSA_ASSERT(this, IsFastRegExpPermissive(context, iterating_regexp)); - GotoIfNot(IsEmptyString(match_str), &return_result); - - // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode this_index = FastLoadLastIndex(CAST(iterating_regexp)); - - // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = AdvanceStringIndexFast( - iterating_string, this_index, HasUnicodeFlag(flags)); - - // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - FastStoreLastIndex(CAST(iterating_regexp), next_index); - - // iii. Return ! CreateIterResultObject(match, false). - Goto(&return_result); - } - BIND(&if_slow); - { - // i. Let matchStr be ? ToString(? Get(match, "0")). - TNode match_str = ToString_Inline( - context, GetProperty(context, var_match.value(), SmiZero())); - - GotoIfNot(IsEmptyString(match_str), &return_result); - - // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode last_index = SlowLoadLastIndex(context, iterating_regexp); - TNode this_index = ToLength_Inline(context, last_index); - - // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), false); - - // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - SlowStoreLastIndex(context, iterating_regexp, next_index); - - // iii. Return ! CreateIterResultObject(match, false). - Goto(&return_result); - } - } - // b. 
Else, - BIND(&if_not_global); - { - // i. Set O.[[Done]] to true. - SetDoneFlag(receiver, flags); - - // ii. Return ! CreateIterResultObject(match, false). - Goto(&return_result); - } - BIND(&return_result); - { - Return(AllocateJSIteratorResult(context, var_match.value(), - FalseConstant())); - } - } - BIND(&return_empty_done_result); - Return( - AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant())); + BIND(&done); + return var_result.value(); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h index de841f57b292f0..c6de458ef2a2d9 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.h +++ b/deps/v8/src/builtins/builtins-regexp-gen.h @@ -25,8 +25,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode RegExpCreate(TNode context, TNode initial_map, TNode regexp_string, TNode flags); - TNode IsRegExp(TNode context, TNode maybe_receiver); - TNode SmiZero(); TNode IntPtrZero(); @@ -37,7 +35,8 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // and input string. TNode AllocateRegExpResult( TNode context, TNode length, TNode index, - TNode input, TNode* elements_out = nullptr); + TNode input, TNode match_info, + TNode* elements_out = nullptr); TNode FastLoadLastIndexBeforeSmiCheck(TNode regexp); TNode FastLoadLastIndex(TNode regexp) { @@ -56,10 +55,12 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // Loads {var_string_start} and {var_string_end} with the corresponding // offsets into the given {string_data}. - void GetStringPointers(Node* const string_data, Node* const offset, - Node* const last_index, Node* const string_length, - String::Encoding encoding, Variable* var_string_start, - Variable* var_string_end); + void GetStringPointers(TNode string_data, TNode offset, + TNode last_index, + TNode string_length, + String::Encoding encoding, + TVariable* var_string_start, + TVariable* var_string_end); // Low level logic around the actual call into pattern matching code. TNode RegExpExecInternal(TNode context, @@ -136,17 +137,17 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { // Performs fast path checks on the given object itself, but omits prototype // checks. - Node* IsFastRegExpNoPrototype(SloppyTNode context, - SloppyTNode object); - Node* IsFastRegExpNoPrototype(SloppyTNode context, - SloppyTNode object, - SloppyTNode map); + TNode IsFastRegExpNoPrototype(TNode context, + TNode object); + TNode IsFastRegExpNoPrototype(TNode context, + TNode object, TNode map); // For debugging only. Uses a slow GetProperty call to fetch object.exec. 
TNode IsFastRegExpWithOriginalExec(TNode context, TNode object); - void BranchIfFastRegExpResult(Node* const context, Node* const object, + void BranchIfFastRegExpResult(const TNode context, + const TNode object, Label* if_isunmodified, Label* if_ismodified); TNode FlagsGetter(TNode context, TNode regexp, @@ -164,10 +165,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode FlagGetter(TNode context, TNode regexp, JSRegExp::Flag flag, bool is_fastpath); - Node* RegExpInitialize(Node* const context, Node* const regexp, - Node* const maybe_pattern, Node* const maybe_flags); - - TNode RegExpExec(TNode context, Node* regexp, Node* string); + TNode RegExpInitialize(const TNode context, + const TNode regexp, + const TNode maybe_pattern, + const TNode maybe_flags); TNode AdvanceStringIndex(SloppyTNode string, SloppyTNode index, @@ -179,20 +180,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { return CAST(AdvanceStringIndex(string, index, is_unicode, true)); } + TNode AdvanceStringIndexSlow(TNode string, TNode index, + TNode is_unicode) { + return CAST(AdvanceStringIndex(string, index, is_unicode, false)); + } + TNode RegExpPrototypeMatchBody(TNode context, TNode regexp, TNode const string, const bool is_fastpath); - void RegExpPrototypeSearchBodyFast(TNode context, - TNode regexp, - TNode string); - void RegExpPrototypeSearchBodySlow(TNode context, Node* const regexp, - Node* const string); - - void RegExpPrototypeSplitBody(TNode context, TNode regexp, - TNode const string, - TNode const limit); + TNode RegExpPrototypeSplitBody(TNode context, + TNode regexp, + TNode const string, + TNode const limit); }; class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler { @@ -200,13 +201,11 @@ class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler { explicit RegExpMatchAllAssembler(compiler::CodeAssemblerState* state) : RegExpBuiltinsAssembler(state) {} - TNode CreateRegExpStringIterator(TNode native_context, + TNode CreateRegExpStringIterator(TNode native_context, TNode regexp, TNode string, TNode global, TNode full_unicode); - void Generate(TNode context, TNode native_context, - TNode receiver, TNode maybe_string); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index 8ae89187ecbc67..85cb4f10f77ce3 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -11,8 +11,6 @@ namespace v8 { namespace internal { using compiler::Node; -template -using TNode = compiler::TNode; class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { public: @@ -255,7 +253,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64); TNode value_integer = ToInteger_Inline(CAST(context), CAST(value)); - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode value_word32 = TruncateTaggedToWord32(context, value_integer); #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); @@ -338,7 +336,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode value_word32 = TruncateTaggedToWord32(context, value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, @@ -444,8 +442,10 @@ 
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer); - Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer); + TNode old_value_word32 = + TruncateTaggedToWord32(context, old_value_integer); + TNode new_value_word32 = + TruncateTaggedToWord32(context, new_value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, @@ -571,7 +571,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( #if DEBUG DebugSanityCheckAtomicIndex(array, index_word32, context); #endif - Node* value_word32 = TruncateTaggedToWord32(context, value_integer); + TNode value_word32 = TruncateTaggedToWord32(context, value_integer); int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index fc2745ed0a4ae5..425ffc46d29bc9 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -8,8 +8,10 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-factory.h" +#include "src/execution/protectors.h" #include "src/heap/factory-inl.h" #include "src/heap/heap-inl.h" +#include "src/logging/counters.h" #include "src/objects/objects.h" #include "src/objects/property-cell.h" @@ -17,8 +19,6 @@ namespace v8 { namespace internal { using Node = compiler::Node; -template -using TNode = compiler::TNode; Node* StringBuiltinsAssembler::DirectStringData(Node* string, Node* string_instance_type) { @@ -120,14 +120,14 @@ Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr, return result; } -TNode StringBuiltinsAssembler::PointerToStringDataAtIndex( - Node* const string_data, Node* const index, String::Encoding encoding) { +TNode StringBuiltinsAssembler::PointerToStringDataAtIndex( + TNode string_data, TNode index, + String::Encoding encoding) { const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING) ? UINT8_ELEMENTS : UINT16_ELEMENTS; - TNode const offset_in_bytes = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS); - return Signed(IntPtrAdd(string_data, offset_in_bytes)); + TNode offset_in_bytes = ElementOffsetFromIndex(index, kind); + return RawPtrAdd(string_data, offset_in_bytes); } void StringBuiltinsAssembler::GenerateStringEqual(TNode left, @@ -289,6 +289,262 @@ void StringBuiltinsAssembler::StringEqual_Loop( } } +TNode StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint( + TNode codepoint) { + VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); + + Label if_isword16(this), if_isword32(this), return_result(this); + + Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16, + &if_isword32); + + BIND(&if_isword16); + { + var_result.Bind(StringFromSingleCharCode(codepoint)); + Goto(&return_result); + } + + BIND(&if_isword32); + { + TNode value = AllocateSeqTwoByteString(2); + StoreNoWriteBarrier( + MachineRepresentation::kWord32, value, + IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), + codepoint); + var_result.Bind(value); + Goto(&return_result); + } + + BIND(&return_result); + return CAST(var_result.value()); +} + +TNode StringBuiltinsAssembler::AllocateConsString(TNode length, + TNode left, + TNode right) { + // Added string can be a cons string. 
+ Comment("Allocating ConsString"); + TNode left_instance_type = LoadInstanceType(left); + TNode right_instance_type = LoadInstanceType(right); + + // Determine the resulting ConsString map to use depending on whether + // any of {left} or {right} has two byte encoding. + STATIC_ASSERT(kOneByteStringTag != 0); + STATIC_ASSERT(kTwoByteStringTag == 0); + TNode combined_instance_type = + Word32And(left_instance_type, right_instance_type); + TNode result_map = CAST(Select( + IsSetWord32(combined_instance_type, kStringEncodingMask), + [=] { return ConsOneByteStringMapConstant(); }, + [=] { return ConsStringMapConstant(); })); + TNode result = AllocateInNewSpace(ConsString::kSize); + StoreMapNoWriteBarrier(result, result_map); + StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset, + Int32Constant(String::kEmptyHashField), + MachineRepresentation::kWord32); + StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left); + StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right); + return CAST(result); +} + +TNode StringBuiltinsAssembler::StringAdd(Node* context, + TNode left, + TNode right) { + TVARIABLE(String, result); + Label check_right(this), runtime(this, Label::kDeferred), cons(this), + done(this, &result), done_native(this, &result); + Counters* counters = isolate()->counters(); + + TNode left_length = LoadStringLengthAsWord32(left); + GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right); + result = right; + Goto(&done_native); + + BIND(&check_right); + TNode right_length = LoadStringLengthAsWord32(right); + GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons); + result = left; + Goto(&done_native); + + BIND(&cons); + { + TNode new_length = Uint32Add(left_length, right_length); + + // If new length is greater than String::kMaxLength, goto runtime to + // throw. Note: we also need to invalidate the string length protector, so + // can't just throw here directly. + GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)), + &runtime); + + TVARIABLE(String, var_left, left); + TVARIABLE(String, var_right, right); + Variable* input_vars[2] = {&var_left, &var_right}; + Label non_cons(this, 2, input_vars); + Label slow(this, Label::kDeferred); + GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)), + &non_cons); + + result = + AllocateConsString(new_length, var_left.value(), var_right.value()); + Goto(&done_native); + + BIND(&non_cons); + + Comment("Full string concatenate"); + TNode left_instance_type = LoadInstanceType(var_left.value()); + TNode right_instance_type = LoadInstanceType(var_right.value()); + // Compute intersection and difference of instance types. + + TNode ored_instance_types = + Word32Or(left_instance_type, right_instance_type); + TNode xored_instance_types = + Word32Xor(left_instance_type, right_instance_type); + + // Check if both strings have the same encoding and both are sequential. 
+ GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime); + GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow); + + TNode word_left_length = Signed(ChangeUint32ToWord(left_length)); + TNode word_right_length = Signed(ChangeUint32ToWord(right_length)); + + Label two_byte(this); + GotoIf(Word32Equal(Word32And(ored_instance_types, + Int32Constant(kStringEncodingMask)), + Int32Constant(kTwoByteStringTag)), + &two_byte); + // One-byte sequential string case + result = AllocateSeqOneByteString(new_length); + CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), + IntPtrConstant(0), word_left_length, + String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); + CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), + word_left_length, word_right_length, + String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); + Goto(&done_native); + + BIND(&two_byte); + { + // Two-byte sequential string case + result = AllocateSeqTwoByteString(new_length); + CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), + IntPtrConstant(0), word_left_length, + String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), + word_left_length, word_right_length, + String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + Goto(&done_native); + } + + BIND(&slow); + { + // Try to unwrap indirect strings, restart the above attempt on success. + MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right, + right_instance_type, &non_cons); + Goto(&runtime); + } + } + BIND(&runtime); + { + result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right)); + Goto(&done); + } + + BIND(&done_native); + { + IncrementCounter(counters->string_add_native(), 1); + Goto(&done); + } + + BIND(&done); + return result.value(); +} + +void StringBuiltinsAssembler::BranchIfCanDerefIndirectString( + TNode string, TNode instance_type, Label* can_deref, + Label* cannot_deref) { + TNode representation = + Word32And(instance_type, Int32Constant(kStringRepresentationMask)); + GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref); + GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), + cannot_deref); + // Cons string. + TNode rhs = + LoadObjectField(string, ConsString::kSecondOffset); + GotoIf(IsEmptyString(rhs), can_deref); + Goto(cannot_deref); +} + +void StringBuiltinsAssembler::DerefIndirectString(TVariable* var_string, + TNode instance_type) { +#ifdef DEBUG + Label can_deref(this), cannot_deref(this); + BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref, + &cannot_deref); + BIND(&cannot_deref); + DebugBreak(); // Should be able to dereference string. 
+ Goto(&can_deref); + BIND(&can_deref); +#endif // DEBUG + + STATIC_ASSERT(static_cast(ThinString::kActualOffset) == + static_cast(ConsString::kFirstOffset)); + *var_string = + LoadObjectField(var_string->value(), ThinString::kActualOffset); +} + +void StringBuiltinsAssembler::MaybeDerefIndirectString( + TVariable* var_string, TNode instance_type, + Label* did_deref, Label* cannot_deref) { + Label deref(this); + BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref, + cannot_deref); + + BIND(&deref); + { + DerefIndirectString(var_string, instance_type); + Goto(did_deref); + } +} + +void StringBuiltinsAssembler::MaybeDerefIndirectStrings( + TVariable* var_left, TNode left_instance_type, + TVariable* var_right, TNode right_instance_type, + Label* did_something) { + Label did_nothing_left(this), did_something_left(this), + didnt_do_anything(this); + MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left, + &did_nothing_left); + + BIND(&did_something_left); + { + MaybeDerefIndirectString(var_right, right_instance_type, did_something, + did_something); + } + + BIND(&did_nothing_left); + { + MaybeDerefIndirectString(var_right, right_instance_type, did_something, + &didnt_do_anything); + } + + BIND(&didnt_do_anything); + // Fall through if neither string was an indirect string. +} + +TNode StringBuiltinsAssembler::DerefIndirectString( + TNode string, TNode instance_type, Label* cannot_deref) { + Label deref(this); + BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref); + BIND(&deref); + STATIC_ASSERT(static_cast(ThinString::kActualOffset) == + static_cast(ConsString::kFirstOffset)); + return LoadObjectField(string, ThinString::kActualOffset); +} + TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) { TNode left = CAST(Parameter(Descriptor::kLeft)); TNode right = CAST(Parameter(Descriptor::kRight)); @@ -504,19 +760,6 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) { Operation::kGreaterThanOrEqual); } -TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) { - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - TNode position = - UncheckedCast(Parameter(Descriptor::kPosition)); - - // Load the character code at the {position} from the {receiver}. - TNode code = StringCharCodeAt(receiver, position); - - // And return the single character string with only that {code} - TNode result = StringFromSingleCharCode(code); - Return(result); -} - TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* position = Parameter(Descriptor::kPosition); @@ -551,14 +794,14 @@ TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) { // ES6 section 21.1 String Objects // ES6 #sec-string.fromcharcode -TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { +TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) { // TODO(ishell): use constants from Descriptor once the JSFunction linkage // arguments are reordered. TNode argc = UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); Node* context = Parameter(Descriptor::kContext); - CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); + CodeStubArguments arguments(this, argc); // Check if we have exactly one argument (plus the implicit receiver), i.e. // if the parent frame is not an arguments adaptor frame. 
Label if_oneargument(this), if_notoneargument(this); @@ -571,7 +814,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // for one-byte code units, or fall back to creating a single character // string on the fly otherwise. TNode code = arguments.AtIndex(0); - Node* code32 = TruncateTaggedToWord32(context, code); + TNode code32 = TruncateTaggedToWord32(context, code); TNode code16 = Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit))); TNode result = StringFromSingleCharCode(code16); @@ -585,16 +828,14 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // Assume that the resulting string contains only one-byte characters. TNode one_byte_result = AllocateSeqOneByteString(Unsigned(argc)); - TVARIABLE(IntPtrT, var_max_index); - var_max_index = IntPtrConstant(0); + TVARIABLE(IntPtrT, var_max_index, IntPtrConstant(0)); // Iterate over the incoming arguments, converting them to 8-bit character // codes. Stop if any of the conversions generates a code that doesn't fit // in 8 bits. CodeStubAssembler::VariableList vars({&var_max_index}, zone()); - arguments.ForEach(vars, [this, context, &two_byte, &var_max_index, &code16, - one_byte_result](Node* arg) { - Node* code32 = TruncateTaggedToWord32(context, arg); + arguments.ForEach(vars, [&](TNode arg) { + TNode code32 = TruncateTaggedToWord32(context, arg); code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)); GotoIf( @@ -604,7 +845,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // The {code16} fits into the SeqOneByteString {one_byte_result}. TNode offset = ElementOffsetFromIndex( var_max_index.value(), UINT8_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result, offset, code16); @@ -629,7 +869,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // Write the character that caused the 8-bit to 16-bit fault. TNode max_index_offset = ElementOffsetFromIndex(var_max_index.value(), UINT16_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqTwoByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result, max_index_offset, code16); @@ -640,14 +879,13 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) { // using a 16-bit representation. arguments.ForEach( vars, - [this, context, two_byte_result, &var_max_index](Node* arg) { - Node* code32 = TruncateTaggedToWord32(context, arg); + [&](TNode arg) { + TNode code32 = TruncateTaggedToWord32(context, arg); TNode code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)); TNode offset = ElementOffsetFromIndex( var_max_index.value(), UINT16_ELEMENTS, - CodeStubAssembler::INTPTR_PARAMETERS, SeqTwoByteString::kHeaderSize - kHeapObjectTag); StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result, offset, code16); @@ -723,9 +961,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&one_one); { - TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::ONE_BYTE_ENCODING); - TNode const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::ONE_BYTE_ENCODING); Label direct_memchr_call(this), generic_fast_path(this); @@ -736,8 +974,8 @@ void StringBuiltinsAssembler::StringIndexOf( // search strings. 
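// The "direct_memchr_call" block that follows implements the one-byte search
// fast path with libc memchr, which the surrounding dispatch reserves for the
// single-character case. The same idea in portable C++; names and bounds
// handling are illustrative only, not the V8 implementation.
#include <cstddef>
#include <cstring>

namespace sketch {

// Returns the index of `needle` in haystack[start..len), or -1 if absent.
inline ptrdiff_t IndexOfByte(const unsigned char* haystack, size_t len,
                             size_t start, unsigned char needle) {
  if (start >= len) return -1;
  const void* hit = std::memchr(haystack + start, needle, len - start);
  if (hit == nullptr) return -1;
  return static_cast<const unsigned char*>(hit) - haystack;
}

}  // namespace sketch
// (The CSA call into memchr follows below.)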
BIND(&direct_memchr_call); { - TNode const string_addr = - IntPtrAdd(adjusted_subject_ptr, start_position); + TNode const string_addr = + RawPtrAdd(adjusted_subject_ptr, start_position); TNode const search_length = IntPtrSub(subject_length, start_position); TNode const search_byte = @@ -745,14 +983,14 @@ void StringBuiltinsAssembler::StringIndexOf( TNode const memchr = ExternalConstant(ExternalReference::libc_memchr_function()); - TNode const result_address = UncheckedCast( + TNode const result_address = UncheckedCast( CallCFunction(memchr, MachineType::Pointer(), std::make_pair(MachineType::Pointer(), string_addr), std::make_pair(MachineType::IntPtr(), search_byte), std::make_pair(MachineType::UintPtr(), search_length))); GotoIf(WordEqual(result_address, int_zero), &return_minus_1); TNode const result_index = - IntPtrAdd(IntPtrSub(result_address, string_addr), start_position); + IntPtrAdd(RawPtrSub(result_address, string_addr), start_position); f_return(SmiTag(result_index)); } @@ -767,9 +1005,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&one_two); { - TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::ONE_BYTE_ENCODING); - TNode const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::TWO_BYTE_ENCODING); Node* const result = CallSearchStringRaw( @@ -780,9 +1018,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&two_one); { - TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::TWO_BYTE_ENCODING); - TNode const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::ONE_BYTE_ENCODING); Node* const result = CallSearchStringRaw( @@ -793,9 +1031,9 @@ void StringBuiltinsAssembler::StringIndexOf( BIND(&two_two); { - TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( + TNode const adjusted_subject_ptr = PointerToStringDataAtIndex( subject_ptr, subject_offset, String::TWO_BYTE_ENCODING); - TNode const adjusted_search_ptr = PointerToStringDataAtIndex( + TNode const adjusted_search_ptr = PointerToStringDataAtIndex( search_ptr, search_offset, String::TWO_BYTE_ENCODING); Node* const result = CallSearchStringRaw( @@ -1300,8 +1538,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) { // maybe_regexp is a fast regexp and receiver is a string. TNode s = CAST(receiver); - RegExpMatchAllAssembler regexp_asm(state()); - regexp_asm.Generate(context, native_context, maybe_regexp, s); + Return( + RegExpPrototypeMatchAllImpl(context, native_context, maybe_regexp, s)); }; auto if_generic_call = [=](Node* fn) { Callable call_callable = CodeFactory::Call(isolate()); @@ -1368,9 +1606,9 @@ TNode StringBuiltinsAssembler::StringToArray( TNode string_data_offset = to_direct.offset(); TNode cache = SingleCharacterStringCacheConstant(); - BuildFastLoop( + BuildFastLoop( IntPtrConstant(0), length, - [&](Node* index) { + [&](TNode index) { // TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation // and use that to guard ToDirectStringAssembler.PointerToData(). 
CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime), @@ -1387,7 +1625,7 @@ TNode StringBuiltinsAssembler::StringToArray( StoreFixedArrayElement(elements, index, entry); }, - 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context); result_array = AllocateJSArray(array_map, elements, length_smi); @@ -1614,7 +1852,7 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) { } } -TF_BUILTIN(StringSubstring, CodeStubAssembler) { +TF_BUILTIN(StringSubstring, StringBuiltinsAssembler) { TNode string = CAST(Parameter(Descriptor::kString)); TNode from = UncheckedCast(Parameter(Descriptor::kFrom)); TNode to = UncheckedCast(Parameter(Descriptor::kTo)); @@ -1870,9 +2108,248 @@ void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration( DCHECK(isolate()->heap()->string_iterator_protector().IsPropertyCell()); Branch( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), if_true, if_false); } +void StringBuiltinsAssembler::CopyStringCharacters( + Node* from_string, Node* to_string, TNode from_index, + TNode to_index, TNode character_count, + String::Encoding from_encoding, String::Encoding to_encoding) { + // Cannot assert IsString(from_string) and IsString(to_string) here because + // SubString can pass in faked sequential strings when handling external + // subject strings. + bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING; + bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING; + DCHECK_IMPLIES(to_one_byte, from_one_byte); + Comment("CopyStringCharacters ", + from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ", + to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING"); + + ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; + ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag; + TNode from_offset = + ElementOffsetFromIndex(from_index, from_kind, header_size); + TNode to_offset = + ElementOffsetFromIndex(to_index, to_kind, header_size); + TNode byte_count = + ElementOffsetFromIndex(character_count, from_kind); + TNode limit_offset = IntPtrAdd(from_offset, byte_count); + + // Prepare the fast loop + MachineType type = + from_one_byte ? MachineType::Uint8() : MachineType::Uint16(); + MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8 + : MachineRepresentation::kWord16; + int from_increment = 1 << ElementsKindToShiftSize(from_kind); + int to_increment = 1 << ElementsKindToShiftSize(to_kind); + + TVARIABLE(IntPtrT, current_to_offset, to_offset); + VariableList vars({¤t_to_offset}, zone()); + int to_index_constant = 0, from_index_constant = 0; + bool index_same = (from_encoding == to_encoding) && + (from_index == to_index || + (ToInt32Constant(from_index, &from_index_constant) && + ToInt32Constant(to_index, &to_index_constant) && + from_index_constant == to_index_constant)); + BuildFastLoop( + vars, from_offset, limit_offset, + [&](TNode offset) { + Node* value = Load(type, from_string, offset); + StoreNoWriteBarrier(rep, to_string, + index_same ? 
offset : current_to_offset.value(), + value); + if (!index_same) { + Increment(¤t_to_offset, to_increment); + } + }, + from_increment, IndexAdvanceMode::kPost); +} + +// A wrapper around CopyStringCharacters which determines the correct string +// encoding, allocates a corresponding sequential string, and then copies the +// given character range using CopyStringCharacters. +// |from_string| must be a sequential string. +// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length. +TNode StringBuiltinsAssembler::AllocAndCopyStringCharacters( + Node* from, Node* from_instance_type, TNode from_index, + TNode character_count) { + Label end(this), one_byte_sequential(this), two_byte_sequential(this); + TVARIABLE(String, var_result); + + Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential, + &two_byte_sequential); + + // The subject string is a sequential one-byte string. + BIND(&one_byte_sequential); + { + TNode result = AllocateSeqOneByteString( + Unsigned(TruncateIntPtrToInt32(character_count))); + CopyStringCharacters(from, result, from_index, IntPtrConstant(0), + character_count, String::ONE_BYTE_ENCODING, + String::ONE_BYTE_ENCODING); + var_result = result; + Goto(&end); + } + + // The subject string is a sequential two-byte string. + BIND(&two_byte_sequential); + { + TNode result = AllocateSeqTwoByteString( + Unsigned(TruncateIntPtrToInt32(character_count))); + CopyStringCharacters(from, result, from_index, IntPtrConstant(0), + character_count, String::TWO_BYTE_ENCODING, + String::TWO_BYTE_ENCODING); + var_result = result; + Goto(&end); + } + + BIND(&end); + return var_result.value(); +} + +TNode StringBuiltinsAssembler::SubString(TNode string, + TNode from, + TNode to) { + TVARIABLE(String, var_result); + ToDirectStringAssembler to_direct(state(), string); + Label end(this), runtime(this); + + TNode const substr_length = IntPtrSub(to, from); + TNode const string_length = LoadStringLengthAsWord(string); + + // Begin dispatching based on substring length. + + Label original_string_or_invalid_length(this); + GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length), + &original_string_or_invalid_length); + + // A real substring (substr_length < string_length). + Label empty(this); + GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty); + + Label single_char(this); + GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char); + + // Deal with different string types: update the index if necessary + // and extract the underlying string. + + TNode direct_string = to_direct.TryToDirect(&runtime); + TNode offset = IntPtrAdd(from, to_direct.offset()); + TNode const instance_type = to_direct.instance_type(); + + // The subject string can only be external or sequential string of either + // encoding at this point. + Label external_string(this); + { + if (FLAG_string_slices) { + Label next(this); + + // Short slice. Copy instead of slicing. + GotoIf(IntPtrLessThan(substr_length, + IntPtrConstant(SlicedString::kMinLength)), + &next); + + // Allocate new sliced string. 
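// The SubString fast path above chooses between copying characters out and
// allocating a SlicedString that only records (parent, offset, length). A toy
// version of that decision; the threshold is a stand-in for
// SlicedString::kMinLength and the struct layout is purely illustrative.
#include <cstddef>
#include <memory>
#include <string>
#include <utility>

namespace sketch {

constexpr size_t kMinSliceLength = 13;  // stand-in for SlicedString::kMinLength

struct StringRef {
  std::shared_ptr<const std::string> parent;  // set for slices
  size_t offset = 0;
  size_t length = 0;
  std::string copy;                           // set for short substrings
};

inline StringRef Substring(std::shared_ptr<const std::string> s, size_t from,
                           size_t to) {
  StringRef result;
  result.length = to - from;
  if (result.length < kMinSliceLength) {
    result.copy = s->substr(from, result.length);  // short: copy out
  } else {
    result.parent = std::move(s);  // long: share the parent's backing store
    result.offset = from;
  }
  return result;
}

}  // namespace sketch
// (The sliced-string allocation itself continues in the patch below.)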
+ + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Label one_byte_slice(this), two_byte_slice(this); + Branch(IsOneByteStringInstanceType(to_direct.instance_type()), + &one_byte_slice, &two_byte_slice); + + BIND(&one_byte_slice); + { + var_result = AllocateSlicedOneByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); + Goto(&end); + } + + BIND(&two_byte_slice); + { + var_result = AllocateSlicedTwoByteString( + Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, + SmiTag(offset)); + Goto(&end); + } + + BIND(&next); + } + + // The subject string can only be external or sequential string of either + // encoding at this point. + GotoIf(to_direct.is_external(), &external_string); + + var_result = AllocAndCopyStringCharacters(direct_string, instance_type, + offset, substr_length); + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Goto(&end); + } + + // Handle external string. + BIND(&external_string); + { + TNode const fake_sequential_string = + to_direct.PointerToString(&runtime); + + var_result = AllocAndCopyStringCharacters( + fake_sequential_string, instance_type, offset, substr_length); + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + Goto(&end); + } + + BIND(&empty); + { + var_result = EmptyStringConstant(); + Goto(&end); + } + + // Substrings of length 1 are generated through CharCodeAt and FromCharCode. + BIND(&single_char); + { + TNode char_code = StringCharCodeAt(string, from); + var_result = StringFromSingleCharCode(char_code); + Goto(&end); + } + + BIND(&original_string_or_invalid_length); + { + CSA_ASSERT(this, IntPtrEqual(substr_length, string_length)); + + // Equal length - check if {from, to} == {0, str.length}. + GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime); + + // Return the original string (substr_length == string_length). + + Counters* counters = isolate()->counters(); + IncrementCounter(counters->sub_string_native(), 1); + + var_result = string; + Goto(&end); + } + + // Fall back to a runtime call. + BIND(&runtime); + { + var_result = + CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string, + SmiTag(from), SmiTag(to))); + Goto(&end); + } + + BIND(&end); + return var_result.value(); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h index 64d5a77615d8f6..0dfcf88a8c77da 100644 --- a/deps/v8/src/builtins/builtins-string-gen.h +++ b/deps/v8/src/builtins/builtins-string-gen.h @@ -33,6 +33,25 @@ class StringBuiltinsAssembler : public CodeStubAssembler { SloppyTNode index, UnicodeEncoding encoding); + TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); + + // Return a new string object which holds a substring containing the range + // [from,to[ of string. + TNode SubString(TNode string, TNode from, + TNode to); + + // Copies |character_count| elements from |from_string| to |to_string| + // starting at the |from_index|'th character. |from_string| and |to_string| + // can either be one-byte strings or two-byte strings, although if + // |from_string| is two-byte, then |to_string| must be two-byte. + // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 
0 <= + // |from_index| <= |from_index| + |character_count| <= from_string.length and + // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. + V8_EXPORT_PRIVATE void CopyStringCharacters( + Node* from_string, Node* to_string, TNode from_index, + TNode to_index, TNode character_count, + String::Encoding from_encoding, String::Encoding to_encoding); + protected: void StringEqual_Loop(Node* lhs, Node* lhs_instance_type, MachineType lhs_type, Node* rhs, @@ -51,8 +70,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler { Node* const search_ptr, Node* const search_length, Node* const start_position); - TNode PointerToStringDataAtIndex(Node* const string_data, - Node* const index, + TNode PointerToStringDataAtIndex(TNode string_data, + TNode index, String::Encoding encoding); // substr and slice have a common way of handling the {start} argument. @@ -82,6 +101,38 @@ class StringBuiltinsAssembler : public CodeStubAssembler { return SmiLessThan(value, SmiConstant(0)); } + TNode AllocateConsString(TNode length, TNode left, + TNode right); + + TNode StringAdd(Node* context, TNode left, + TNode right); + + // Check if |string| is an indirect (thin or flat cons) string type that can + // be dereferenced by DerefIndirectString. + void BranchIfCanDerefIndirectString(TNode string, + TNode instance_type, + Label* can_deref, Label* cannot_deref); + // Allocate an appropriate one- or two-byte ConsString with the first and + // second parts specified by |left| and |right|. + // Unpack an indirect (thin or flat cons) string type. + void DerefIndirectString(TVariable* var_string, + TNode instance_type); + // Check if |var_string| has an indirect (thin or flat cons) string type, and + // unpack it if so. + void MaybeDerefIndirectString(TVariable* var_string, + TNode instance_type, Label* did_deref, + Label* cannot_deref); + // Check if |var_left| or |var_right| has an indirect (thin or flat cons) + // string type, and unpack it/them if so. Fall through if nothing was done. 
+ void MaybeDerefIndirectStrings(TVariable* var_left, + TNode left_instance_type, + TVariable* var_right, + TNode right_instance_type, + Label* did_something); + TNode DerefIndirectString(TNode string, + TNode instance_type, + Label* cannot_deref); + // Implements boilerplate logic for {match, split, replace, search} of the // form: // @@ -103,6 +154,12 @@ class StringBuiltinsAssembler : public CodeStubAssembler { Handle symbol, DescriptorIndexNameValue additional_property_to_check, const NodeFunction0& regexp_call, const NodeFunction1& generic_call); + + private: + TNode AllocAndCopyStringCharacters(Node* from, + Node* from_instance_type, + TNode from_index, + TNode character_count); }; class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler { diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc index 04a96c7e46d020..ba2346d661c7fa 100644 --- a/deps/v8/src/builtins/builtins-string.cc +++ b/deps/v8/src/builtins/builtins-string.cc @@ -136,20 +136,21 @@ BUILTIN(StringPrototypeLocaleCompare) { HandleScope handle_scope(isolate); isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare); + const char* method = "String.prototype.localeCompare"; #ifdef V8_INTL_SUPPORT - TO_THIS_STRING(str1, "String.prototype.localeCompare"); + TO_THIS_STRING(str1, method); Handle str2; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1))); RETURN_RESULT_OR_FAILURE( - isolate, Intl::StringLocaleCompare(isolate, str1, str2, - args.atOrUndefined(isolate, 2), - args.atOrUndefined(isolate, 3))); + isolate, Intl::StringLocaleCompare( + isolate, str1, str2, args.atOrUndefined(isolate, 2), + args.atOrUndefined(isolate, 3), method)); #else DCHECK_EQ(2, args.length()); - TO_THIS_STRING(str1, "String.prototype.localeCompare"); + TO_THIS_STRING(str1, method); Handle str2; ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2, Object::ToString(isolate, args.at(1))); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 448ff66603f94e..c69034e813b1c8 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -8,6 +8,7 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/builtins/growable-fixed-array-gen.h" +#include "src/execution/protectors.h" #include "src/handles/handles-inl.h" #include "src/heap/factory-inl.h" @@ -15,8 +16,6 @@ namespace v8 { namespace internal { using compiler::Node; -template -using TNode = compiler::TNode; // ----------------------------------------------------------------------------- // ES6 section 22.2 TypedArray Objects @@ -117,8 +116,8 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.bytelength TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.byteLength"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. 
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -135,8 +134,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.byteoffset TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.byteOffset"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -153,8 +152,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { // ES6 #sec-get-%typedarray%.prototype.length TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { const char* const kMethodName = "get TypedArray.prototype.length"; - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); @@ -318,8 +317,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( // Grab pointers and byte lengths we need later on. - TNode target_data_ptr = LoadJSTypedArrayBackingStore(target); - TNode source_data_ptr = LoadJSTypedArrayBackingStore(source); + TNode target_data_ptr = LoadJSTypedArrayDataPtr(target); + TNode source_data_ptr = LoadJSTypedArrayDataPtr(source); TNode source_el_kind = LoadElementsKind(source); TNode target_el_kind = LoadElementsKind(target); @@ -538,13 +537,83 @@ TNode TypedArrayBuiltinsAssembler::IsSharedArrayBuffer( return IsSetWord32(bitfield); } +void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( + TNode holder, TNode base, TNode offset) { + offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag), + offset); + if (COMPRESS_POINTERS_BOOL) { + TNode full_base = Signed(BitcastTaggedToWord(base)); + TNode compressed_base = TruncateIntPtrToInt32(full_base); + // TODO(v8:9706): Add a way to directly use kRootRegister value. + TNode isolate_root = + IntPtrSub(full_base, ChangeInt32ToIntPtr(compressed_base)); + // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset. + DCHECK_EQ( + isolate()->isolate_root(), + JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate())); + // See JSTypedArray::SetOnHeapDataPtr() for details. 
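// With pointer compression, SetJSTypedArrayOnHeapDataPtr above stores the
// data pointer as an offset pre-biased by the isolate root, so that later
// adding the 32-bit compressed base reconstructs a full address
// (full = isolate_root + compressed). A simplified model of that arithmetic;
// field names and layout here are illustrative, not the JSTypedArray layout.
#include <cstdint>

namespace sketch {

struct OnHeapDataPtr {
  uint32_t compressed_base;  // compressed pointer to the backing ByteArray
  uintptr_t stored_offset;   // header offset, pre-biased by isolate_root
};

inline uintptr_t Decompress(uintptr_t isolate_root, uint32_t compressed) {
  return isolate_root + compressed;
}

// Loosely mirrors the store side: bias the offset by isolate_root so a plain
// add of the compressed base yields the full pointer later.
inline OnHeapDataPtr Set(uintptr_t isolate_root, uint32_t compressed_base,
                         uintptr_t header_offset) {
  return {compressed_base, header_offset + isolate_root};
}

// Loosely mirrors the load side: isolate_root + base + offset.
inline uintptr_t Get(const OnHeapDataPtr& p) {
  return p.stored_offset + p.compressed_base;
}

}  // namespace sketch
// (The actual CSA computation continues below.)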
+ offset = Unsigned(IntPtrAdd(offset, isolate_root)); + } + + StoreObjectField(holder, JSTypedArray::kBasePointerOffset, base); + StoreObjectFieldNoWriteBarrier( + holder, JSTypedArray::kExternalPointerOffset, offset); +} + +void TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr( + TNode holder, TNode base, TNode offset) { + StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kBasePointerOffset, + SmiConstant(0)); + + base = RawPtrAdd(base, Signed(offset)); + StoreObjectFieldNoWriteBarrier( + holder, JSTypedArray::kExternalPointerOffset, base); +} + +void TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged( + TNode context, TNode typed_array, + TNode index_node, TNode value, ElementsKind elements_kind) { + TNode data_ptr = LoadJSTypedArrayDataPtr(typed_array); + switch (elements_kind) { + case UINT8_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + case INT8_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, SmiToInt32(CAST(value)), + SMI_PARAMETERS); + break; + case UINT32_ELEMENTS: + case INT32_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + TruncateTaggedToWord32(context, value), SMI_PARAMETERS); + break; + case FLOAT32_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), + SMI_PARAMETERS); + break; + case FLOAT64_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); + break; + case BIGUINT64_ELEMENTS: + case BIGINT64_ELEMENTS: + StoreElement(data_ptr, elements_kind, index_node, + UncheckedCast(value), SMI_PARAMETERS); + break; + default: + UNREACHABLE(); + } +} + // ES #sec-get-%typedarray%.prototype.set TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) { const char* method_name = "%TypedArray%.prototype.set"; + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); TNode context = CAST(Parameter(Descriptor::kContext)); - CodeStubArguments args( - this, - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this), if_offset_is_out_of_bounds(this, Label::kDeferred), @@ -618,7 +687,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) { // ES #sec-get-%typedarray%.prototype-@@tostringtag TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Label if_receiverisheapobject(this), return_undefined(this); Branch(TaggedIsSmi(receiver), &return_undefined, &if_receiverisheapobject); @@ -645,12 +714,12 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { #undef TYPED_ARRAY_CASE }; - // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so - // that this can be turned into a non-sparse table switch for ideal - // performance. + // We offset the dispatch by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND, so that + // this can be turned into a non-sparse table switch for ideal performance. 
BIND(&if_receiverisheapobject); + TNode receiver_heap_object = CAST(receiver); TNode elements_kind = - Int32Sub(LoadElementsKind(receiver), + Int32Sub(LoadElementsKind(receiver_heap_object), Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); Switch(elements_kind, &return_undefined, elements_kinds, elements_kind_labels, kTypedElementsKindCount); @@ -710,8 +779,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { TNode length = ChangeInt32ToIntPtr( UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); // 2. Let items be the List of arguments passed to this function. - CodeStubArguments args(this, length, nullptr, INTPTR_PARAMETERS, - CodeStubArguments::ReceiverMode::kHasReceiver); + CodeStubArguments args(this, length); Label if_not_constructor(this, Label::kDeferred), if_detached(this, Label::kDeferred); @@ -737,10 +805,10 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { DispatchTypedArrayByElementsKind( elements_kind, [&](ElementsKind kind, int size, int typed_array_fun_index) { - BuildFastLoop( + BuildFastLoop( IntPtrConstant(0), length, - [&](Node* index) { - TNode item = args.AtIndex(index, INTPTR_PARAMETERS); + [&](TNode index) { + TNode item = args.AtIndex(index); Node* value = PrepareValueForWriteToTypedArray(item, kind, context); @@ -752,12 +820,11 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // GC may move backing store in ToNumber, thus load backing // store everytime in this loop. - TNode backing_store = - LoadJSTypedArrayBackingStore(new_typed_array); - StoreElement(backing_store, kind, index, value, - INTPTR_PARAMETERS); + TNode data_ptr = + LoadJSTypedArrayDataPtr(new_typed_array); + StoreElement(data_ptr, kind, index, value, INTPTR_PARAMETERS); }, - 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); }); // 8. Return newObj. @@ -773,6 +840,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // ES6 #sec-%typedarray%.from TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { + TNode argc = + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); TNode context = CAST(Parameter(Descriptor::kContext)); Label check_iterator(this), from_array_like(this), fast_path(this), @@ -782,9 +851,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { if_iterator_fn_not_callable(this, Label::kDeferred), if_detached(this, Label::kDeferred); - CodeStubArguments args( - this, - ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); TNode source = args.GetOptionalArgumentValue(0); // 5. If thisArg is present, let T be thisArg; else let T be undefined. @@ -866,7 +933,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { TNode protector_cell = ArrayIteratorProtectorConstant(); GotoIfNot( TaggedEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset), - SmiConstant(Isolate::kProtectorValid)), + SmiConstant(Protectors::kProtectorValid)), &check_iterator); // Source is a TypedArray with unmodified iterator behavior. 
Use the @@ -950,15 +1017,15 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { TNode elements_kind = LoadElementsKind(target_obj.value()); // 7e/13 : Copy the elements - BuildFastLoop( + BuildFastLoop( SmiConstant(0), final_length.value(), - [&](Node* index) { + [&](TNode index) { TNode const k_value = GetProperty(context, final_source.value(), index); TNode const mapped_value = - CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg, - k_value, index)); + CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg, + k_value, index); DispatchTypedArrayByElementsKind( elements_kind, @@ -974,13 +1041,12 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { // GC may move backing store in map_fn, thus load backing // store in each iteration of this loop. - TNode backing_store = - LoadJSTypedArrayBackingStore(target_obj.value()); - StoreElement(backing_store, kind, index, final_value, - SMI_PARAMETERS); + TNode data_ptr = + LoadJSTypedArrayDataPtr(target_obj.value()); + StoreElement(data_ptr, kind, index, final_value, SMI_PARAMETERS); }); }, - 1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost); + 1, IndexAdvanceMode::kPost); args.PopAndReturn(target_obj.value()); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index d637bc9c6b6c9b..10a2cb608c6139 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -111,6 +111,18 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode elements_kind, const TypedArraySwitchCase& case_function); TNode IsSharedArrayBuffer(TNode buffer); + + void SetJSTypedArrayOnHeapDataPtr(TNode holder, + TNode base, + TNode offset); + void SetJSTypedArrayOffHeapDataPtr(TNode holder, + TNode base, + TNode offset); + void StoreJSTypedArrayElementFromTagged(TNode context, + TNode typed_array, + TNode index_node, + TNode value, + ElementsKind elements_kind); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h index c9d15f09dd260b..c8c9a2522c97d6 100644 --- a/deps/v8/src/builtins/builtins-utils-inl.h +++ b/deps/v8/src/builtins/builtins-utils-inl.h @@ -12,20 +12,21 @@ namespace v8 { namespace internal { -Handle BuiltinArguments::atOrUndefined(Isolate* isolate, int index) { +Handle BuiltinArguments::atOrUndefined(Isolate* isolate, + int index) const { if (index >= length()) { return isolate->factory()->undefined_value(); } return at(index); } -Handle BuiltinArguments::receiver() { return at(0); } +Handle BuiltinArguments::receiver() const { return at(0); } -Handle BuiltinArguments::target() { +Handle BuiltinArguments::target() const { return Arguments::at(Arguments::length() - 1 - kTargetOffset); } -Handle BuiltinArguments::new_target() { +Handle BuiltinArguments::new_target() const { return Arguments::at(Arguments::length() - 1 - kNewTargetOffset); } diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h index 822f9df6ecd9be..601dfd58131d7b 100644 --- a/deps/v8/src/builtins/builtins-utils.h +++ b/deps/v8/src/builtins/builtins-utils.h @@ -23,13 +23,13 @@ class BuiltinArguments : public Arguments { DCHECK_LE(1, this->length()); } - Object operator[](int index) { + Object operator[](int index) const { DCHECK_LT(index, length()); return Arguments::operator[](index); } template - Handle at(int index) { + Handle at(int index) const { DCHECK_LT(index, length()); return Arguments::at(index); } @@ 
-42,10 +42,10 @@ class BuiltinArguments : public Arguments { static constexpr int kNumExtraArgs = 4; static constexpr int kNumExtraArgsWithReceiver = 5; - inline Handle atOrUndefined(Isolate* isolate, int index); - inline Handle receiver(); - inline Handle target(); - inline Handle new_target(); + inline Handle atOrUndefined(Isolate* isolate, int index) const; + inline Handle receiver() const; + inline Handle target() const; + inline Handle new_target() const; // Gets the total number of arguments including the receiver (but // excluding extra arguments). @@ -77,7 +77,7 @@ class BuiltinArguments : public Arguments { RuntimeCallCounterId::kBuiltin_##name); \ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \ "V8.Builtin_" #name); \ - return Builtin_Impl_##name(args, isolate).ptr(); \ + return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ } \ \ V8_WARN_UNUSED_RESULT Address Builtin_##name( \ @@ -87,7 +87,7 @@ class BuiltinArguments : public Arguments { return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \ } \ BuiltinArguments args(args_length, args_object); \ - return Builtin_Impl_##name(args, isolate).ptr(); \ + return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ } \ \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index e5829dd1b34977..e0750a732c68a3 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -88,14 +88,16 @@ const BuiltinMetadata builtin_metadata[] = {BUILTIN_LIST( } // namespace BailoutId Builtins::GetContinuationBailoutId(Name name) { - DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC); + DCHECK(Builtins::KindOf(name) == TFJ || Builtins::KindOf(name) == TFC || + Builtins::KindOf(name) == TFS); return BailoutId(BailoutId::kFirstBuiltinContinuationId + name); } Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) { int builtin_index = id.ToInt() - BailoutId::kFirstBuiltinContinuationId; DCHECK(Builtins::KindOf(builtin_index) == TFJ || - Builtins::KindOf(builtin_index) == TFC); + Builtins::KindOf(builtin_index) == TFC || + Builtins::KindOf(builtin_index) == TFS); return static_cast(builtin_index); } @@ -204,7 +206,7 @@ void Builtins::PrintBuiltinCode() { CStrVector(FLAG_print_builtin_code_filter))) { CodeTracer::Scope trace_scope(isolate_->GetCodeTracer()); OFStream os(trace_scope.file()); - code->Disassemble(builtin_name, os); + code->Disassemble(builtin_name, os, isolate_); os << "\n"; } } diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq index 7467381690e21d..79f2a0ae010f78 100644 --- a/deps/v8/src/builtins/frames.tq +++ b/deps/v8/src/builtins/frames.tq @@ -24,8 +24,8 @@ Cast(o: Object): FrameType labels CastError { if (TaggedIsNotSmi(o)) goto CastError; assert( - (Convert(BitcastTaggedToWord(o)) >>> kSmiTagSize) < - kFrameTypeCount); + Convert(BitcastTaggedToWordForTagAndSmiBits(o)) < + Convert(kFrameTypeCount << kSmiTagSize)); return %RawDownCast(o); } diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h index 42f2afb281d694..8f72429a97e3fc 100644 --- a/deps/v8/src/builtins/growable-fixed-array-gen.h +++ b/deps/v8/src/builtins/growable-fixed-array-gen.h @@ -10,8 +10,6 @@ namespace v8 { namespace internal { -template -using TNode = compiler::TNode; // Utility class implementing a growable fixed array through CSA. 
class GrowableFixedArray : public CodeStubAssembler { diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index feabac3b66abbe..0885b6e633741d 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -5,7 +5,7 @@ #if V8_TARGET_ARCH_IA32 #include "src/api/api-arguments.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/code-factory.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" @@ -785,103 +785,75 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register scratch) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry) { // ----------- S t a t e ------------- // -- edx : new target (preserved for callee if needed, and caller) // -- edi : target function (preserved for callee if needed, and caller) - // -- ecx : feedback vector (also used as scratch, value is not preserved) // ----------------------------------- - DCHECK(!AreAliased(edx, edi, scratch)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(edx, edi, optimized_code_entry)); Register closure = edi; - // Scratch contains feedback_vector. - Register feedback_vector = scratch; - // Load the optimized code from the feedback vector and re-use the register. - Register optimized_code_entry = scratch; - __ mov(optimized_code_entry, - FieldOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); + __ push(edx); + + // Check if the optimized code is marked for deopt. If it is, bailout to a + // given label. + Label found_deoptimized_code; + __ mov(eax, + FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset), + Immediate(1 << Code::kMarkedForDeoptimizationBit)); + __ j(not_zero, &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx, + eax); + static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); + __ LoadCodeObjectEntry(ecx, optimized_code_entry); + __ pop(edx); + __ jmp(ecx); - { - // Optimized code slot is an optimization marker. - - // Fall through if no optimization trigger. - __ cmp(optimized_code_entry, - Immediate(Smi::FromEnum(OptimizationMarker::kNone))); - __ j(equal, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. 
- TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + __ pop(edx); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ cmp( - optimized_code_entry, - Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } - } +static void MaybeOptimizeCode(MacroAssembler* masm, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- edx : new target (preserved for callee if needed, and caller) + // -- edi : target function (preserved for callee if needed, and caller) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(edx, edi, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, &fallthrough); - - __ push(edx); - - // Check if the optimized code is marked for deopt. If it is, bailout to a - // given label. - Label found_deoptimized_code; - __ mov(eax, - FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); - __ j(not_zero, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - edx, eax); - static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch"); - __ LoadCodeObjectEntry(ecx, optimized_code_entry); - __ pop(edx); - __ jmp(ecx); - - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - __ pop(edx); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. 
+ if (FLAG_debug_code) { + __ cmp( + optimization_marker, + Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); + __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); + } } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -912,20 +884,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static_cast(interpreter::Bytecode::kDebugBreakExtraWide)); __ cmp(bytecode, Immediate(0x3)); __ j(above, &process_bytecode, Label::kNear); + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here. inc has to happen before test since it + // modifies the ZF flag. + __ inc(bytecode_offset); __ test(bytecode, Immediate(0x1)); + __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ j(not_equal, &extra_wide, Label::kNear); // Load the next bytecode and update table to the wide scaled table. - __ inc(bytecode_offset); - __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ add(bytecode_size_table, Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ jmp(&process_bytecode, Label::kNear); __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ inc(bytecode_offset); - __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the extra wide scaled table. __ add(bytecode_size_table, Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -982,9 +955,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE); __ j(not_equal, &push_stack_frame); - // Read off the optimized code slot in the closure's feedback vector, and if - // there is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, ecx); + // Read off the optimized code slot in the feedback vector. + // Load the optimized code from the feedback vector and re-use the register. + Register optimized_code_entry = ecx; + __ mov(optimized_code_entry, + FieldOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ cmp(optimized_code_entry, + Immediate(Smi::FromEnum(OptimizationMarker::kNone))); + __ j(not_equal, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Load the feedback vector and increment the invocation count. __ mov(feedback_vector, @@ -1035,6 +1020,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag))); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. Register frame_size = ecx; @@ -1042,22 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ mov(eax, esp); __ sub(eax, frame_size); __ CompareRealStackLimit(eax); - __ j(above_equal, &ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ j(below, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. 
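The AdvanceBytecodeOffsetOrReturn hunk above hoists the offset increment and the next-bytecode load out of the wide/extra-wide branches, since both paths need them and the `inc` has to run before the `test` (it clobbers the flags). As a rough, compilable model of the control flow being emitted here (the table size below is a placeholder, not V8's real constant, and none of these names are V8 API), something like the following captures the idea:

#include <cstddef>
#include <cstdint>

// Placeholder standing in for interpreter::Bytecodes::kBytecodeCount.
constexpr int kBytecodeCount = 256;

// Values 0..3 are the Wide/ExtraWide (and DebugBreak) prefixes; odd values are
// the extra-wide variants, mirroring the `test bytecode, 0x1` in the builtin.
void SkipScalingPrefix(const uint8_t* bytecode_array, size_t& offset,
                       uint8_t& bytecode, const int*& size_table) {
  if (bytecode > 0x3) return;                  // not a scaling prefix
  const bool extra_wide = (bytecode & 0x1) != 0;
  ++offset;                                    // shared by wide and extra-wide
  bytecode = bytecode_array[offset];           // load the real bytecode
  size_table += (extra_wide ? 2 : 1) * kBytecodeCount;  // pick the scaled row
}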
Label loop_header; Label loop_check; - __ Move(eax, masm->isolate()->factory()->undefined_value()); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ jmp(&loop_check); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. - __ push(eax); + __ push(kInterpreterAccumulatorRegister); // Continue loop if not done. __ bind(&loop_check); __ sub(frame_size, Immediate(kSystemPointerSize)); @@ -1067,12 +1050,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If the bytecode array has a valid incoming new target or generator object // register, initialize it with incoming value which was passed in edx. Label no_incoming_new_target_or_generator_register; - __ mov(eax, FieldOperand( + __ mov(ecx, FieldOperand( kInterpreterBytecodeArrayRegister, BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); - __ test(eax, eax); + __ test(ecx, ecx); __ j(zero, &no_incoming_new_target_or_generator_register); - __ mov(Operand(ebp, eax, times_system_pointer_size, 0), edx); + __ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx); __ bind(&no_incoming_new_target_or_generator_register); // Load accumulator and bytecode offset into registers. @@ -1117,8 +1100,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, edx, ecx); __ ret(0); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code as opposed to an optimization marker. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ int3(); // Should not return. } @@ -2601,14 +2602,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(eax); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq index 41ec0c36e42e0e..ebedbdce75b0f6 100644 --- a/deps/v8/src/builtins/internal-coverage.tq +++ b/deps/v8/src/builtins/internal-coverage.tq @@ -28,8 +28,6 @@ namespace internal_coverage { return UnsafeCast(debugInfo.coverage_info); } - @export // Silence unused warning on release builds. SlotCount is only used - // in an assert. TODO(szuend): Remove once macros and asserts work. macro SlotCount(coverageInfo: CoverageInfo): Smi { assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below. 
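Across the ports touched in this patch (ia32 above; MIPS, MIPS64 and PPC below), the old MaybeTailCallOptimizedCodeSlot helper is split in two: the entry trampoline now reads the feedback-vector slot itself, MaybeOptimizeCode handles Smi optimization markers, and TailCallOptimizedCodeSlot handles a weak reference to already-optimized code. A compilable sketch of that decision tree, with plain enums standing in for V8's tagged values (none of these types are V8 API), looks roughly like:

enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                    kCompileOptimizedConcurrent, kInOptimizationQueue };

// Simplified stand-in for the optimized-code slot: either a Smi marker or a
// (possibly cleared, possibly deoptimized) weak reference to Code.
struct Slot {
  bool is_marker;
  Marker marker;            // valid when is_marker
  bool weak_ref_cleared;    // valid when !is_marker
  bool marked_for_deopt;    // valid when !is_marker
};

enum class Action { kRunBytecode, kCallRuntimeForMarker,
                    kTailCallOptimizedCode, kEvictAndRerun };

Action DispatchOnOptimizedCodeSlot(const Slot& slot) {
  if (slot.is_marker) {
    if (slot.marker == Marker::kNone) return Action::kRunBytecode;
    // MaybeOptimizeCode: real markers tail-call the runtime; the
    // in-optimization-queue sentinel falls through to the bytecode.
    return slot.marker == Marker::kInOptimizationQueue
               ? Action::kRunBytecode
               : Action::kCallRuntimeForMarker;
  }
  // TailCallOptimizedCodeSlot: a cleared weak ref resumes the bytecode,
  // deoptimized code is evicted, otherwise the optimized code is entered.
  if (slot.weak_ref_cleared) return Action::kRunBytecode;
  if (slot.marked_for_deopt) return Action::kEvictAndRerun;
  return Action::kTailCallOptimizedCode;
}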
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask)); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 06e8ea539c0dc2..e662e4e75e5afe 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -37,22 +37,45 @@ namespace iterator { extern macro IteratorBuiltinsAssembler::IterableToList( implicit context: Context)(JSAny, JSAny): JSArray; + extern macro IteratorBuiltinsAssembler::StringListFromIterable( + implicit context: Context)(JSAny): JSArray; + extern builtin IterableToListMayPreserveHoles(implicit context: Context)(JSAny, JSAny); extern builtin IterableToListWithSymbolLookup(implicit context: Context)(JSAny); transitioning builtin GetIteratorWithFeedback( - context: Context, receiver: JSAny, feedbackSlot: Smi, + context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi, feedback: Undefined | FeedbackVector): JSAny { + let iteratorMethod: JSAny; typeswitch (feedback) { case (Undefined): { - return GetProperty(receiver, IteratorSymbolConstant()); + iteratorMethod = GetProperty(receiver, IteratorSymbolConstant()); } case (feedback: FeedbackVector): { - return LoadIC( - context, receiver, IteratorSymbolConstant(), feedbackSlot, - feedback); + iteratorMethod = LoadIC( + context, receiver, IteratorSymbolConstant(), loadSlot, feedback); + } + } + return CallIteratorWithFeedback( + context, receiver, iteratorMethod, callSlot, feedback); + } + + transitioning builtin CallIteratorWithFeedback( + context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi, + feedback: Undefined | FeedbackVector): JSAny { + const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot)); + CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged); + const iteratorCallable: Callable = Cast(iteratorMethod) + otherwise ThrowCalledNonCallable(iteratorMethod); + const iterator: JSAny = Call(context, iteratorCallable, receiver); + typeswitch (iterator) { + case (JSReceiver): { + return iterator; + } + case (JSPrimitive): { + ThrowSymbolIteratorInvalid(); } } } diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index d3237a1c381c9d..ecfb224fb27d2b 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -1085,18 +1085,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, t0); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ Subu(t1, sp, Operand(t0)); LoadRealStackLimit(masm, a2); - __ Branch(&ok, hs, t1, Operand(a2)); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ Branch(&stack_overflow, lo, t1, Operand(a2)); // If ok, push undefined as the initial value for all register file entries. Label loop_header; @@ -1169,6 +1167,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); // Unreachable code. __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. 
+ __ break_(0xCC); } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1525,14 +1528,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(a0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2131,7 +2128,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(a1, &non_callable); __ bind(&non_smi); __ GetObjectType(a1, t1, t2); @@ -2146,12 +2143,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ Branch(&non_callable, eq, t1, Operand(zero_reg)); // Check if target is a proxy and call CallProxy external builtin - __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver with the (original) target. __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2); __ sw(a1, MemOperand(kScratchReg)); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 7cb66470a34e36..47dbc340020dab 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -1103,18 +1103,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, a4); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size (word) from the BytecodeArray object. __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ Dsubu(a5, sp, Operand(a4)); LoadRealStackLimit(masm, a2); - __ Branch(&ok, hs, a5, Operand(a2)); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ Branch(&stack_overflow, lo, a5, Operand(a2)); // If ok, push undefined as the initial value for all register file entries. Label loop_header; @@ -1188,6 +1186,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); // Unreachable code. __ break_(0xCC); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + // Unreachable code. + __ break_(0xCC); } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1542,14 +1545,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. 
- __ push(a0); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2170,7 +2167,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(a1, &non_callable); __ bind(&non_smi); __ GetObjectType(a1, t1, t2); @@ -2184,12 +2181,11 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ And(t1, t1, Operand(Map::IsCallableBit::kMask)); __ Branch(&non_callable, eq, t1, Operand(zero_reg)); - __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), + RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver with the (original) target. __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2); __ Sd(a1, MemOperand(kScratchReg)); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 485b793395240a..ab0c7900d59490 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -863,9 +863,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); } -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store code entry in the closure. __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset), r0); @@ -902,100 +904,73 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r6 : new target (preserved for callee if needed, and caller) // -- r4 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch)); Register closure = r4; - Register optimized_code_entry = scratch1; - - __ LoadP( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. 
- __ CmpSmiLiteral(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone), r0); - __ beq(&fallthrough); - - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ CmpSmiLiteral( - optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ b(&fallthrough); - } - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadP(scratch, FieldMemOperand(optimized_code_entry, + Code::kCodeDataContainerOffset)); + __ LoadWordArith( + scratch, + FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0); + __ bne(&found_deoptimized_code, cr0); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch, r8); + static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); + __ LoadCodeObjectEntry(r5, optimized_code_entry); + __ Jump(r5); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ LoadP(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ LoadWordArith( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0); - __ bne(&found_deoptimized_code, cr0); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); - __ LoadCodeObjectEntry(r5, optimized_code_entry); - __ Jump(r5); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. 
- __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r6 : new target (preserved for callee if needed, and caller) + // -- r4 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r4, r6, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpSmiLiteral(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), + r0); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1104,9 +1079,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); __ bne(&push_stack_frame); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8); + Register optimized_code_entry = r7; + + // Read off the optimized code slot in the feedback vector. + __ LoadP(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ CmpSmiLiteral(optimized_code_entry, + Smi::FromEnum(OptimizationMarker::kNone), r0); + __ bne(&optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Increment invocation count for the function. __ LoadWord( @@ -1149,29 +1135,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(kInterpreterBytecodeArrayRegister, r3); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size (word) from the BytecodeArray object. __ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ sub(r8, sp, r5); LoadRealStackLimit(masm, r0); __ cmpl(r8, r0); - __ bge(&ok); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ blt(&stack_overflow); // If ok, push undefined as the initial value for all register file entries. // TODO(rmcilroy): Consider doing more than one push per loop iteration. 
Label loop, no_args; - __ LoadRoot(r8, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC); __ beq(&no_args, cr0); __ mtctr(r5); __ bind(&loop); - __ push(r8); + __ push(kInterpreterAccumulatorRegister); __ bdnz(&loop); __ bind(&no_args); } @@ -1189,8 +1173,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ StorePX(r6, MemOperand(fp, r8)); __ bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. + // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. Label do_dispatch; @@ -1231,8 +1215,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { LeaveInterpreterFrame(masm, r5); __ blr(); + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9); + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); __ bkpt(0); // Should not return. } @@ -1596,14 +1598,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset)); - { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ push(r3); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2260,7 +2256,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- r4 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(r4, &non_callable); __ bind(&non_smi); __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE); @@ -2277,12 +2273,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ cmpi(r8, Operand(JS_PROXY_TYPE)); - __ bne(&non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). - __ bind(&non_function); // Overwrite the original receiver the (original) target. 
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2)); __ StorePX(r4, MemOperand(sp, r8)); diff --git a/deps/v8/src/builtins/regexp-exec.tq b/deps/v8/src/builtins/regexp-exec.tq new file mode 100644 index 00000000000000..b2ca9de10b57ee --- /dev/null +++ b/deps/v8/src/builtins/regexp-exec.tq @@ -0,0 +1,45 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeExecBody( + implicit context: Context)(JSReceiver, String, constexpr bool): JSAny; + + transitioning macro RegExpPrototypeExecBodyFast(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + return RegExpPrototypeExecBody(receiver, string, true); + } + + transitioning macro RegExpPrototypeExecBodySlow(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + return RegExpPrototypeExecBody(receiver, string, false); + } + + // Slow path stub for RegExpPrototypeExec to decrease code size. + transitioning builtin + RegExpPrototypeExecSlow(implicit context: Context)( + regexp: JSRegExp, string: String): JSAny { + return RegExpPrototypeExecBodySlow(regexp, string); + } + + extern macro RegExpBuiltinsAssembler::IsFastRegExpNoPrototype( + implicit context: Context)(Object): bool; + + // ES#sec-regexp.prototype.exec + // RegExp.prototype.exec ( string ) + transitioning javascript builtin RegExpPrototypeExec( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + // Ensure {receiver} is a JSRegExp. + const receiver = Cast(receiver) otherwise ThrowTypeError( + kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver); + const string = ToString_Inline(context, string); + + return IsFastRegExpNoPrototype(receiver) ? + RegExpPrototypeExecBodyFast(receiver, string) : + RegExpPrototypeExecSlow(receiver, string); + } +} diff --git a/deps/v8/src/builtins/regexp-match-all.tq b/deps/v8/src/builtins/regexp-match-all.tq new file mode 100644 index 00000000000000..1be6e69afce748 --- /dev/null +++ b/deps/v8/src/builtins/regexp-match-all.tq @@ -0,0 +1,258 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + extern transitioning macro RegExpBuiltinsAssembler::RegExpCreate( + implicit context: Context)(Context, Object, String): Object; + + extern transitioning macro + RegExpMatchAllAssembler::CreateRegExpStringIterator( + NativeContext, Object, String, bool, bool): JSAny; + + @export + transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)( + nativeContext: NativeContext, receiver: JSAny, string: JSAny): JSAny { + // 1. Let R be the this value. + // 2. If Type(R) is not Object, throw a TypeError exception. + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@matchAll'); + const receiver = UnsafeCast(receiver); + + // 3. Let S be ? ToString(O). + const string: String = ToString_Inline(context, string); + + let matcher: Object; + let global: bool; + let unicode: bool; + + // 'FastJSRegExp' uses the strict fast path check because following code + // uses the flags property. + // TODO(jgruber): Handle slow flag accesses on the fast path and make this + // permissive. 
+ typeswitch (receiver) { + case (fastRegExp: FastJSRegExp): { + const source = fastRegExp.source; + + // 4. Let C be ? SpeciesConstructor(R, %RegExp%). + // 5. Let flags be ? ToString(? Get(R, "flags")). + // 6. Let matcher be ? Construct(C, « R, flags »). + const flags: String = FastFlagsGetter(fastRegExp); + matcher = RegExpCreate(nativeContext, source, flags); + const matcherRegExp = UnsafeCast(matcher); + assert(IsFastRegExpPermissive(matcherRegExp)); + + // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). + // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). + const fastRegExp = UnsafeCast(receiver); + FastStoreLastIndex(matcherRegExp, fastRegExp.lastIndex); + + // 9. If flags contains "g", let global be true. + // 10. Else, let global be false. + global = FastFlagGetter(matcherRegExp, kGlobal); + + // 11. If flags contains "u", let fullUnicode be true. + // 12. Else, let fullUnicode be false. + unicode = FastFlagGetter(matcherRegExp, kUnicode); + } + case (Object): { + // 4. Let C be ? SpeciesConstructor(R, %RegExp%). + const regexpFun = + UnsafeCast(nativeContext[REGEXP_FUNCTION_INDEX]); + const speciesConstructor = + UnsafeCast(SpeciesConstructor(receiver, regexpFun)); + + // 5. Let flags be ? ToString(? Get(R, "flags")). + const flags = GetProperty(receiver, 'flags'); + const flagsString = ToString_Inline(context, flags); + + // 6. Let matcher be ? Construct(C, « R, flags »). + matcher = Construct(speciesConstructor, receiver, flagsString); + + // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). + const lastIndex: Number = + ToLength_Inline(context, SlowLoadLastIndex(receiver)); + + // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). + SlowStoreLastIndex(UnsafeCast(matcher), lastIndex); + + // 9. If flags contains "g", let global be true. + // 10. Else, let global be false. + const globalCharString: String = StringConstant('g'); + const globalIndex: Smi = + StringIndexOf(flagsString, globalCharString, 0); + global = globalIndex != -1; + + // 11. If flags contains "u", let fullUnicode be true. + // 12. Else, let fullUnicode be false. + const unicodeCharString = StringConstant('u'); + const unicodeIndex: Smi = + StringIndexOf(flagsString, unicodeCharString, 0); + unicode = unicodeIndex != -1; + } + } + + // 13. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode). 
+ return CreateRegExpStringIterator( + nativeContext, matcher, string, global, unicode); + } + + // https://tc39.github.io/proposal-string-matchall/ + // RegExp.prototype [ @@matchAll ] ( string ) + transitioning javascript builtin RegExpPrototypeMatchAll( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + const nativeContext: NativeContext = LoadNativeContext(context); + return RegExpPrototypeMatchAllImpl(nativeContext, receiver, string); + } + + const kJSRegExpStringIteratorDone: + constexpr int31 generates '1 << JSRegExpStringIterator::kDoneBit'; + const kJSRegExpStringIteratorGlobal: constexpr int31 + generates '1 << JSRegExpStringIterator::kGlobalBit'; + const kJSRegExpStringIteratorUnicode: constexpr int31 + generates '1 << JSRegExpStringIterator::kUnicodeBit'; + + extern macro IsSetSmi(Smi, constexpr int31): bool; + + macro HasDoneFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorDone); + } + + macro HasGlobalFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorGlobal); + } + + macro HasUnicodeFlag(flags: Smi): bool { + return IsSetSmi(flags, kJSRegExpStringIteratorUnicode); + } + + macro SetDoneFlag(iterator: JSRegExpStringIterator, flags: Smi) { + const newFlags: Smi = flags | kJSRegExpStringIteratorDone; + iterator.flags = newFlags; + } + + extern macro RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( + implicit context: Context)(JSReceiver, RegExpMatchInfo, String): + JSRegExpResult; + + // https://tc39.github.io/proposal-string-matchall/ + // %RegExpStringIteratorPrototype%.next ( ) + transitioning javascript builtin RegExpStringIteratorPrototypeNext( + js-implicit context: Context, receiver: JSAny)(): JSAny { + // 1. Let O be the this value. + // 2. If Type(O) is not Object, throw a TypeError exception. + // 3. If O does not have all of the internal slots of a RegExp String + // Iterator Object Instance (see 5.3), throw a TypeError exception. + const methodName: constexpr string = + '%RegExpStringIterator%.prototype.next'; + const receiver = Cast(receiver) otherwise + ThrowTypeError(kIncompatibleMethodReceiver, methodName, receiver); + + try { + // 4. If O.[[Done]] is true, then + // a. Return ! CreateIterResultObject(undefined, true). + const flags: Smi = receiver.flags; + if (HasDoneFlag(flags)) goto ReturnEmptyDoneResult; + + // 5. Let R be O.[[iteratingRegExp]]. + const iteratingRegExp: JSReceiver = receiver.iterating_reg_exp; + + // 6. Let S be O.[[IteratedString]]. + const iteratingString: String = receiver.iterated_string; + + // 7. Let global be O.[[Global]]. + // 8. Let fullUnicode be O.[[Unicode]]. + // 9. Let match be ? RegExpExec(R, S). + let match: Object; + let isFastRegExp: bool = false; + try { + if (IsFastRegExpPermissive(iteratingRegExp)) { + const matchIndices: RegExpMatchInfo = + RegExpPrototypeExecBodyWithoutResultFast( + UnsafeCast(iteratingRegExp), iteratingString) + otherwise IfNoMatch; + match = ConstructNewResultFromMatchInfo( + iteratingRegExp, matchIndices, iteratingString); + isFastRegExp = true; + } else { + match = RegExpExec(iteratingRegExp, iteratingString); + if (match == Null) { + goto IfNoMatch; + } + } + // 11. Else, + // b. Else, handle non-global case first. + if (!HasGlobalFlag(flags)) { + // i. Set O.[[Done]] to true. + SetDoneFlag(receiver, flags); + + // ii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(UnsafeCast(match), False); + } + // a. If global is true, + assert(HasGlobalFlag(flags)); + if (isFastRegExp) { + // i. 
Let matchStr be ? ToString(? Get(match, "0")). + const match = UnsafeCast(match); + const resultFixedArray = UnsafeCast(match.elements); + const matchStr = UnsafeCast(resultFixedArray.objects[0]); + + // When iterating_regexp is fast, we assume it stays fast even after + // accessing the first match from the RegExp result. + assert(IsFastRegExpPermissive(iteratingRegExp)); + const iteratingRegExp = UnsafeCast(iteratingRegExp); + if (matchStr == kEmptyString) { + // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). + const thisIndex: Smi = FastLoadLastIndex(iteratingRegExp); + + // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, + // fullUnicode). + const nextIndex: Smi = AdvanceStringIndexFast( + iteratingString, thisIndex, HasUnicodeFlag(flags)); + + // 3. Perform ? Set(R, "lastIndex", nextIndex, true). + FastStoreLastIndex(iteratingRegExp, nextIndex); + } + + // iii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(match, False); + } + assert(!isFastRegExp); + // i. Let matchStr be ? ToString(? Get(match, "0")). + const match = UnsafeCast(match); + const matchStr = + ToString_Inline(context, GetProperty(match, SmiConstant(0))); + + if (matchStr == kEmptyString) { + // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). + const lastIndex: JSAny = SlowLoadLastIndex(iteratingRegExp); + const thisIndex: Number = ToLength_Inline(context, lastIndex); + + // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, + // fullUnicode). + const nextIndex: Number = AdvanceStringIndexSlow( + iteratingString, thisIndex, HasUnicodeFlag(flags)); + + // 3. Perform ? Set(R, "lastIndex", nextIndex, true). + SlowStoreLastIndex(iteratingRegExp, nextIndex); + } + // iii. Return ! CreateIterResultObject(match, false). + return AllocateJSIteratorResult(match, False); + } + // 10. If match is null, then + label IfNoMatch { + // a. Set O.[[Done]] to true. + SetDoneFlag(receiver, flags); + + // b. Return ! CreateIterResultObject(undefined, true). + goto ReturnEmptyDoneResult; + } + } + label ReturnEmptyDoneResult { + return AllocateJSIteratorResult(Undefined, True); + } + } +} diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq index f13724b476ce5d..1333ce97fb9f59 100644 --- a/deps/v8/src/builtins/regexp-replace.tq +++ b/deps/v8/src/builtins/regexp-replace.tq @@ -6,8 +6,6 @@ namespace regexp { - extern builtin - StringIndexOf(implicit context: Context)(String, String, Smi): Smi; extern builtin SubString(implicit context: Context)(String, Smi, Smi): String; @@ -21,9 +19,6 @@ namespace regexp { StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)( String, JSRegExp, Callable): String; - extern macro - RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi; - transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context: Context)( matchesElements: FixedArray, matchesLength: intptr, string: String, diff --git a/deps/v8/src/builtins/regexp-search.tq b/deps/v8/src/builtins/regexp-search.tq new file mode 100644 index 00000000000000..3c4e57d734e09b --- /dev/null +++ b/deps/v8/src/builtins/regexp-search.tq @@ -0,0 +1,105 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/builtins/builtins-regexp-gen.h' + +namespace regexp { + + transitioning macro + RegExpPrototypeSearchBodyFast(implicit context: Context)( + regexp: JSRegExp, string: String): JSAny { + assert(IsFastRegExpPermissive(regexp)); + + // Grab the initial value of last index. + const previousLastIndex: Smi = FastLoadLastIndex(regexp); + + // Ensure last index is 0. + FastStoreLastIndex(regexp, 0); + + // Call exec. + try { + const matchIndices: RegExpMatchInfo = + RegExpPrototypeExecBodyWithoutResultFast(regexp, string) + otherwise DidNotMatch; + + // Successful match. + // Reset last index. + FastStoreLastIndex(regexp, previousLastIndex); + + // Return the index of the match. + return UnsafeCast( + matchIndices.objects[kRegExpMatchInfoFirstCaptureIndex]); + } + label DidNotMatch { + // Reset last index and return -1. + FastStoreLastIndex(regexp, previousLastIndex); + return SmiConstant(-1); + } + } + + extern macro RegExpBuiltinsAssembler::BranchIfFastRegExpResult( + implicit context: Context)(Object): never labels IsUnmodified, + IsModified; + + macro + IsFastRegExpResult(implicit context: Context)(execResult: HeapObject): bool { + BranchIfFastRegExpResult(execResult) otherwise return true, return false; + } + + transitioning macro RegExpPrototypeSearchBodySlow(implicit context: Context)( + regexp: JSReceiver, string: String): JSAny { + // Grab the initial value of last index. + const previousLastIndex = SlowLoadLastIndex(regexp); + const smiZero: Smi = 0; + + // Ensure last index is 0. + if (!SameValue(previousLastIndex, smiZero)) { + SlowStoreLastIndex(regexp, smiZero); + } + + // Call exec. + const execResult = RegExpExec(regexp, string); + + // Reset last index if necessary. + const currentLastIndex = SlowLoadLastIndex(regexp); + if (!SameValue(currentLastIndex, previousLastIndex)) { + SlowStoreLastIndex(regexp, previousLastIndex); + } + + // Return -1 if no match was found. + if (execResult == Null) { + return SmiConstant(-1); + } + + // Return the index of the match. + const fastExecResult = Cast(execResult) + otherwise return GetProperty(execResult, 'index'); + return fastExecResult.index; + } + + // Helper that skips a few initial checks. and assumes... + // 1) receiver is a "fast permissive" RegExp + // 2) pattern is a string + transitioning builtin RegExpSearchFast(implicit context: Context)( + receiver: JSRegExp, string: String): JSAny { + return RegExpPrototypeSearchBodyFast(receiver, string); + } + + // ES#sec-regexp.prototype-@@search + // RegExp.prototype [ @@search ] ( string ) + transitioning javascript builtin RegExpPrototypeSearch( + js-implicit context: Context, receiver: JSAny)(string: JSAny): JSAny { + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@search'); + const receiver = UnsafeCast(receiver); + const string: String = ToString_Inline(context, string); + + if (IsFastRegExpPermissive(receiver)) { + // TODO(pwong): Could be optimized to remove the overhead of calling the + // builtin (at the cost of a larger builtin). 
+ return RegExpSearchFast(UnsafeCast(receiver), string); + } + return RegExpPrototypeSearchBodySlow(receiver, string); + } +} diff --git a/deps/v8/src/builtins/regexp-source.tq b/deps/v8/src/builtins/regexp-source.tq index c1ce1c5e9a6935..266c9e7472f2fa 100644 --- a/deps/v8/src/builtins/regexp-source.tq +++ b/deps/v8/src/builtins/regexp-source.tq @@ -6,9 +6,6 @@ namespace regexp { - const kRegExpPrototypeSourceGetter: constexpr int31 - generates 'v8::Isolate::kRegExpPrototypeSourceGetter'; - // ES6 21.2.5.10. // ES #sec-get-regexp.prototype.source transitioning javascript builtin RegExpPrototypeSourceGetter( diff --git a/deps/v8/src/builtins/regexp-split.tq b/deps/v8/src/builtins/regexp-split.tq new file mode 100644 index 00000000000000..8a9a30a7e90f3d --- /dev/null +++ b/deps/v8/src/builtins/regexp-split.tq @@ -0,0 +1,72 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-regexp-gen.h' + +namespace runtime { + extern transitioning runtime + RegExpSplit(implicit context: Context)(JSReceiver, String, Object): JSAny; +} // namespace runtime + +namespace regexp { + + const kMaxValueSmi: constexpr int31 + generates 'Smi::kMaxValue'; + + extern transitioning macro RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( + implicit context: Context)(JSRegExp, String, Smi): JSArray; + + // Helper that skips a few initial checks. + transitioning builtin + RegExpSplit(implicit context: Context)( + regexp: FastJSRegExp, string: String, limit: JSAny): JSAny { + let sanitizedLimit: Smi; + + // We need to be extra-strict and require the given limit to be either + // undefined or a positive smi. We can't call ToUint32(maybe_limit) since + // that might move us onto the slow path, resulting in ordering spec + // violations (see https://crbug.com/801171). + + if (limit == Undefined) { + // TODO(jgruber): In this case, we can probably avoid generation of limit + // checks in Generate_RegExpPrototypeSplitBody. + sanitizedLimit = SmiConstant(kMaxValueSmi); + } else if (!TaggedIsPositiveSmi(limit)) { + return runtime::RegExpSplit(regexp, string, limit); + } else { + sanitizedLimit = UnsafeCast(limit); + } + + // Due to specific shortcuts we take on the fast path (specifically, we + // don't allocate a new regexp instance as specced), we need to ensure that + // the given regexp is non-sticky to avoid invalid results. See + // crbug.com/v8/6706. + + if (FastFlagGetter(regexp, kSticky)) { + return runtime::RegExpSplit(regexp, string, sanitizedLimit); + } + + // We're good to go on the fast path, which is inlined here. + return RegExpPrototypeSplitBody(regexp, string, sanitizedLimit); + } + + // ES#sec-regexp.prototype-@@split + // RegExp.prototype [ @@split ] ( string, limit ) + transitioning javascript builtin RegExpPrototypeSplit( + js-implicit context: Context, receiver: JSAny)(...arguments): JSAny { + ThrowIfNotJSReceiver( + receiver, kIncompatibleMethodReceiver, 'RegExp.prototype.@@split'); + const receiver = UnsafeCast(receiver); + const string: String = ToString_Inline(context, arguments[0]); + const limit = arguments[1]; + + // Strict: Reads the flags property. + // TODO(jgruber): Handle slow flag accesses on the fast path and make this + // permissive. 
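The RegExpSplit fast path above accepts the limit only when it is undefined or a positive Smi, and additionally bails out for sticky regexps, because calling ToUint32 or re-reading flags on the fast path could be observable in the wrong order (the crbug.com/801171 and crbug.com/v8/6706 references in the comments). A small sketch of that guard, with std::optional standing in for Torque's tagged values (not V8 API):

#include <cstdint>
#include <optional>

constexpr int32_t kMaxValueSmi = (1 << 30) - 1;  // placeholder for Smi::kMaxValue

// Models the fast-path guard: nullopt means "take runtime::RegExpSplit".
// `limit_is_undefined` and `positive_smi_limit` stand in for the only two
// shapes of `limit` the Torque fast path accepts.
std::optional<int32_t> SanitizeSplitLimit(bool limit_is_undefined,
                                          std::optional<int32_t> positive_smi_limit,
                                          bool regexp_is_sticky) {
  int32_t sanitized;
  if (limit_is_undefined) {
    sanitized = kMaxValueSmi;             // undefined means "no limit"
  } else if (!positive_smi_limit) {
    return std::nullopt;                  // ToUint32 could run observable code
  } else {
    sanitized = *positive_smi_limit;
  }
  // The fast path does not allocate the spec'd regexp clone, so a sticky
  // regexp would produce wrong results; defer to the runtime instead.
  if (regexp_is_sticky) return std::nullopt;
  return sanitized;
}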
+ const fastRegExp = Cast(receiver) + otherwise return runtime::RegExpSplit(receiver, string, limit); + return RegExpSplit(fastRegExp, string, limit); + } + +} diff --git a/deps/v8/src/builtins/regexp-test.tq b/deps/v8/src/builtins/regexp-test.tq index 938dfa51f391f5..f2ebb7c2597273 100644 --- a/deps/v8/src/builtins/regexp-test.tq +++ b/deps/v8/src/builtins/regexp-test.tq @@ -20,7 +20,7 @@ namespace regexp { otherwise return False; return True; } - const matchIndices = RegExpExec(context, receiver, str); + const matchIndices = RegExpExec(receiver, str); return SelectBooleanConstant(matchIndices != Null); } diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq index 7352d2738fa4a4..e48e7c584deabd 100644 --- a/deps/v8/src/builtins/regexp.tq +++ b/deps/v8/src/builtins/regexp.tq @@ -22,8 +22,34 @@ namespace regexp { BranchIfFastRegExp_Permissive(o) otherwise return true, return false; } - extern macro RegExpBuiltinsAssembler::RegExpExec(Context, Object, Object): - Object; + const kInvalidRegExpExecResult: constexpr MessageTemplate + generates 'MessageTemplate::kInvalidRegExpExecResult'; + + // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S ) + @export + transitioning macro RegExpExec(implicit context: Context)( + receiver: JSReceiver, string: String): JSAny { + // Take the slow path of fetching the exec property, calling it, and + // verifying its return value. + + const exec = GetProperty(receiver, 'exec'); + + // Is {exec} callable? + typeswitch (exec) { + case (execCallable: Callable): { + const result = Call(context, execCallable, receiver, string); + if (result != Null) { + ThrowIfNotJSReceiver(result, kInvalidRegExpExecResult, ''); + } + return result; + } + case (Object): { + const regexp = Cast(receiver) otherwise ThrowTypeError( + kIncompatibleMethodReceiver, 'RegExp.prototype.exec', receiver); + return RegExpPrototypeExecSlow(regexp, string); + } + } + } extern macro RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast( @@ -161,4 +187,59 @@ namespace regexp { otherwise return SlowFlagsGetter(receiver); return FastFlagsGetter(fastRegexp); } + + extern transitioning macro RegExpBuiltinsAssembler::SlowLoadLastIndex( + implicit context: Context)(JSAny): JSAny; + extern transitioning macro RegExpBuiltinsAssembler::SlowStoreLastIndex( + implicit context: Context)(JSAny, JSAny): void; + + extern macro RegExpBuiltinsAssembler::FastLoadLastIndex(JSRegExp): Smi; + extern macro RegExpBuiltinsAssembler::FastStoreLastIndex(JSRegExp, Smi): void; + + extern builtin + StringIndexOf(implicit context: Context)(String, String, Smi): Smi; + + extern macro + RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi; + extern macro + RegExpBuiltinsAssembler::AdvanceStringIndexSlow(String, Number, bool): Smi; + + type UseCounterFeature extends int31 + constexpr 'v8::Isolate::UseCounterFeature'; + const kRegExpMatchIsTrueishOnNonJSRegExp: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp'; + const kRegExpMatchIsFalseishOnJSRegExp: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp'; + const kRegExpPrototypeSourceGetter: constexpr UseCounterFeature + generates 'v8::Isolate::kRegExpPrototypeSourceGetter'; + + // ES#sec-isregexp IsRegExp ( argument ) + @export + transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool { + const receiver = Cast(obj) otherwise return false; + + // Check @match. 
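The RegExpExec macro added to regexp.tq above follows ES#sec-regexpexec: read the receiver's 'exec' property; if it is callable, call it and require an Object-or-null result; otherwise require the receiver to be a real JSRegExp and run the built-in slow exec. A schematic C++ rendering of those steps under simplified, invented types (JSValue here is not a V8 type):

#include <functional>
#include <optional>
#include <stdexcept>

// Deliberately simplified tagged value: just enough shape for the sketch.
struct JSValue {
  bool is_null = false;
  bool is_object = false;
};

// exec_property: the user-visible "exec" slot, present only if it was callable.
// builtin_exec: the engine's default exec, valid only for real RegExp receivers.
JSValue RegExpExec(const std::optional<std::function<JSValue()>>& exec_property,
                   bool receiver_is_js_regexp,
                   const std::function<JSValue()>& builtin_exec) {
  if (exec_property) {
    JSValue result = (*exec_property)();
    if (!result.is_null && !result.is_object)
      throw std::runtime_error("RegExp exec result must be an Object or null");
    return result;
  }
  if (!receiver_is_js_regexp)
    throw std::runtime_error("Receiver is not a RegExp");  // TypeError in spec
  return builtin_exec();
}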
+ const value = GetProperty(receiver, MatchSymbolConstant()); + if (value == Undefined) { + return Is(receiver); + } + + assert(value != Undefined); + // The common path. Symbol.match exists, equals the RegExpPrototypeMatch + // function (and is thus trueish), and the receiver is a JSRegExp. + if (ToBoolean(value)) { + if (!Is(receiver)) { + IncrementUseCounter( + context, SmiConstant(kRegExpMatchIsTrueishOnNonJSRegExp)); + } + return true; + } + + assert(!ToBoolean(value)); + if (Is(receiver)) { + IncrementUseCounter( + context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp)); + } + return false; + } } diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 7dca12d17e44e4..7fc6b91ba37783 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -103,7 +103,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, // here which will cause scratch to become negative. __ SubP(scratch, sp, scratch); // Check if the arguments will overflow the stack. - __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r0, num_args, Operand(kSystemPointerSizeLog2)); __ CmpP(scratch, r0); __ ble(stack_overflow); // Signed comparison. } @@ -147,11 +147,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // sp[2]: number of arguments (smi-tagged) Label loop, no_args; __ beq(&no_args); - __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch, r2, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, scratch); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(scratch, MemOperand(scratch, -kPointerSize)); + __ lay(scratch, MemOperand(scratch, -kSystemPointerSize)); __ LoadP(r0, MemOperand(scratch, r6)); __ StoreP(r0, MemOperand(scratch, sp)); __ BranchOnCount(r1, &loop); @@ -177,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiToPtrArrayOffset(scratch, scratch); __ AddP(sp, sp, scratch); - __ AddP(sp, sp, Operand(kPointerSize)); + __ AddP(sp, sp, Operand(kSystemPointerSize)); __ Ret(); __ bind(&stack_overflow); @@ -213,11 +213,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(r5); // ----------- S t a t e ------------- - // -- sp[0*kPointerSize]: new target - // -- sp[1*kPointerSize]: padding - // -- r3 and sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments (tagged) - // -- sp[4*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: new target + // -- sp[1*kSystemPointerSize]: padding + // -- r3 and sp[2*kSystemPointerSize]: constructor function + // -- sp[3*kSystemPointerSize]: number of arguments (tagged) + // -- sp[4*kSystemPointerSize]: context // ----------------------------------- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); @@ -239,11 +239,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r2: receiver - // -- Slot 4 / sp[0*kPointerSize]: new target - // -- Slot 3 / sp[1*kPointerSize]: padding - // -- Slot 2 / sp[2*kPointerSize]: constructor function - // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) - // -- Slot 0 / sp[4*kPointerSize]: context + // -- Slot 4 / sp[0*kSystemPointerSize]: new target + // -- Slot 3 / sp[1*kSystemPointerSize]: padding + // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function + // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged) + // -- Slot 0 / 
sp[4*kSystemPointerSize]: context // ----------------------------------- // Deoptimizer enters here. masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( @@ -259,12 +259,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r5: new target - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: padding - // -- sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: implicit receiver + // -- sp[2*kSystemPointerSize]: padding + // -- sp[3*kSystemPointerSize]: constructor function + // -- sp[4*kSystemPointerSize]: number of arguments (tagged) + // -- sp[5*kSystemPointerSize]: context // ----------------------------------- // Restore constructor function and argument count. @@ -295,21 +295,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // -- r5: new target // -- r6: pointer to last argument // -- cr0: condition indicating whether r2 is zero - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: padding - // -- r3 and sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: implicit receiver + // -- sp[2*kSystemPointerSize]: padding + // -- r3 and sp[3*kSystemPointerSize]: constructor function + // -- sp[4*kSystemPointerSize]: number of arguments (tagged) + // -- sp[5*kSystemPointerSize]: context // ----------------------------------- __ ltgr(r2, r2); __ beq(&no_args); - __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r8, r2, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, r8); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(r8, MemOperand(r8, -kPointerSize)); + __ lay(r8, MemOperand(r8, -kSystemPointerSize)); __ LoadP(r0, MemOperand(r8, r6)); __ StoreP(r0, MemOperand(r8, sp)); __ BranchOnCount(r1, &loop); @@ -321,11 +321,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0: constructor result - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: padding - // -- sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments - // -- sp[4*kPointerSize]: context + // -- sp[0*kSystemPointerSize]: implicit receiver + // -- sp[1*kSystemPointerSize]: padding + // -- sp[2*kSystemPointerSize]: constructor function + // -- sp[3*kSystemPointerSize]: number of arguments + // -- sp[4*kSystemPointerSize]: context // ----------------------------------- // Store offset of return address for deoptimizer. 
@@ -376,7 +376,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ SmiToPtrArrayOffset(r3, r3); __ AddP(sp, sp, r3); - __ AddP(sp, sp, Operand(kPointerSize)); + __ AddP(sp, sp, Operand(kSystemPointerSize)); __ Ret(); } @@ -465,16 +465,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { r3, JSGeneratorObject::kParametersAndRegistersOffset)); { Label loop, done_loop; - __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2)); __ SubP(sp, r5); // ip = stack offset // r5 = parameter array offset __ LoadImmP(ip, Operand::Zero()); - __ SubP(r5, Operand(kPointerSize)); + __ SubP(r5, Operand(kSystemPointerSize)); __ blt(&done_loop); - __ lgfi(r1, Operand(-kPointerSize)); + __ lgfi(r1, Operand(-kSystemPointerSize)); __ bind(&loop); @@ -483,7 +483,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreP(r0, MemOperand(sp, ip)); // update offsets - __ lay(ip, MemOperand(ip, kPointerSize)); + __ lay(ip, MemOperand(ip, kSystemPointerSize)); __ BranchRelativeOnIdxHighP(r5, r1, &loop); @@ -550,9 +550,9 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { namespace { constexpr int kPushedStackSpace = - (kNumCalleeSaved + 2) * kPointerSize + - kNumCalleeSavedDoubles * kDoubleSize + 5 * kPointerSize + - EntryFrameConstants::kCallerFPOffset - kPointerSize; + (kNumCalleeSaved + 2) * kSystemPointerSize + + kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize + + EntryFrameConstants::kCallerFPOffset - kSystemPointerSize; // Called with the native C calling convention. The corresponding function // signature is either: @@ -607,9 +607,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Requires us to save the callee-preserved registers r6-r13 // General convention is to also save r14 (return addr) and // sp/r15 as well in a single STM/STMG - __ lay(sp, MemOperand(sp, -10 * kPointerSize)); + __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize)); __ StoreMultipleP(r6, sp, MemOperand(sp, 0)); - pushed_stack_space += (kNumCalleeSaved + 2) * kPointerSize; + pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize; // Initialize the root register. // C calling convention. The first argument is passed in r2. @@ -625,8 +625,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // SMI Marker // kCEntryFPAddress // Frame type - __ lay(sp, MemOperand(sp, -5 * kPointerSize)); - pushed_stack_space += 5 * kPointerSize; + __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize)); + pushed_stack_space += 5 * kSystemPointerSize; // Push a bad frame pointer to fail if it is used. __ LoadImmP(r9, Operand(-1)); @@ -637,16 +637,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ Move(r6, ExternalReference::Create( IsolateAddressId::kCEntryFPAddress, masm->isolate())); __ LoadP(r6, MemOperand(r6)); - __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize)); + __ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize)); Register scrach = r8; // Set up frame pointer for the frame to be pushed. - // Need to add kPointerSize, because sp has one extra + // Need to add kSystemPointerSize, because sp has one extra // frame already for the frame type being pushed later. 
- __ lay(fp, MemOperand( - sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize)); - pushed_stack_space += EntryFrameConstants::kCallerFPOffset - kPointerSize; + __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + + kSystemPointerSize)); + pushed_stack_space += + EntryFrameConstants::kCallerFPOffset - kSystemPointerSize; // restore r6 __ LoadRR(r6, r1); @@ -736,7 +737,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Reload callee-saved preserved regs, return address reg (r14) and sp __ LoadMultipleP(r6, sp, MemOperand(sp, 0)); - __ la(sp, MemOperand(sp, 10 * kPointerSize)); + __ la(sp, MemOperand(sp, 10 * kSystemPointerSize)); // saving floating point registers #if V8_TARGET_ARCH_S390X @@ -790,7 +791,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, // here which will cause scratch1 to become negative. __ SubP(scratch1, sp, scratch1); // Check if the arguments will overflow the stack. - __ ShiftLeftP(scratch2, argc, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch2, argc, Operand(kSystemPointerSizeLog2)); __ CmpP(scratch1, scratch2); __ bgt(&okay); // Signed comparison. @@ -807,7 +808,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: function // r5: receiver // r6: argc - // [fp + kPushedStackSpace + 20 * kPointerSize]: argv + // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv // r0,r2,r7-r9, cp may be clobbered // Enter an internal frame. @@ -831,7 +832,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r3: new.target // r4: function // r6: argc - // [fp + kPushedStackSpace + 20 * kPointerSize]: argv + // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv // r0,r2,r5,r7-r9, cp may be clobbered // Setup new.target, argc and function. @@ -862,15 +863,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r9: scratch reg to hold index into argv Label argLoop, argExit; intptr_t zero = 0; - __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2)); __ SubRR(sp, r7); // Buy the stack frame to fit args __ LoadImmP(r9, Operand(zero)); // Initialize argv index __ bind(&argLoop); __ CmpPH(r7, Operand(zero)); __ beq(&argExit, Label::kNear); - __ lay(r7, MemOperand(r7, -kPointerSize)); + __ lay(r7, MemOperand(r7, -kSystemPointerSize)); __ LoadP(r8, MemOperand(r9, r6)); // read next parameter - __ la(r9, MemOperand(r9, kPointerSize)); // r9++; + __ la(r9, MemOperand(r9, kSystemPointerSize)); // r9++; __ LoadP(r0, MemOperand(r8)); // dereference handle __ StoreP(r0, MemOperand(r7, sp)); // push parameter __ b(&argLoop); @@ -920,9 +921,11 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); } -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store code entry in the closure. 
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset), r0); @@ -960,100 +963,72 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch) { // ----------- S t a t e ------------- // -- r5 : new target (preserved for callee if needed, and caller) // -- r3 : target function (preserved for callee if needed, and caller) - // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; + DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch)); Register closure = r3; - Register optimized_code_entry = scratch1; - - __ LoadP( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ CmpSmiLiteral(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone), r0); - __ beq(&fallthrough); - - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ CmpSmiLiteral( - optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0); - __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); - } - __ b(&fallthrough, Label::kNear); - } - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadP(scratch, FieldMemOperand(optimized_code_entry, + Code::kCodeDataContainerOffset)); + __ LoadW(scratch, FieldMemOperand( + scratch, CodeDataContainer::kKindSpecificFlagsOffset)); + __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0); + __ bne(&found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure + // into the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch, r7); + static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); + __ LoadCodeObjectEntry(r4, optimized_code_entry); + __ Jump(r4); - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. 
- Label found_deoptimized_code; - __ LoadP(scratch2, FieldMemOperand(optimized_code_entry, - Code::kCodeDataContainerOffset)); - __ LoadW( - scratch2, - FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset)); - __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0); - __ bne(&found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); - __ LoadCodeObjectEntry(r4, optimized_code_entry); - __ Jump(r4); + // Optimized code slot contains deoptimized code, evict it and re-enter + // the closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +} - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { + // ----------- S t a t e ------------- + // -- r5 : new target (preserved for callee if needed, and caller) + // -- r3 : target function (preserved for callee if needed, and caller) + // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. + // ----------------------------------- + DCHECK(!AreAliased(feedback_vector, r3, r5, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. + if (FLAG_debug_code) { + __ CmpSmiLiteral(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), + r0); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } - - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1163,9 +1138,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE)); __ bne(&push_stack_frame); - // Read off the optimized code slot in the feedback vector, and if there - // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7); + Register optimized_code_entry = r6; + + // Read off the optimized code slot in the feedback vector. 
+  __ LoadP(optimized_code_entry,
+           FieldMemOperand(feedback_vector,
+                           FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
+
+  // Check if the optimized code slot is not empty.
+  Label optimized_code_slot_not_empty;
+  __ CmpSmiLiteral(optimized_code_entry,
+                   Smi::FromEnum(OptimizationMarker::kNone), r0);
+  __ bne(&optimized_code_slot_not_empty);
+
+  Label not_optimized;
+  __ bind(&not_optimized);
  // Increment invocation count for the function.
  __ LoadW(r1, FieldMemOperand(feedback_vector,
@@ -1202,29 +1189,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Push(kInterpreterBytecodeArrayRegister, r4);
  // Allocate the local and temporary register file on the stack.
+  Label stack_overflow;
  {
    // Load frame size (word) from the BytecodeArray object.
    __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                  BytecodeArray::kFrameSizeOffset));
    // Do a stack check to ensure we don't go over the limit.
-    Label ok;
    __ SubP(r8, sp, r4);
    __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
-    __ bge(&ok);
-    __ CallRuntime(Runtime::kThrowStackOverflow);
-    __ bind(&ok);
+    __ blt(&stack_overflow);
    // If ok, push undefined as the initial value for all register file entries.
    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
    Label loop, no_args;
-    __ LoadRoot(r8, RootIndex::kUndefinedValue);
-    __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
+    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+    __ ShiftRightP(r4, r4, Operand(kSystemPointerSizeLog2));
    __ LoadAndTestP(r4, r4);
    __ beq(&no_args);
    __ LoadRR(r1, r4);
    __ bind(&loop);
-    __ push(r8);
+    __ push(kInterpreterAccumulatorRegister);
    __ SubP(r1, Operand(1));
    __ bne(&loop);
    __ bind(&no_args);
@@ -1238,12 +1223,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
                     BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ CmpP(r8, Operand::Zero());
  __ beq(&no_incoming_new_target_or_generator_register);
-  __ ShiftLeftP(r8, r8, Operand(kPointerSizeLog2));
+  __ ShiftLeftP(r8, r8, Operand(kSystemPointerSizeLog2));
  __ StoreP(r5, MemOperand(fp, r8));
  __ bind(&no_incoming_new_target_or_generator_register);
-  // Load accumulator with undefined.
-  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+  // The accumulator is already loaded with undefined.
+
  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
  Label do_dispatch;
@@ -1254,7 +1239,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));
-  __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
+  __ ShiftLeftP(r5, r5, Operand(kSystemPointerSizeLog2));
  __ LoadP(kJavaScriptCallCodeStartRegister,
           MemOperand(kInterpreterDispatchTableRegister, r5));
  __ Call(kJavaScriptCallCodeStartRegister);
@@ -1285,8 +1270,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  LeaveInterpreterFrame(masm, r4);
  __ Ret();
+  __ bind(&optimized_code_slot_not_empty);
+  Label maybe_has_optimized_code;
+  // Check if optimized code marker is actually a weak reference to the
+  // optimized code.
+  __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code);
+  MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
+  // Fall through if there's no runnable optimized code.
+  __ jmp(&not_optimized);
+
+  __ bind(&maybe_has_optimized_code);
+  // Load code entry from the weak reference, if it was cleared, resume
+  // execution of unoptimized code.
+  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
+  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
+
  __ bind(&compile_lazy);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+
+  __ bind(&stack_overflow);
+  __ CallRuntime(Runtime::kThrowStackOverflow);
  __ bkpt(0);  // Should not return.
}
@@ -1296,11 +1299,11 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
  Label loop, skip;
  __ CmpP(count, Operand::Zero());
  __ beq(&skip);
-  __ AddP(index, index, Operand(kPointerSize));  // Bias up for LoadPU
+  __ AddP(index, index, Operand(kSystemPointerSize));  // Bias up for LoadPU
  __ LoadRR(r0, count);
  __ bind(&loop);
-  __ LoadP(scratch, MemOperand(index, -kPointerSize));
-  __ lay(index, MemOperand(index, -kPointerSize));
+  __ LoadP(scratch, MemOperand(index, -kSystemPointerSize));
+  __ lay(index, MemOperand(index, -kSystemPointerSize));
  __ push(scratch);
  __ SubP(r0, Operand(1));
  __ bne(&loop);
@@ -1474,7 +1477,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  Register scratch = temps.Acquire();
  __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister));
-  __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2));
+  __ ShiftLeftP(scratch, scratch, Operand(kSystemPointerSizeLog2));
  __ LoadP(kJavaScriptCallCodeStartRegister,
           MemOperand(kInterpreterDispatchTableRegister, scratch));
  __ Jump(kJavaScriptCallCodeStartRegister);
@@ -1540,7 +1543,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
  }
  for (int i = j - 1; i >= 0; --i) {
    __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
-                                    i * kPointerSize));
+                                    i * kSystemPointerSize));
    __ push(r6);
  }
  for (int i = 0; i < 3 - j; ++i) {
@@ -1589,9 +1592,10 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
    // Overwrite the hole inserted by the deoptimizer with the return value from
    // the LAZY deopt point.
    __ StoreP(
-        r2, MemOperand(
-                sp, config->num_allocatable_general_registers() * kPointerSize +
-                        BuiltinContinuationFrameConstants::kFixedFrameSize));
+        r2,
+        MemOperand(sp, config->num_allocatable_general_registers() *
+                               kSystemPointerSize +
+                           BuiltinContinuationFrameConstants::kFixedFrameSize));
  }
  for (int i = allocatable_register_count - 1; i >= 0; --i) {
    int code = config->GetAllocatableGeneralCode(i);
@@ -1647,14 +1651,8 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
-  // Lookup the function in the JavaScript frame.
-  __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
-
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
-    // Pass function as argument.
- __ push(r2); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -1707,16 +1705,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register arg_size = r7; Register new_sp = r5; Register scratch = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ LoadRR(r4, scratch); __ LoadP(r3, MemOperand(new_sp, 0)); // receiver - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg + __ LoadP(scratch, MemOperand(new_sp, 1 * -kSystemPointerSize)); // thisArg __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argArray + __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argArray __ bind(&skip); __ LoadRR(sp, new_sp); __ StoreP(scratch, MemOperand(sp, 0)); @@ -1765,7 +1763,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r2: actual number of arguments // 2. Get the callable to call (passed as receiver) from the stack. - __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r4, r2, Operand(kSystemPointerSizeLog2)); __ LoadP(r3, MemOperand(sp, r4)); // 3. Shift arguments and return address one slot down on the stack @@ -1780,9 +1778,9 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { __ AddP(r4, sp, r4); __ bind(&loop); - __ LoadP(scratch, MemOperand(r4, -kPointerSize)); + __ LoadP(scratch, MemOperand(r4, -kSystemPointerSize)); __ StoreP(scratch, MemOperand(r4)); - __ SubP(r4, Operand(kPointerSize)); + __ SubP(r4, Operand(kSystemPointerSize)); __ CmpP(r4, sp); __ bne(&loop); // Adjust the actual number of arguments and remove the top element @@ -1812,19 +1810,20 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Register arg_size = r7; Register new_sp = r5; Register scratch = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(scratch, r3); __ LoadRR(r4, r3); - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target + __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target __ beq(&skip); - __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument - __ CmpP(arg_size, Operand(2 * kPointerSize)); + __ LoadP(scratch, + MemOperand(new_sp, 2 * -kSystemPointerSize)); // thisArgument + __ CmpP(arg_size, Operand(2 * kSystemPointerSize)); __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList + __ LoadP(r4, MemOperand(new_sp, 3 * -kSystemPointerSize)); // argumentsList __ bind(&skip); __ LoadRR(sp, new_sp); __ StoreP(scratch, MemOperand(sp, 0)); @@ -1862,21 +1861,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Label skip; Register arg_size = r7; Register new_sp = r6; - __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(arg_size, r2, Operand(kSystemPointerSizeLog2)); __ AddP(new_sp, sp, arg_size); __ LoadRoot(r3, RootIndex::kUndefinedValue); __ LoadRR(r4, r3); __ LoadRR(r5, r3); __ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined) - __ CmpP(arg_size, Operand(kPointerSize)); + __ CmpP(arg_size, 
Operand(kSystemPointerSize)); __ blt(&skip); - __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target + __ LoadP(r3, MemOperand(new_sp, 1 * -kSystemPointerSize)); // target __ LoadRR(r5, r3); // new.target defaults to target __ beq(&skip); - __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList - __ CmpP(arg_size, Operand(2 * kPointerSize)); + __ LoadP(r4, MemOperand(new_sp, 2 * -kSystemPointerSize)); // argumentsList + __ CmpP(arg_size, Operand(2 * kSystemPointerSize)); __ beq(&skip); - __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target + __ LoadP(r5, MemOperand(new_sp, 3 * -kSystemPointerSize)); // new.target __ bind(&skip); __ LoadRR(sp, new_sp); } @@ -1912,15 +1911,15 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { // Function // ArgC as SMI // Padding <--- New SP - __ lay(sp, MemOperand(sp, -5 * kPointerSize)); + __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize)); // Cleanse the top nibble of 31-bit pointers. __ CleanseP(r14); - __ StoreP(r14, MemOperand(sp, 4 * kPointerSize)); - __ StoreP(fp, MemOperand(sp, 3 * kPointerSize)); - __ StoreP(r6, MemOperand(sp, 2 * kPointerSize)); - __ StoreP(r3, MemOperand(sp, 1 * kPointerSize)); - __ StoreP(r2, MemOperand(sp, 0 * kPointerSize)); + __ StoreP(r14, MemOperand(sp, 4 * kSystemPointerSize)); + __ StoreP(fp, MemOperand(sp, 3 * kSystemPointerSize)); + __ StoreP(r6, MemOperand(sp, 2 * kSystemPointerSize)); + __ StoreP(r3, MemOperand(sp, 1 * kSystemPointerSize)); + __ StoreP(r2, MemOperand(sp, 0 * kSystemPointerSize)); __ Push(Smi::zero()); // Padding. __ la(fp, MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp)); @@ -1933,7 +1932,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // Get the number of arguments passed (as a smi), tear down the frame and // then tear down the parameters. __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); - int stack_adjustment = kPointerSize; // adjust for receiver + int stack_adjustment = kSystemPointerSize; // adjust for receiver __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment); __ SmiToPtrArrayOffset(r3, r3); __ lay(sp, MemOperand(sp, r3)); @@ -1981,12 +1980,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Label loop, no_args, skip; __ CmpP(r6, Operand::Zero()); __ beq(&no_args); - __ AddP(r4, r4, - Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); + __ AddP( + r4, r4, + Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize)); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(scratch, MemOperand(r4, kPointerSize)); - __ la(r4, MemOperand(r4, kPointerSize)); + __ LoadP(scratch, MemOperand(r4, kSystemPointerSize)); + __ la(r4, MemOperand(r4, kSystemPointerSize)); __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); __ LoadRoot(scratch, RootIndex::kUndefinedValue); @@ -2070,11 +2070,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Forward the arguments from the caller frame. 
{ Label loop; - __ AddP(r6, r6, Operand(kPointerSize)); + __ AddP(r6, r6, Operand(kSystemPointerSize)); __ AddP(r2, r2, r7); __ bind(&loop); { - __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2)); + __ ShiftLeftP(scratch, r7, Operand(kSystemPointerSizeLog2)); __ LoadP(scratch, MemOperand(r6, scratch)); __ push(scratch); __ SubP(r7, r7, Operand(1)); @@ -2132,7 +2132,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadGlobalProxy(r5); } else { Label convert_to_object, convert_receiver; - __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r5, r2, Operand(kSystemPointerSizeLog2)); __ LoadP(r5, MemOperand(sp, r5)); __ JumpIfSmi(r5, &convert_to_object); STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); @@ -2169,7 +2169,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ bind(&convert_receiver); } - __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r6, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r5, MemOperand(sp, r6)); } __ bind(&done_convert); @@ -2226,7 +2226,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { { Label done; __ LoadRR(scratch, sp); // preserve previous stack pointer - __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r9, r6, Operand(kSystemPointerSizeLog2)); __ SubP(sp, sp, r9); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack @@ -2256,7 +2256,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ bind(&loop); __ LoadP(r0, MemOperand(scratch, r7)); __ StoreP(r0, MemOperand(sp, r7)); - __ AddP(r7, r7, Operand(kPointerSize)); + __ AddP(r7, r7, Operand(kSystemPointerSize)); __ BranchOnCount(r1, &loop); __ bind(&skip); } @@ -2268,10 +2268,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ AddP(r4, r4, r9); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(r0, MemOperand(r4, -kPointerSize)); - __ lay(r4, MemOperand(r4, -kPointerSize)); + __ LoadP(r0, MemOperand(r4, -kSystemPointerSize)); + __ lay(r4, MemOperand(r4, -kSystemPointerSize)); __ StoreP(r0, MemOperand(sp, r7)); - __ AddP(r7, r7, Operand(kPointerSize)); + __ AddP(r7, r7, Operand(kSystemPointerSize)); __ BranchOnCount(r1, &loop); __ AddP(r2, r2, r6); } @@ -2291,7 +2291,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // Patch the receiver to [[BoundThis]]. __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); - __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2)); + __ ShiftLeftP(r1, r2, Operand(kSystemPointerSizeLog2)); __ StoreP(r5, MemOperand(sp, r1)); // Push the [[BoundArguments]] onto the stack. @@ -2311,7 +2311,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- r3 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_function, non_smi; + Label non_callable, non_smi; __ JumpIfSmi(r3, &non_callable); __ bind(&non_smi); __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE); @@ -2328,14 +2328,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target is a proxy and call CallProxy external builtin __ CmpP(r7, Operand(JS_PROXY_TYPE)); - __ bne(&non_function); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq); // 2. 
Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).
-  __ bind(&non_function);
  // Overwrite the original receiver the (original) target.
-  __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+  __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
  __ StoreP(r3, MemOperand(sp, r7));
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
@@ -2449,7 +2447,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
  __ bind(&non_proxy);
  {
    // Overwrite the original receiver with the (original) target.
-    __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
+    __ ShiftLeftP(r7, r2, Operand(kSystemPointerSizeLog2));
    __ StoreP(r3, MemOperand(sp, r7));
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
@@ -2504,8 +2502,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    __ SmiToPtrArrayOffset(r2, r2);
    __ AddP(r2, fp);
    // adjust for return address and receiver
-    __ AddP(r2, r2, Operand(2 * kPointerSize));
-    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ AddP(r2, r2, Operand(2 * kSystemPointerSize));
+    __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
    __ SubP(r6, r2, r6);
    // Copy the arguments (including the receiver) to the new stack frame.
@@ -2520,7 +2518,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    __ LoadP(r0, MemOperand(r2, 0));
    __ push(r0);
    __ CmpP(r2, r6);  // Compare before moving to next argument.
-    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ lay(r2, MemOperand(r2, -kSystemPointerSize));
    __ bne(&copy);
    __ b(&invoke);
@@ -2548,22 +2546,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    Label copy;
    __ bind(&copy);
    // Adjust load for return address and receiver.
-    __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
+    __ LoadP(r0, MemOperand(r2, 2 * kSystemPointerSize));
    __ push(r0);
    __ CmpP(r2, fp);  // Compare before moving to next argument.
-    __ lay(r2, MemOperand(r2, -kPointerSize));
+    __ lay(r2, MemOperand(r2, -kSystemPointerSize));
    __ bne(&copy);
    // Fill the remaining expected arguments with undefined.
    // r3: function
    // r4: expected number of argumentus
    __ LoadRoot(r0, RootIndex::kUndefinedValue);
-    __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
+    __ ShiftLeftP(r6, r4, Operand(kSystemPointerSizeLog2));
    __ SubP(r6, fp, r6);
    // Adjust for frame.
    __ SubP(r6, r6,
            Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
-                    kPointerSize));
+                    kSystemPointerSize));
    Label fill;
    __ bind(&fill);
@@ -2608,7 +2606,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    // Remove superfluous parameters from the stack.
    __ SubP(r6, r2, r4);
    __ lgr(r2, r4);
-    __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
+    __ ShiftLeftP(r6, r6, Operand(kSystemPointerSizeLog2));
    __ lay(sp, MemOperand(sp, r6));
    __ b(&dont_adapt_arguments);
  }
@@ -2708,8 +2706,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
    __ LoadRR(r3, r4);
  } else {
    // Compute the argv pointer.
-    __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
-    __ lay(r3, MemOperand(r3, sp, -kPointerSize));
+    __ ShiftLeftP(r3, r2, Operand(kSystemPointerSizeLog2));
+    __ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
  }
  // Enter the exit frame that transitions from JavaScript to C++.
@@ -2751,7 +2749,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
  // by one register each.
__ LoadRR(r4, r3); __ LoadRR(r3, r2); - __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + __ la(r2, + MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize)); isolate_reg = r5; // Clang doesn't preserve r2 (result buffer) // write to r8 (preserved) before entry @@ -2765,7 +2764,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If return value is on the stack, pop it to registers. if (needs_return_buffer) { __ LoadRR(r2, r8); - __ LoadP(r3, MemOperand(r2, kPointerSize)); + __ LoadP(r3, MemOperand(r2, kSystemPointerSize)); __ LoadP(r2, MemOperand(r2)); } @@ -2870,7 +2869,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Push(result_reg, scratch); // Account for saved regs. - int argument_offset = 2 * kPointerSize; + int argument_offset = 2 * kSystemPointerSize; // Load double input. __ LoadDouble(double_scratch, MemOperand(sp, argument_offset)); @@ -2884,7 +2883,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Push(scratch_high, scratch_low); // Account for saved regs. - argument_offset += 2 * kPointerSize; + argument_offset += 2 * kSystemPointerSize; __ LoadlW(scratch_high, MemOperand(sp, argument_offset + Register::kExponentOffset)); @@ -2958,7 +2957,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ bind(&done); __ Pop(scratch_high, scratch_low); - argument_offset -= 2 * kPointerSize; + argument_offset -= 2 * kSystemPointerSize; __ bind(&fastpath_done); __ StoreP(result_reg, MemOperand(sp, argument_offset)); @@ -3159,33 +3158,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Set up FunctionCallbackInfo's implicit_args on the stack as follows: // // Target state: - // sp[0 * kPointerSize]: kHolder - // sp[1 * kPointerSize]: kIsolate - // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) - // sp[3 * kPointerSize]: undefined (kReturnValue) - // sp[4 * kPointerSize]: kData - // sp[5 * kPointerSize]: undefined (kNewTarget) + // sp[0 * kSystemPointerSize]: kHolder + // sp[1 * kSystemPointerSize]: kIsolate + // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue) + // sp[3 * kSystemPointerSize]: undefined (kReturnValue) + // sp[4 * kSystemPointerSize]: kData + // sp[5 * kSystemPointerSize]: undefined (kNewTarget) // Reserve space on the stack. - __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kPointerSize))); + __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize))); // kHolder. - __ StoreP(holder, MemOperand(sp, 0 * kPointerSize)); + __ StoreP(holder, MemOperand(sp, 0 * kSystemPointerSize)); // kIsolate. __ Move(scratch, ExternalReference::isolate_address(masm->isolate())); - __ StoreP(scratch, MemOperand(sp, 1 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 1 * kSystemPointerSize)); // kReturnValueDefaultValue and kReturnValue. __ LoadRoot(scratch, RootIndex::kUndefinedValue); - __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize)); - __ StoreP(scratch, MemOperand(sp, 3 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 2 * kSystemPointerSize)); + __ StoreP(scratch, MemOperand(sp, 3 * kSystemPointerSize)); // kData. - __ StoreP(call_data, MemOperand(sp, 4 * kPointerSize)); + __ StoreP(call_data, MemOperand(sp, 4 * kSystemPointerSize)); // kNewTarget. - __ StoreP(scratch, MemOperand(sp, 5 * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, 5 * kSystemPointerSize)); // Keep a pointer to kHolder (= implicit_args) in a scratch register. // We use it below to set up the FunctionCallbackInfo object. 
@@ -3207,33 +3206,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). // Arguments are after the return address (pushed by EnterExitFrame()). - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * + kSystemPointerSize)); // FunctionCallbackInfo::values_ (points at the first varargs argument passed // on the stack). - __ AddP(scratch, scratch, Operand((FCA::kArgsLength - 1) * kPointerSize)); - __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2)); + __ AddP(scratch, scratch, + Operand((FCA::kArgsLength - 1) * kSystemPointerSize)); + __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddP(scratch, scratch, r1); - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 2) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) * + kSystemPointerSize)); // FunctionCallbackInfo::length_. - __ StoreW(argc, - MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kPointerSize)); + __ StoreW(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) * + kSystemPointerSize)); // We also store the number of bytes to drop from the stack after returning // from the API function here. __ mov(scratch, - Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize)); - __ ShiftLeftP(r1, argc, Operand(kPointerSizeLog2)); + Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); + __ ShiftLeftP(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddP(scratch, r1); - __ StoreP(scratch, - MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kPointerSize)); + __ StoreP(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) * + kSystemPointerSize)); // v8::InvocationCallback's argument. __ lay(r2, - MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); + MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize)); ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); @@ -3241,11 +3241,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // TODO(jgruber): Document what these arguments are. static constexpr int kStackSlotsAboveFCA = 2; MemOperand return_value_operand( - fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); + fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize); static constexpr int kUseStackSpaceOperand = 0; MemOperand stack_space_operand( - sp, (kStackFrameExtraParamSlot + 4) * kPointerSize); + sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize); AllowExternalCallThatCantCauseGC scope(masm); CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, @@ -3293,7 +3293,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // Load address of v8::PropertyAccessorInfo::args_ array and name handle. 
__ LoadRR(r2, sp); // r2 = Handle - __ AddP(r3, r2, Operand(1 * kPointerSize)); // r3 = v8::PCI::args_ + __ AddP(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_ // If ABI passes Handles (pointer-sized struct) in a register: // @@ -3321,14 +3321,14 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { if (!ABI_PASSES_HANDLES_IN_REGS) { // pass 1st arg by reference - __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize)); - __ AddP(r2, sp, Operand(arg0Slot * kPointerSize)); + __ StoreP(r2, MemOperand(sp, arg0Slot * kSystemPointerSize)); + __ AddP(r2, sp, Operand(arg0Slot * kSystemPointerSize)); } // Create v8::PropertyCallbackInfo object on the stack and initialize // it's args_ field. - __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize)); - __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize)); + __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize)); + __ AddP(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize)); // r3 = v8::PropertyCallbackInfo& ExternalReference thunk_ref = @@ -3340,7 +3340,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // +3 is to skip prolog, return address and name handle. MemOperand return_value_operand( - fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); + fp, + (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize); MemOperand* const kUseStackSpaceConstant = nullptr; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, kStackUnwindSpace, kUseStackSpaceConstant, diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index e93d7008e47b96..e3f39a0906a350 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -264,22 +264,17 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { namespace { Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index, - const char* name, interpreter::OperandScale operand_scale, interpreter::Bytecode bytecode) { DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)); Handle code = interpreter::GenerateBytecodeHandler( - isolate, bytecode, operand_scale, builtin_index, - BuiltinAssemblerOptions(isolate, builtin_index)); + isolate, Builtins::name(builtin_index), bytecode, operand_scale, + builtin_index, BuiltinAssemblerOptions(isolate, builtin_index)); return *code; } } // namespace -#ifdef _MSC_VER -#pragma optimize( "", off ) -#endif - // static void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { Builtins* builtins = isolate->builtins(); @@ -318,9 +313,8 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { CallDescriptors::InterfaceDescriptor, #Name); \ AddBuiltin(builtins, index++, code); -#define BUILD_BCH(Name, OperandScale, Bytecode) \ - code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \ - OperandScale, Bytecode); \ +#define BUILD_BCH(Name, OperandScale, Bytecode) \ + code = GenerateBytecodeHandler(isolate, index, OperandScale, Bytecode); \ AddBuiltin(builtins, index++, code); #define BUILD_ASM(Name, InterfaceDescriptor) \ @@ -357,10 +351,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { builtins->MarkInitialized(); } -#ifdef _MSC_VER -#pragma optimize( "", on ) -#endif - - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq index c3cc7d949b716c..9590b853e793a3 100644 --- 
a/deps/v8/src/builtins/string-endswith.tq +++ b/deps/v8/src/builtins/string-endswith.tq @@ -41,7 +41,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. - if (IsRegExp(searchString)) { + if (regexp::IsRegExp(searchString)) { ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq index d36a44fa97d05e..b0bbb8d4a35dc2 100644 --- a/deps/v8/src/builtins/string-iterator.tq +++ b/deps/v8/src/builtins/string-iterator.tq @@ -11,7 +11,7 @@ namespace string_iterator { properties_or_hash: kEmptyFixedArray, elements: kEmptyFixedArray, string: string, - next_index: nextIndex + index: nextIndex }; } @@ -31,7 +31,7 @@ namespace string_iterator { kIncompatibleMethodReceiver, 'String Iterator.prototype.next', receiver); const string = iterator.string; - const position: intptr = SmiUntag(iterator.next_index); + const position: intptr = SmiUntag(iterator.index); const length: intptr = string.length_intptr; if (position >= length) { return AllocateJSIteratorResult(Undefined, True); @@ -40,7 +40,7 @@ namespace string_iterator { const encoding = UTF16; const ch = string::LoadSurrogatePairAt(string, length, position, encoding); const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch); - iterator.next_index = SmiTag(position + value.length_intptr); + iterator.index = SmiTag(position + value.length_intptr); return AllocateJSIteratorResult(value, False); } } diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq index 661cc264c50418..b5ddbdb2ccbe74 100644 --- a/deps/v8/src/builtins/string-slice.tq +++ b/deps/v8/src/builtins/string-slice.tq @@ -4,7 +4,8 @@ namespace string_slice { - extern macro SubString(String, intptr, intptr): String; + extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr): + String; // ES6 #sec-string.prototype.slice ( start, end ) // https://tc39.github.io/ecma262/#sec-string.prototype.slice diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq index 7fa7ec6d5ce952..3238f52b86bd20 100644 --- a/deps/v8/src/builtins/string-startswith.tq +++ b/deps/v8/src/builtins/string-startswith.tq @@ -5,9 +5,6 @@ #include 'src/builtins/builtins-regexp-gen.h' namespace string { - extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context: - Context)(Object): bool; - // https://tc39.github.io/ecma262/#sec-string.prototype.startswith transitioning javascript builtin StringPrototypeStartsWith( js-implicit context: Context, receiver: JSAny)(...arguments): Boolean { @@ -23,7 +20,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. 
- if (IsRegExp(searchString)) { + if (regexp::IsRegExp(searchString)) { ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq index c97b294a34fedd..813dc35ab2817b 100644 --- a/deps/v8/src/builtins/string-substring.tq +++ b/deps/v8/src/builtins/string-substring.tq @@ -4,7 +4,8 @@ namespace string_substring { - extern macro SubString(String, intptr, intptr): String; + extern macro StringBuiltinsAssembler::SubString(String, intptr, intptr): + String; transitioning macro ToSmiBetweenZeroAnd(implicit context: Context)( value: JSAny, limit: Smi): Smi { diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq index 7f007680e93b73..4f2c342fd554b9 100644 --- a/deps/v8/src/builtins/string.tq +++ b/deps/v8/src/builtins/string.tq @@ -21,7 +21,8 @@ namespace string { extern macro StringBuiltinsAssembler::LoadSurrogatePairAt( String, intptr, intptr, constexpr UnicodeEncoding): int32; - extern macro StringFromSingleUTF16EncodedCodePoint(int32): String; + extern macro StringBuiltinsAssembler::StringFromSingleUTF16EncodedCodePoint( + int32): String; // This function assumes StringPrimitiveWithNoCustomIteration is true. transitioning builtin StringToList(implicit context: Context)(string: String): @@ -187,4 +188,12 @@ namespace string { left: String, right: JSAny): String { return left + ToStringImpl(context, ToPrimitiveDefault(right)); } + + builtin StringCharAt(implicit context: Context)( + receiver: String, position: intptr): String { + // Load the character code at the {position} from the {receiver}. + const code: int32 = StringCharCodeAt(receiver, position); + // And return the single character string with only that {code} + return StringFromSingleCharCode(code); + } } diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index a476739861684f..a6bd445e34a483 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -27,21 +27,16 @@ namespace typed_array_createtypedarray { isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer, byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray { let elements: ByteArray; - let externalPointer: RawPtr; - let basePointer: ByteArray | Smi; if constexpr (isOnHeap) { elements = AllocateByteArray(byteLength); - basePointer = elements; - externalPointer = PointerConstant(kExternalPointerForOnHeapArray); } else { - basePointer = Convert(0); + elements = kEmptyByteArray; // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit // platforms are self-limiting, because we can't allocate an array bigger // than our 32-bit arithmetic range anyway. 64 bit platforms could // theoretically have an offset up to 2^35 - 1. - const backingStore: RawPtr = buffer.backing_store; - externalPointer = backingStore + Convert(byteOffset); + const backingStore: uintptr = Convert(buffer.backing_store); // Assert no overflow has occurred. Only assert if the mock array buffer // allocator is NOT used. When the mock array buffer is used, impossibly @@ -49,9 +44,7 @@ namespace typed_array_createtypedarray { // and this assertion to fail. 
assert( IsMockArrayBufferAllocatorFlag() || - Convert(externalPointer) >= Convert(backingStore)); - - elements = kEmptyByteArray; + (backingStore + byteOffset) >= backingStore); } // We can't just build the new object with "new JSTypedArray" here because @@ -64,8 +57,16 @@ namespace typed_array_createtypedarray { typedArray.byte_offset = byteOffset; typedArray.byte_length = byteLength; typedArray.length = length; - typedArray.external_pointer = externalPointer; - typedArray.base_pointer = basePointer; + if constexpr (isOnHeap) { + typed_array::SetJSTypedArrayOnHeapDataPtr( + typedArray, elements, byteOffset); + } else { + typed_array::SetJSTypedArrayOffHeapDataPtr( + typedArray, buffer.backing_store, byteOffset); + assert( + typedArray.data_ptr == + (buffer.backing_store + Convert(byteOffset))); + } SetupTypedArrayEmbedderFields(typedArray); return typedArray; } diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq index dc13865590e051..d17ff4a3756b1c 100644 --- a/deps/v8/src/builtins/typed-array-slice.tq +++ b/deps/v8/src/builtins/typed-array-slice.tq @@ -23,7 +23,7 @@ namespace typed_array_slice { // of src and result array are the same and they are not sharing the // same buffer, use memmove. if (srcKind != destInfo.kind) goto IfSlow; - if (BitcastTaggedToWord(dest.buffer) == BitcastTaggedToWord(src.buffer)) { + if (dest.buffer == src.buffer) { goto IfSlow; } diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 59100736a5dc7b..1c901abf752195 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -71,12 +71,17 @@ namespace typed_array { ElementsKind): bool; extern macro LoadFixedTypedArrayElementAsTagged( RawPtr, Smi, constexpr ElementsKind): Numeric; - extern macro StoreJSTypedArrayElementFromTagged( + extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged( Context, JSTypedArray, Smi, JSAny, constexpr ElementsKind); type LoadFn = builtin(Context, JSTypedArray, Smi) => JSAny; type StoreFn = builtin(Context, JSTypedArray, Smi, JSAny) => JSAny; + extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( + JSTypedArray, ByteArray, uintptr): void; + extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr( + JSTypedArray, RawPtr, uintptr): void; + // AttachedJSTypedArray guards that the array's buffer is not detached. 
transient type AttachedJSTypedArray extends JSTypedArray; @@ -198,7 +203,7 @@ namespace typed_array { builtin StoreFixedElement( context: Context, typedArray: JSTypedArray, index: Smi, value: JSAny): JSAny { - StoreJSTypedArrayElementFromTagged( + typed_array::StoreJSTypedArrayElementFromTagged( context, typedArray, index, value, KindForArrayType()); return Undefined; } diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index b6b407fb3322ef..9679237ff820f7 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -5,8 +5,9 @@ #if V8_TARGET_ARCH_X64 #include "src/api/api-arguments.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/code-factory.h" +#include "src/codegen/x64/assembler-x64.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" #include "src/execution/frames.h" @@ -401,13 +402,13 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ pushq(r13); __ pushq(r14); __ pushq(r15); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI. __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI. #endif __ pushq(rbx); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // On Win64 XMM6-XMM15 are callee-save. __ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize); __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6); @@ -507,7 +508,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, } // Restore callee-saved registers (X64 conventions). -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // On Win64 XMM6-XMM15 are callee-save __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0)); __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1)); @@ -523,7 +524,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, #endif __ popq(rbx); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI. __ popq(rsi); __ popq(rdi); @@ -611,17 +612,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Push(rdi); __ Push(arg_reg_4); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Load the previous frame pointer to access C arguments on stack __ movq(kScratchRegister, Operand(rbp, 0)); // Load the number of arguments and setup pointer to the arguments. __ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset)); __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); -#else // _WIN64 +#else // V8_TARGET_OS_WIN // Load the number of arguments and setup pointer to the arguments. __ movq(rax, r8); __ movq(rbx, r9); -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN // Current stack contents: // [rsp + 2 * kSystemPointerSize ... ] : Internal frame @@ -851,10 +852,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // TODO(juliana): if we remove the code below then we don't need all // the parameters. -static void ReplaceClosureCodeWithOptimizedCode( - MacroAssembler* masm, Register optimized_code, Register closure, - Register scratch1, Register scratch2, Register scratch3) { - +static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, + Register optimized_code, + Register closure, + Register scratch1, + Register scratch2) { // Store the optimized code in the closure. 
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code); @@ -895,104 +897,71 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, __ bind(&no_match); } -static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, - Register feedback_vector, - Register scratch1, Register scratch2, - Register scratch3) { +static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, + Register optimization_marker) { // ----------- S t a t e ------------- // -- rdx : new target (preserved for callee if needed, and caller) // -- rdi : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) + // -- optimization_marker : a Smi containing a non-zero optimization marker. // ----------------------------------- - DCHECK(!AreAliased(feedback_vector, rdx, rdi, scratch1, scratch2, scratch3)); - - Label optimized_code_slot_is_weak_ref, fallthrough; - - Register closure = rdi; - Register optimized_code_entry = scratch1; - Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg; - - __ LoadAnyTaggedField( - optimized_code_entry, - FieldOperand(feedback_vector, - FeedbackVector::kOptimizedCodeWeakOrSmiOffset), - decompr_scratch); - - // Check if the code entry is a Smi. If yes, we interpret it as an - // optimisation marker. Otherwise, interpret it as a weak reference to a code - // object. - __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref); - - { - // Optimized code slot is a Smi optimization marker. - - // Fall through if no optimization trigger. - __ SmiCompare(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kNone)); - __ j(equal, &fallthrough); - - // TODO(v8:8394): The logging of first execution will break if - // feedback vectors are not allocated. We need to find a different way of - // logging these events if required. - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kLogFirstExecution, - Runtime::kFunctionFirstExecution); - TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, - OptimizationMarker::kCompileOptimized, - Runtime::kCompileOptimized_NotConcurrent); - TailCallRuntimeIfMarkerEquals( - masm, optimized_code_entry, - OptimizationMarker::kCompileOptimizedConcurrent, - Runtime::kCompileOptimized_Concurrent); - { - // Otherwise, the marker is InOptimizationQueue, so fall through hoping - // that an interrupt will eventually update the slot with optimized code. - if (FLAG_debug_code) { - __ SmiCompare(optimized_code_entry, - Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)); - __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); - } - __ jmp(&fallthrough); - } + DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker)); + + // TODO(v8:8394): The logging of first execution will break if + // feedback vectors are not allocated. We need to find a different way of + // logging these events if required. + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimized, + Runtime::kCompileOptimized_NotConcurrent); + TailCallRuntimeIfMarkerEquals(masm, optimization_marker, + OptimizationMarker::kCompileOptimizedConcurrent, + Runtime::kCompileOptimized_Concurrent); + + // Otherwise, the marker is InOptimizationQueue, so fall through hoping + // that an interrupt will eventually update the slot with optimized code. 
+ if (FLAG_debug_code) { + __ SmiCompare(optimization_marker, + Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)); + __ Assert(equal, AbortReason::kExpectedOptimizationSentinel); } +} - { - // Optimized code slot is a weak reference. - __ bind(&optimized_code_slot_is_weak_ref); - - __ LoadWeakValue(optimized_code_entry, &fallthrough); +static void TailCallOptimizedCodeSlot(MacroAssembler* masm, + Register optimized_code_entry, + Register scratch1, Register scratch2) { + // ----------- S t a t e ------------- + // -- rdx : new target (preserved for callee if needed, and caller) + // -- rdi : target function (preserved for callee if needed, and caller) + // ----------------------------------- - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - Label found_deoptimized_code; - __ LoadTaggedPointerField( - scratch2, - FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ testl( - FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset), - Immediate(1 << Code::kMarkedForDeoptimizationBit)); - __ j(not_zero, &found_deoptimized_code); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch2, scratch3, feedback_vector); - static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - __ Move(rcx, optimized_code_entry); - __ JumpCodeObject(rcx); + Register closure = rdi; - // Optimized code slot contains deoptimized code, evict it and re-enter the - // closure's code. - __ bind(&found_deoptimized_code); - GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); - } + // Check if the optimized code is marked for deopt. If it is, call the + // runtime to clear it. + Label found_deoptimized_code; + __ LoadTaggedPointerField( + scratch1, + FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); + __ testl(FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset), + Immediate(1 << Code::kMarkedForDeoptimizationBit)); + __ j(not_zero, &found_deoptimized_code); + + // Optimized code is good, get it into the closure and link the closure into + // the optimized functions list, then tail call the optimized code. + ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, + scratch1, scratch2); + static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); + __ Move(rcx, optimized_code_entry); + __ JumpCodeObject(rcx); - // Fall-through if the optimized code cell is clear and there is no - // optimization marker. - __ bind(&fallthrough); + // Optimized code slot contains deoptimized code, evict it and re-enter the + // closure's code. + __ bind(&found_deoptimized_code); + GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1019,20 +988,21 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, static_cast(interpreter::Bytecode::kDebugBreakExtraWide)); __ cmpb(bytecode, Immediate(0x3)); __ j(above, &process_bytecode, Label::kNear); + // The code to load the next bytecode is common to both wide and extra wide. + // We can hoist them up here. incl has to happen before testb since it + // modifies the ZF flag. 
+ __ incl(bytecode_offset); __ testb(bytecode, Immediate(0x1)); + __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); __ j(not_equal, &extra_wide, Label::kNear); - // Load the next bytecode and update table to the wide scaled table. - __ incl(bytecode_offset); - __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the wide scaled table. __ addq(bytecode_size_table, Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount)); __ jmp(&process_bytecode, Label::kNear); __ bind(&extra_wide); - // Load the next bytecode and update table to the extra wide scaled table. - __ incl(bytecode_offset); - __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0)); + // Update table to the extra wide scaled table. __ addq(bytecode_size_table, Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); @@ -1101,7 +1071,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. - MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15); + + Register optimized_code_entry = rcx; + Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg; + + __ LoadAnyTaggedField( + optimized_code_entry, + FieldOperand(feedback_vector, + FeedbackVector::kOptimizedCodeWeakOrSmiOffset), + decompr_scratch); + + // Check if the optimized code slot is not empty. + Label optimized_code_slot_not_empty; + __ Cmp(optimized_code_entry, Smi::FromEnum(OptimizationMarker::kNone)); + __ j(not_equal, &optimized_code_slot_not_empty); + + Label not_optimized; + __ bind(¬_optimized); // Increment invocation count for the function. __ incl( @@ -1137,28 +1123,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Push(rcx); // Allocate the local and temporary register file on the stack. + Label stack_overflow; { // Load frame size from the BytecodeArray object. __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kFrameSizeOffset)); // Do a stack check to ensure we don't go over the limit. - Label ok; __ movq(rax, rsp); __ subq(rax, rcx); __ cmpq(rax, RealStackLimitAsOperand(masm)); - __ j(above_equal, &ok, Label::kNear); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ bind(&ok); + __ j(below, &stack_overflow); // If ok, push undefined as the initial value for all register file entries. Label loop_header; Label loop_check; - __ LoadRoot(rax, RootIndex::kUndefinedValue); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ j(always, &loop_check, Label::kNear); __ bind(&loop_header); // TODO(rmcilroy): Consider doing more than one push per loop iteration. - __ Push(rax); + __ Push(kInterpreterAccumulatorRegister); // Continue loop if not done. __ bind(&loop_check); __ subq(rcx, Immediate(kSystemPointerSize)); @@ -1169,16 +1153,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // register, initialize it with incoming value which was passed in rdx. 
Label no_incoming_new_target_or_generator_register; __ movsxlq( - rax, + rcx, FieldOperand(kInterpreterBytecodeArrayRegister, BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); - __ testl(rax, rax); + __ testl(rcx, rcx); __ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear); - __ movq(Operand(rbp, rax, times_system_pointer_size, 0), rdx); + __ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx); __ bind(&no_incoming_new_target_or_generator_register); - // Load accumulator with undefined. - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + // The accumulator is already loaded with undefined. // Load the dispatch table into a register and dispatch to the bytecode // handler at the current bytecode offset. @@ -1201,10 +1184,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ movq(kInterpreterBytecodeArrayRegister, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Either return, or advance to the next bytecode and dispatch. Label do_return; @@ -1223,6 +1204,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); __ int3(); // Should not return. + + __ bind(&optimized_code_slot_not_empty); + Label maybe_has_optimized_code; + // Check if optimized code marker is actually a weak reference to the + // optimized code as opposed to an optimization marker. + __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code); + MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); + // Fall through if there's no runnable optimized code. + __ jmp(¬_optimized); + + __ bind(&maybe_has_optimized_code); + // Load code entry from the weak reference, if it was cleared, resume + // execution of unoptimized code. + __ LoadWeakValue(optimized_code_entry, ¬_optimized); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15); + + __ bind(&stack_overflow); + __ CallRuntime(Runtime::kThrowStackOverflow); + __ int3(); // Should not return. } static void Generate_InterpreterPushArgs(MacroAssembler* masm, @@ -1425,10 +1425,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { } // Get the target bytecode offset from the frame. - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Dispatch to the target bytecode. __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister, @@ -1443,10 +1441,8 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. 
__ movq(kInterpreterBytecodeArrayRegister, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ movq(kInterpreterBytecodeOffsetRegister, - Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister); + Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); // Load the current bytecode. __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister, @@ -1459,8 +1455,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { &if_return); // Convert new bytecode offset to a Smi and save in the stackframe. - __ SmiTag(rbx, kInterpreterBytecodeOffsetRegister); - __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx); + __ SmiTag(kInterpreterBytecodeOffsetRegister); + __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), + kInterpreterBytecodeOffsetRegister); Generate_InterpreterEnterBytecode(masm); @@ -1485,7 +1482,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { // Preserve argument count for later compare. __ movq(rcx, rax); // Push the number of arguments to the callee. - __ SmiTag(rax, rax); + __ SmiTag(rax); __ Push(rax); // Push a copy of the target function and the new target. __ Push(rdi); @@ -1522,7 +1519,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ Drop(2); __ Pop(rcx); - __ SmiUntag(rcx, rcx); + __ SmiUntag(rcx); scope.GenerateLeaveFrame(); __ PopReturnAddressTo(rbx); @@ -1536,7 +1533,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) { __ Pop(rdx); __ Pop(rdi); __ Pop(rax); - __ SmiUntag(rax, rax); + __ SmiUntag(rax); } // On failure, tail call back to regular js by re-calling the function // which has be reset to the compile lazy builtin. @@ -1563,7 +1560,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, int code = config->GetAllocatableGeneralCode(i); __ popq(Register::from_code(code)); if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { - __ SmiUntag(Register::from_code(code), Register::from_code(code)); + __ SmiUntag(Register::from_code(code)); } } __ movq( @@ -2274,7 +2271,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // TODO(bmeurer): Inline the allocation here to avoid building the frame // in the fast case? (fall back to AllocateInNewSpace?) FrameScope scope(masm, StackFrame::INTERNAL); - __ SmiTag(rax, rax); + __ SmiTag(rax); __ Push(rax); __ Push(rdi); __ movq(rax, rcx); @@ -2285,7 +2282,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movq(rcx, rax); __ Pop(rdi); __ Pop(rax); - __ SmiUntag(rax, rax); + __ SmiUntag(rax); } __ LoadTaggedPointerField( rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); @@ -2601,14 +2598,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { } void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - // Lookup the function in the JavaScript frame. - __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - __ movq(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset)); - { FrameScope scope(masm, StackFrame::INTERNAL); - // Pass function as argument. - __ Push(rax); __ CallRuntime(Runtime::kCompileForOnStackReplacement); } @@ -2647,7 +2638,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was pushed to the stack by the caller as int32. __ Pop(r11); // Convert to Smi for the runtime call. 
- __ SmiTag(r11, r11); + __ SmiTag(r11); { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2716,7 +2707,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If argv_mode == kArgvInRegister: // r15: pointer to the first argument -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the // stack to be aligned to 16 bytes. It only allows a single-word to be // returned in register rax. Larger return sizes must be written to an address @@ -2738,7 +2729,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, const Register kCCallArg3 = rcx; const int kArgExtraStackSpace = 0; const int kMaxRegisterResultSize = 2; -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN // Enter the exit frame that transitions from JavaScript to C++. int arg_stack_space = @@ -2809,7 +2800,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); Operand pending_exception_operand = masm->ExternalReferenceAsOperand(pending_exception_address); - __ cmpq(r14, pending_exception_operand); + __ cmp_tagged(r14, pending_exception_operand); __ j(equal, &okay, Label::kNear); __ int3(); __ bind(&okay); diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index feb2f62f7878ec..64d2d7b97deb01 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -1,6 +1,6 @@ bbudge@chromium.org bmeurer@chromium.org -clemensh@chromium.org +clemensb@chromium.org gdeepti@chromium.org ishell@chromium.org jarin@chromium.org diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h index 3fbd679104ea21..45ec07a382822b 100644 --- a/deps/v8/src/codegen/arm/assembler-arm-inl.h +++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h @@ -118,7 +118,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 9c46063537d62d..6659960bb809d0 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -40,6 +40,7 @@ #include "src/base/bits.h" #include "src/base/cpu.h" +#include "src/base/overflowing-math.h" #include "src/codegen/arm/assembler-arm-inl.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler.h" @@ -452,8 +453,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); @@ -4802,15 +4803,17 @@ void Assembler::GrowBuffer() { int rc_delta = (new_start + new_size) - (buffer_start_ + old_size); size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); MemMove(new_start, buffer_start_, pc_offset()); - 
MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), - reloc_size); + byte* new_reloc_start = reinterpret_cast<byte*>( + reinterpret_cast<Address>
(reloc_info_writer.pos()) + rc_delta); + MemMove(new_reloc_start, reloc_info_writer.pos(), reloc_size); // Switch buffers. buffer_ = std::move(new_buffer); buffer_start_ = new_start; - pc_ += pc_delta; - reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.last_pc() + pc_delta); + pc_ = reinterpret_cast<byte*>(reinterpret_cast<Address>
(pc_) + pc_delta); + byte* new_last_pc = reinterpret_cast<byte*>( + reinterpret_cast<Address>
(reloc_info_writer.last_pc()) + pc_delta); + reloc_info_writer.Reposition(new_reloc_start, new_last_pc); // None of our relocation types are pc relative pointing outside the code // buffer nor pc absolute pointing inside the code buffer, so there is no need @@ -4831,7 +4834,7 @@ void Assembler::dd(uint32_t data) { // blocked before using dd. DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - *reinterpret_cast<uint32_t*>(pc_) = data; + base::WriteUnalignedValue(reinterpret_cast<Address>
(pc_), data); pc_ += sizeof(uint32_t); } @@ -4840,7 +4843,7 @@ void Assembler::dq(uint64_t value) { // blocked before using dq. DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - *reinterpret_cast<uint64_t*>(pc_) = value; + base::WriteUnalignedValue(reinterpret_cast<Address>
(pc_), value); pc_ += sizeof(uint64_t); } diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index f669943f34edd6..1d280e5555b8e1 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -41,6 +41,7 @@ #define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_ #include +#include #include #include "src/codegen/arm/constants-arm.h" @@ -305,9 +306,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { explicit Assembler(const AssemblerOptions&, std::unique_ptr = {}); - virtual ~Assembler(); + ~Assembler() override; - virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); } + void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); } // GetCode emits any pending (non-emitted) code and fills the descriptor desc. static constexpr int kNoHandlerTable = 0; diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 7f6d82518ec1dc..6f1adfead26d14 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -573,7 +573,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, Condition cond) { DCHECK_LT(lsb, 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { - int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); + int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u); and_(dst, src1, Operand(mask), LeaveCC, cond); if (lsb != 0) { mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); @@ -1602,57 +1602,43 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - Move(r4, debug_hook_active); - ldrsb(r4, MemOperand(r4)); - cmp(r4, Operand(0)); - b(eq, &skip_hook); - - { - // Load receiver to pass it later to DebugOnFunctionCall hook. - if (actual.is_reg()) { - mov(r4, actual.reg()); - } else { - mov(r4, Operand(actual.immediate())); - } - ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2)); - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg()); - Push(actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Push(r4); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + // Load receiver to pass it later to DebugOnFunctionCall hook. + if (actual.is_reg()) { + ldr(r4, MemOperand(sp, actual.reg(), LSL, kPointerSizeLog2)); + } else { + ldr(r4, MemOperand(sp, actual.immediate() << kPointerSizeLog2)); + } + FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(r4); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -1665,7 +1651,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target == r3); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + Move(r4, debug_hook_active); + ldrsb(r4, MemOperand(r4)); + cmp(r4, Operand(0)); + b(ne, &debug_hook); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -1687,11 +1682,17 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(code); } - - // Continue here if InvokePrologue does handle the invocation due to - // mismatched parameter counts. - bind(&done); } + b(&done); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + b(&continue_after_hook); + + // Continue here if InvokePrologue does handle the invocation due to + // mismatched parameter counts. + bind(&done); } void MacroAssembler::InvokeFunction(Register fun, Register new_target, diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index bbea40b9a628cc..4807a6d20da3f9 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -633,10 +633,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. 
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index baae106c1c6ad8..ce34da7dc2acf6 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -54,14 +54,12 @@ inline bool CPURegister::IsSP() const { } inline void CPURegList::Combine(const CPURegList& other) { - DCHECK(IsValid()); DCHECK(other.type() == type_); DCHECK(other.RegisterSizeInBits() == size_); list_ |= other.list(); } inline void CPURegList::Remove(const CPURegList& other) { - DCHECK(IsValid()); if (other.type() == type_) { list_ &= ~other.list(); } @@ -84,13 +82,12 @@ inline void CPURegList::Remove(const CPURegister& other1, } inline void CPURegList::Combine(int code) { - DCHECK(IsValid()); DCHECK(CPURegister::Create(code, size_, type_).IsValid()); list_ |= (1ULL << code); + DCHECK(IsValid()); } inline void CPURegList::Remove(int code) { - DCHECK(IsValid()); DCHECK(CPURegister::Create(code, size_, type_).IsValid()); list_ &= ~(1ULL << code); } @@ -311,6 +308,18 @@ Operand Operand::ToExtendedRegister() const { return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); } +Operand Operand::ToW() const { + if (IsShiftedRegister()) { + DCHECK(reg_.Is64Bits()); + return Operand(reg_.W(), shift(), shift_amount()); + } else if (IsExtendedRegister()) { + DCHECK(reg_.Is64Bits()); + return Operand(reg_.W(), extend(), shift_amount()); + } + DCHECK(IsImmediate()); + return *this; +} + Immediate Operand::immediate_for_heap_object_request() const { DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber && immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) || @@ -711,7 +720,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index c798d3a8a03ed9..ea2f4696bdbca1 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -63,18 +63,16 @@ void CpuFeatures::PrintFeatures() {} // CPURegList utilities. CPURegister CPURegList::PopLowestIndex() { - DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } - int index = CountTrailingZeros(list_, kRegListSizeInBits); + int index = base::bits::CountTrailingZeros(list_); DCHECK((1LL << index) & list_); Remove(index); return CPURegister::Create(index, size_, type_); } CPURegister CPURegList::PopHighestIndex() { - DCHECK(IsValid()); if (IsEmpty()) { return NoCPUReg; } @@ -369,8 +367,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast
(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - Handle object = isolate->factory()->NewHeapNumber( - request.heap_number(), AllocationType::kOld); + Handle object = + isolate->factory()->NewHeapNumber( + request.heap_number()); EmbeddedObjectIndex index = AddEmbeddedObject(object); set_embedded_object_index_referenced_from(pc, index); break; @@ -3967,19 +3966,24 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr, bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); } bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) { - bool offset_is_size_multiple = (((offset >> size) << size) == offset); + bool offset_is_size_multiple = + (static_cast(static_cast(offset >> size) << size) == + offset); return offset_is_size_multiple && is_uint12(offset >> size); } bool Assembler::IsImmLSPair(int64_t offset, unsigned size) { - bool offset_is_size_multiple = (((offset >> size) << size) == offset); + bool offset_is_size_multiple = + (static_cast(static_cast(offset >> size) << size) == + offset); return offset_is_size_multiple && is_int7(offset >> size); } bool Assembler::IsImmLLiteral(int64_t offset) { int inst_size = static_cast(kInstrSizeLog2); bool offset_is_inst_multiple = - (((offset >> inst_size) << inst_size) == offset); + (static_cast(static_cast(offset >> inst_size) + << inst_size) == offset); DCHECK_GT(offset, 0); offset >>= kLoadLiteralScaleLog2; return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width); @@ -4178,9 +4182,9 @@ bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n, // 1110ss 4 UInt(ss) // 11110s 2 UInt(s) // - // So we 'or' (-d << 1) with our computed s to form imms. + // So we 'or' (-d * 2) with our computed s to form imms. *n = out_n; - *imm_s = ((-d << 1) | (s - 1)) & 0x3F; + *imm_s = ((-d * 2) | (s - 1)) & 0x3F; *imm_r = r; return true; diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 04ee6d8b750e05..23e8acb1f95c91 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "src/base/optional.h" @@ -105,6 +106,9 @@ class Operand { // which helps in the encoding of instructions that use the stack pointer. inline Operand ToExtendedRegister() const; + // Returns new Operand adapted for using with W registers. + inline Operand ToW() const; + inline Immediate immediate() const; inline int64_t ImmediateValue() const; inline RelocInfo::Mode ImmediateRMode() const; @@ -189,9 +193,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { explicit Assembler(const AssemblerOptions&, std::unique_ptr = {}); - virtual ~Assembler(); + ~Assembler() override; - virtual void AbortedCodeGeneration(); + void AbortedCodeGeneration() override; // System functions --------------------------------------------------------- // Start generating code from the beginning of the buffer, discarding any code @@ -375,7 +379,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Instruction set functions ------------------------------------------------ // Branch / Jump instructions. - // For branches offsets are scaled, i.e. they in instrcutions not in bytes. + // For branches offsets are scaled, i.e. in instructions not in bytes. // Branch to register. 
void br(const Register& xn); diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h index 914268644a6d54..ccafae5e14f466 100644 --- a/deps/v8/src/codegen/arm64/constants-arm64.h +++ b/deps/v8/src/codegen/arm64/constants-arm64.h @@ -33,6 +33,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128; constexpr uint8_t kInstrSize = 4; constexpr uint8_t kInstrSizeLog2 = 2; constexpr uint8_t kLoadLiteralScaleLog2 = 2; +constexpr uint8_t kLoadLiteralScale = 1 << kLoadLiteralScaleLog2; constexpr int kMaxLoadLiteralRange = 1 * MB; const int kNumberOfRegisters = 32; @@ -146,7 +147,8 @@ const unsigned kFloat16ExponentBias = 15; // Actual value of root register is offset from the root array's start // to take advantage of negative displacement values. // TODO(sigurds): Choose best value. -constexpr int kRootRegisterBias = 256; +// TODO(ishell): Choose best value for ptr-compr. +constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 256 : 0; using float16 = uint16_t; diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc index 05f3654da99d34..ab022affdd11b2 100644 --- a/deps/v8/src/codegen/arm64/instructions-arm64.cc +++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc @@ -71,6 +71,7 @@ static uint64_t RotateRight(uint64_t value, unsigned int rotate, unsigned int width) { DCHECK_LE(width, 64); rotate &= 63; + if (rotate == 0) return value; return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) | (value >> rotate); } @@ -191,16 +192,16 @@ int64_t Instruction::ImmPCOffset() { } else if (BranchType() != UnknownBranchType) { // All PC-relative branches. // Relative branch offsets are instruction-size-aligned. - offset = ImmBranch() << kInstrSizeLog2; + offset = ImmBranch() * kInstrSize; } else if (IsUnresolvedInternalReference()) { // Internal references are always word-aligned. - offset = ImmUnresolvedInternalReference() << kInstrSizeLog2; + offset = ImmUnresolvedInternalReference() * kInstrSize; } else { // Load literal (offset from PC). DCHECK(IsLdrLiteral()); // The offset is always shifted by 2 bits, even for loads to 64-bits // registers. - offset = ImmLLiteral() << kInstrSizeLog2; + offset = ImmLLiteral() * kInstrSize; } return offset; } diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h index 1132ba39db2d8d..7fe732e2baae1a 100644 --- a/deps/v8/src/codegen/arm64/instructions-arm64.h +++ b/deps/v8/src/codegen/arm64/instructions-arm64.h @@ -5,6 +5,7 @@ #ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_ #define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_ +#include "src/base/memory.h" #include "src/codegen/arm64/constants-arm64.h" #include "src/codegen/arm64/register-arm64.h" #include "src/codegen/arm64/utils-arm64.h" @@ -82,11 +83,13 @@ enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister }; class Instruction { public: V8_INLINE Instr InstructionBits() const { - return *reinterpret_cast(this); + // Usually this is aligned, but when de/serializing that's not guaranteed. + return base::ReadUnalignedValue(reinterpret_cast
<Address>(this)); } V8_INLINE void SetInstructionBits(Instr new_instr) { - *reinterpret_cast<Instr*>(this) = new_instr; + // Usually this is aligned, but when de/serializing that's not guaranteed. + base::WriteUnalignedValue(reinterpret_cast<Address>
(this), new_instr); } int Bit(int pos) const { return (InstructionBits() >> pos) & 1; } @@ -96,7 +99,9 @@ class Instruction { } int32_t SignedBits(int msb, int lsb) const { - int32_t bits = *(reinterpret_cast<const int32_t*>(this)); + // Usually this is aligned, but when de/serializing that's not guaranteed. + int32_t bits = + base::ReadUnalignedValue<int32_t>(reinterpret_cast<Address>
(this)); return signed_bitextract_32(msb, lsb, bits); } @@ -125,7 +130,8 @@ class Instruction { // formed from ImmPCRelLo and ImmPCRelHi. int ImmPCRel() const { DCHECK(IsPCRelAddressing()); - int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); + int offset = (static_cast(ImmPCRelHi()) << ImmPCRelLo_width) | + ImmPCRelLo(); int width = ImmPCRelLo_width + ImmPCRelHi_width; return signed_bitextract_32(width - 1, 0, offset); } @@ -404,7 +410,7 @@ class Instruction { void SetImmLLiteral(Instruction* source); uintptr_t LiteralAddress() { - int offset = ImmLLiteral() << kLoadLiteralScaleLog2; + int offset = ImmLLiteral() * kLoadLiteralScale; return reinterpret_cast(this) + offset; } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 62bd9c26bfb36d..261fd1e564a62b 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -93,6 +93,15 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, } } +void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond) { + if (COMPRESS_POINTERS_BOOL) { + Ccmp(rn.W(), operand.ToW(), nzcv, cond); + } else { + Ccmp(rn, operand, nzcv, cond); + } +} + void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); @@ -157,6 +166,14 @@ void TurboAssembler::Cmp(const Register& rn, const Operand& operand) { Subs(AppropriateZeroRegFor(rn), rn, operand); } +void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { + if (COMPRESS_POINTERS_BOOL) { + Cmp(rn.W(), operand.ToW()); + } else { + Cmp(rn, operand); + } +} + void TurboAssembler::Neg(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -982,7 +999,12 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { AssertSmi(src); } DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); - Asr(dst, src, kSmiShift); + if (COMPRESS_POINTERS_BOOL) { + Asr(dst.W(), src.W(), kSmiShift); + Sxtw(dst, dst); + } else { + Asr(dst, src, kSmiShift); + } } void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { @@ -1002,11 +1024,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } else { DCHECK(SmiValuesAre31Bits()); -#ifdef V8_COMPRESS_POINTERS - Ldrsw(dst, src); -#else - Ldr(dst, src); -#endif + if (COMPRESS_POINTERS_BOOL) { + Ldr(dst.W(), src); + } else { + Ldr(dst, src); + } SmiUntag(dst); } } @@ -1029,13 +1051,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, } void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { - Cmp(x, y); - B(eq, dest); + CompareAndBranch(x, y, eq, dest); } void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { - Cmp(x, y); - B(lt, dest); + CompareAndBranch(x, y, lt, dest); } void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { @@ -1083,7 +1103,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); - const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); + const int shift = base::bits::CountTrailingZeros(unit_size); const Operand size(count, LSL, shift); if (size.IsZero()) { @@ -1136,7 +1156,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; 
DCHECK(base::bits::IsPowerOfTwo(unit_size)); - const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); + const int shift = base::bits::CountTrailingZeros(unit_size); const Operand size(count, LSL, shift); if (size.IsZero()) { @@ -1175,7 +1195,7 @@ void TurboAssembler::DropSlots(int64_t count) { void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } -void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, +void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) && ((cond == eq) || (cond == ne))) { @@ -1190,6 +1210,16 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, } } +void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, + const Operand& rhs, Condition cond, + Label* label) { + if (COMPRESS_POINTERS_BOOL) { + CompareAndBranch(lhs.W(), rhs.ToW(), cond, label); + } else { + CompareAndBranch(lhs, rhs, cond, label); + } +} + void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, const uint64_t bit_pattern, Label* label) { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 0a721b06474987..892458fe8bb9ed 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -295,7 +295,9 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) { Handle x( reinterpret_cast(operand.ImmediateValue())); - IndirectLoadConstant(rd, x); + // TODO(v8:9706): Fix-it! This load will always uncompress the value + // even when we are loading a compressed embedded object. + IndirectLoadConstant(rd.X(), x); return; } } @@ -650,7 +652,14 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, // The move was successful; nothing to do here. } else { // Pre-shift the immediate to the least-significant bits of the register. - int shift_low = CountTrailingZeros(imm, reg_size); + int shift_low; + if (reg_size == 64) { + shift_low = base::bits::CountTrailingZeros(imm); + } else { + DCHECK_EQ(reg_size, 32); + shift_low = base::bits::CountTrailingZeros(static_cast(imm)); + } + if (mode == kLimitShiftForSP) { // When applied to the stack pointer, the subsequent arithmetic operation // can use the extend form to shift left by a maximum of four bits. Right @@ -1456,15 +1465,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void MacroAssembler::LoadObject(Register result, Handle object) { - AllowDeferredHandleDereference heap_object_check; - if (object->IsHeapObject()) { - Mov(result, Handle::cast(object)); - } else { - Mov(result, Operand(Smi::cast(*object))); - } -} - void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); } void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, @@ -1923,21 +1923,25 @@ void TurboAssembler::Call(ExternalReference target) { } void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { - STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
-#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 0); - Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift); -#else - STATIC_ASSERT(kSmiShiftSize == 31); - Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); -#endif - Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset()); - Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); + if (SmiValuesAre32Bits()) { + Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); + Add(builtin_index, builtin_index, + IsolateData::builtin_entry_table_offset()); + Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); + } else { + DCHECK(SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + Add(builtin_index, kRootRegister, + Operand(builtin_index.W(), SXTW, kSystemPointerSizeLog2 - kSmiShift)); + } else { + Add(builtin_index, kRootRegister, + Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiShift)); + } + Ldr(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); + } } void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { @@ -2207,43 +2211,34 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Bind(®ular_invoke); } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); - Ldrsb(x4, MemOperand(x4)); - Cbz(x4, &skip_hook); - - { - // Load receiver to pass it later to DebugOnFunctionCall hook. - Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) - : Operand(actual.reg()); - Mov(x4, actual_op); - Ldr(x4, MemOperand(sp, x4, LSL, kSystemPointerSizeLog2)); - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + // Load receiver to pass it later to DebugOnFunctionCall hook. + if (actual.is_reg()) { + Ldr(x4, MemOperand(sp, actual.reg(), LSL, kSystemPointerSizeLog2)); + } else { + Ldr(x4, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2)); + } + FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - Register expected_reg = padreg; - Register actual_reg = padreg; - if (expected.is_reg()) expected_reg = expected.reg(); - if (actual.is_reg()) actual_reg = actual.reg(); - if (!new_target.is_valid()) new_target = padreg; + Register expected_reg = padreg; + Register actual_reg = padreg; + if (expected.is_reg()) expected_reg = expected.reg(); + if (actual.is_reg()) actual_reg = actual.reg(); + if (!new_target.is_valid()) new_target = padreg; - // Save values on stack. - SmiTag(expected_reg); - SmiTag(actual_reg); - Push(expected_reg, actual_reg, new_target, fun); - Push(fun, x4); - CallRuntime(Runtime::kDebugOnFunctionCall); + // Save values on stack. + SmiTag(expected_reg); + SmiTag(actual_reg); + Push(expected_reg, actual_reg, new_target, fun); + Push(fun, x4); + CallRuntime(Runtime::kDebugOnFunctionCall); - // Restore values from stack. - Pop(fun, new_target, actual_reg, expected_reg); - SmiUntag(actual_reg); - SmiUntag(expected_reg); - } - Bind(&skip_hook); + // Restore values from stack. 
+ Pop(fun, new_target, actual_reg, expected_reg); + SmiUntag(actual_reg); + SmiUntag(expected_reg); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -2256,7 +2251,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3)); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate())); + Ldrsb(x4, MemOperand(x4)); + Cbnz(x4, &debug_hook); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -2284,6 +2285,12 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, JumpCodeObject(code); } } + B(&done); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + B(&continue_after_hook); // Continue here if InvokePrologue does handle the invocation due to // mismatched parameter counts. @@ -2636,7 +2643,7 @@ void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) { Register temp = temps.AcquireX(); DCHECK(!AreAliased(obj, temp)); LoadRoot(temp, index); - Cmp(obj, temp); + CmpTagged(obj, temp); } void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index, @@ -2669,20 +2676,20 @@ void MacroAssembler::JumpIfIsInRange(const Register& value, void TurboAssembler::LoadTaggedPointerField(const Register& destination, const MemOperand& field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressTaggedPointer(destination, field_operand); -#else - Ldr(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedPointer(destination, field_operand); + } else { + Ldr(destination, field_operand); + } } void TurboAssembler::LoadAnyTaggedField(const Register& destination, const MemOperand& field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressAnyTagged(destination, field_operand); -#else - Ldr(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressAnyTagged(destination, field_operand); + } else { + Ldr(destination, field_operand); + } } void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { @@ -2691,33 +2698,31 @@ void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { void TurboAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - Str(value.W(), dst_field_operand); - RecordComment("]"); -#else - Str(value, dst_field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + Str(value.W(), dst_field_operand); + } else { + Str(value, dst_field_operand); + } } void TurboAssembler::DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressTaggedSigned"); - Ldrsw(destination, field_operand); + Ldr(destination.W(), field_operand); RecordComment("]"); } void TurboAssembler::DecompressTaggedSigned(const Register& destination, const Register& source) { RecordComment("[ DecompressTaggedSigned"); - Sxtw(destination, source); + Mov(destination.W(), source.W()); RecordComment("]"); } void TurboAssembler::DecompressTaggedPointer(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressTaggedPointer"); - Ldrsw(destination, field_operand); + Ldr(destination.W(), field_operand); Add(destination, 
kRootRegister, destination); RecordComment("]"); } @@ -2725,57 +2730,22 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination, void TurboAssembler::DecompressTaggedPointer(const Register& destination, const Register& source) { RecordComment("[ DecompressTaggedPointer"); - Add(destination, kRootRegister, Operand(source, SXTW)); + Add(destination, kRootRegister, Operand(source, UXTW)); RecordComment("]"); } void TurboAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressAnyTagged"); - Ldrsw(destination, field_operand); - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - UseScratchRegisterScope temps(this); - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0)); - Register masked_root = temps.AcquireX(); - // Sign extend tag bit to entire register. - Sbfx(masked_root, destination, 0, kSmiTagSize); - And(masked_root, masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is a - // smi or add the isolate root if it is a heap object. - Add(destination, masked_root, destination); - } else { - Label done; - JumpIfSmi(destination, &done); - Add(destination, kRootRegister, destination); - bind(&done); - } + Ldr(destination.W(), field_operand); + Add(destination, kRootRegister, destination); RecordComment("]"); } void TurboAssembler::DecompressAnyTagged(const Register& destination, const Register& source) { RecordComment("[ DecompressAnyTagged"); - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - UseScratchRegisterScope temps(this); - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0)); - Register masked_root = temps.AcquireX(); - // Sign extend tag bit to entire register. - Sbfx(masked_root, source, 0, kSmiTagSize); - And(masked_root, masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is a - // smi or add the isolate root if it is a heap object. - Add(destination, masked_root, Operand(source, SXTW)); - } else { - Label done; - Sxtw(destination, source); - JumpIfSmi(destination, &done); - Add(destination, kRootRegister, destination); - bind(&done); - } + Add(destination, kRootRegister, Operand(source, UXTW)); RecordComment("]"); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 94091e862489c5..cb3b51eb527f9f 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -652,6 +652,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { const Operand& operand); inline void Blr(const Register& xn); inline void Cmp(const Register& rn, const Operand& operand); + inline void CmpTagged(const Register& rn, const Operand& operand); inline void Subs(const Register& rd, const Register& rn, const Operand& operand); void Csel(const Register& rd, const Register& rn, const Operand& operand, @@ -843,6 +844,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met); + // Compare a register with an operand, and branch to label depending on the + // condition. May corrupt the status flags. 
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs, + Condition cond, Label* label); + inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs, + Condition cond, Label* label); + // Test the bits of register defined by bit_pattern, and branch if ANY of // those bits are set. May corrupt the status flags. inline void TestAndBranchIfAnySet(const Register& reg, @@ -1006,6 +1014,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Conditional macros. inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond); + inline void CcmpTagged(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond); inline void Clz(const Register& rd, const Register& rn); @@ -1597,8 +1607,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { tbx(vd, vn, vn2, vn3, vn4, vm); } - void LoadObject(Register result, Handle object); - inline void PushSizeRegList( RegList registers, unsigned reg_size, CPURegister::RegisterType type = CPURegister::kRegister) { @@ -1643,11 +1651,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // be aligned to 16 bytes. void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset); - // Compare a register with an operand, and branch to label depending on the - // condition. May corrupt the status flags. - inline void CompareAndBranch(const Register& lhs, const Operand& rhs, - Condition cond, Label* label); - // Insert one or more instructions into the instruction stream that encode // some caller-defined data. The instructions used will be executable with no // side effects. @@ -1767,10 +1770,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& actual, Label* done, InvokeFlag flag, bool* definitely_mismatches); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); void InvokeFunctionCode(Register function, Register new_target, const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index a782bf9cd8dc7f..bc088a3308772f 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -105,7 +105,7 @@ class CPURegister : public RegisterBase { enum RegisterType { kRegister, kVRegister, kNoRegister }; static constexpr CPURegister no_reg() { - return CPURegister{0, 0, kNoRegister}; + return CPURegister{kCode_no_reg, 0, kNoRegister}; } template @@ -597,18 +597,16 @@ class V8_EXPORT_PRIVATE CPURegList { } CPURegister::RegisterType type() const { - DCHECK(IsValid()); return type_; } RegList list() const { - DCHECK(IsValid()); return list_; } inline void set_list(RegList new_list) { - DCHECK(IsValid()); list_ = new_list; + DCHECK(IsValid()); } // Combine another CPURegList into this one. 
Registers that already exist in @@ -656,7 +654,6 @@ class V8_EXPORT_PRIVATE CPURegList { static CPURegList GetSafepointSavedRegisters(); bool IsEmpty() const { - DCHECK(IsValid()); return list_ == 0; } @@ -664,7 +661,6 @@ class V8_EXPORT_PRIVATE CPURegList { const CPURegister& other2 = NoCPUReg, const CPURegister& other3 = NoCPUReg, const CPURegister& other4 = NoCPUReg) const { - DCHECK(IsValid()); RegList list = 0; if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit(); if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit(); @@ -674,12 +670,10 @@ class V8_EXPORT_PRIVATE CPURegList { } int Count() const { - DCHECK(IsValid()); return CountSetBits(list_, kRegListSizeInBits); } int RegisterSizeInBits() const { - DCHECK(IsValid()); return size_; } @@ -690,7 +684,6 @@ class V8_EXPORT_PRIVATE CPURegList { } int TotalSizeInBytes() const { - DCHECK(IsValid()); return RegisterSizeInBytes() * Count(); } diff --git a/deps/v8/src/codegen/arm64/utils-arm64.cc b/deps/v8/src/codegen/arm64/utils-arm64.cc index 2f972ce5027036..dba2eeb7e1032e 100644 --- a/deps/v8/src/codegen/arm64/utils-arm64.cc +++ b/deps/v8/src/codegen/arm64/utils-arm64.cc @@ -89,15 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) { } } -int CountTrailingZeros(uint64_t value, int width) { - DCHECK((width == 32) || (width == 64)); - if (width == 64) { - return static_cast(base::bits::CountTrailingZeros64(value)); - } - return static_cast(base::bits::CountTrailingZeros32( - static_cast(value & 0xFFFFFFFFF))); -} - int CountSetBits(uint64_t value, int width) { DCHECK((width == 32) || (width == 64)); if (width == 64) { @@ -109,7 +100,7 @@ int CountSetBits(uint64_t value, int width) { int LowestSetBitPosition(uint64_t value) { DCHECK_NE(value, 0U); - return CountTrailingZeros(value, 64) + 1; + return base::bits::CountTrailingZeros(value) + 1; } int HighestSetBitPosition(uint64_t value) { @@ -118,12 +109,14 @@ int HighestSetBitPosition(uint64_t value) { } uint64_t LargestPowerOf2Divisor(uint64_t value) { - return value & (-(int64_t)value); + // Simulate two's complement (instead of casting to signed and negating) to + // avoid undefined behavior on signed overflow. + return value & ((~value) + 1); } int MaskToBit(uint64_t mask) { DCHECK_EQ(CountSetBits(mask, 64), 1); - return CountTrailingZeros(mask, 64); + return base::bits::CountTrailingZeros(mask); } #undef __ diff --git a/deps/v8/src/codegen/arm64/utils-arm64.h b/deps/v8/src/codegen/arm64/utils-arm64.h index 6bddce6fff2ed8..182d781d55da3d 100644 --- a/deps/v8/src/codegen/arm64/utils-arm64.h +++ b/deps/v8/src/codegen/arm64/utils-arm64.h @@ -33,7 +33,6 @@ int float16classify(float16 value); // Bit counting. 
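The new LargestPowerOf2Divisor above isolates the lowest set bit without casting to a signed type, sidestepping the signed-negation overflow that the old value & (-(int64_t)value) form could hit for the top bit. A standalone sketch of the same identity, illustrative only:

#include <cassert>
#include <cstdint>

uint64_t LargestPowerOf2Divisor(uint64_t value) {
  // (~value) + 1 is the two's complement of value computed purely in
  // unsigned arithmetic, so there is no UB even for 1ull << 63.
  return value & ((~value) + 1);
}

int main() {
  assert(LargestPowerOf2Divisor(24) == 8);                     // 0b11000 -> 8
  assert(LargestPowerOf2Divisor(1ull << 63) == (1ull << 63));  // no overflow
}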
int CountLeadingZeros(uint64_t value, int width); int CountLeadingSignBits(int64_t value, int width); -V8_EXPORT_PRIVATE int CountTrailingZeros(uint64_t value, int width); V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width); int LowestSetBitPosition(uint64_t value); int HighestSetBitPosition(uint64_t value); @@ -61,7 +60,7 @@ T ReverseBytes(T value, int block_bytes_log2) { static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1}, {4, 5, 6, 7, 0, 1, 2, 3}, {0, 1, 2, 3, 4, 5, 6, 7}}; - T result = 0; + typename std::make_unsigned::type result = 0; for (int i = 0; i < 8; i++) { result <<= 8; result |= bytes[permute_table[block_bytes_log2 - 1][i]]; diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 498afb03206432..4e354d9e54b29d 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -92,7 +92,7 @@ class DefaultAssemblerBuffer : public AssemblerBuffer { std::unique_ptr Grow(int new_size) override { DCHECK_LT(size(), new_size); - return base::make_unique(new_size); + return std::make_unique(new_size); } private: @@ -121,12 +121,12 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer { std::unique_ptr ExternalAssemblerBuffer(void* start, int size) { - return base::make_unique( + return std::make_unique( reinterpret_cast(start), size); } std::unique_ptr NewAssemblerBuffer(int size) { - return base::make_unique(size); + return std::make_unique(size); } // ----------------------------------------------------------------------------- diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 98639583d8119f..af70c4a48fb279 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -36,6 +36,7 @@ #define V8_CODEGEN_ASSEMBLER_H_ #include +#include #include #include "src/base/memory.h" diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 7dad8cb95e00a2..3051ce3662c8ce 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -7,9 +7,11 @@ #include "include/v8-internal.h" #include "src/base/macros.h" #include "src/codegen/code-factory.h" +#include "src/codegen/tnode.h" #include "src/common/globals.h" #include "src/execution/frames-inl.h" #include "src/execution/frames.h" +#include "src/execution/protectors.h" #include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop. 
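The ReverseBytes change above accumulates into std::make_unsigned<T>::type so that shifting bytes toward the top of a signed T never touches the sign bit. A simplified full-width byte reversal showing the same idea (not the block-wise permute-table version in the patch; names are illustrative):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <type_traits>

template <typename T>
T ReverseBytesSketch(T value) {
  uint8_t bytes[sizeof(T)];
  std::memcpy(bytes, &value, sizeof(T));
  // Accumulate in an unsigned type: left-shifting a signed value into its
  // sign bit would be undefined/implementation-defined behavior.
  typename std::make_unsigned<T>::type result = 0;
  for (size_t i = 0; i < sizeof(T); i++) {
    result = (result << 8) | bytes[i];
  }
  return static_cast<T>(result);
}

int main() {
  // Reversing twice restores the value regardless of host endianness; on a
  // little-endian host a single reversal of 0x11223344 yields 0x44332211.
  int32_t v = 0x11223344;
  assert(ReverseBytesSketch(ReverseBytesSketch(v)) == v);
}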
#include "src/logging/counters.h" #include "src/objects/api-callbacks.h" @@ -17,6 +19,7 @@ #include "src/objects/descriptor-array.h" #include "src/objects/function-kind.h" #include "src/objects/heap-number.h" +#include "src/objects/js-generator.h" #include "src/objects/oddball.h" #include "src/objects/ordered-hash-table-inl.h" #include "src/objects/property-cell.h" @@ -26,10 +29,6 @@ namespace v8 { namespace internal { using compiler::Node; -template -using TNode = compiler::TNode; -template -using SloppyTNode = compiler::SloppyTNode; CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) : compiler::CodeAssembler(state), @@ -135,6 +134,148 @@ void CodeStubAssembler::Check(SloppyTNode condition_node, Check(branch, message, file, line, extra_nodes); } +template <> +TNode CodeStubAssembler::IntPtrToParameter(TNode value) { + return SmiTag(value); +} +template <> +TNode CodeStubAssembler::IntPtrToParameter( + TNode value) { + return value; +} + +void CodeStubAssembler::CollectCallableFeedback( + TNode maybe_target, TNode context, + TNode feedback_vector, TNode slot_id) { + Label extra_checks(this, Label::kDeferred), done(this); + + // Check if we have monomorphic {target} feedback already. + TNode feedback = + LoadFeedbackVectorSlot(feedback_vector, slot_id); + Comment("check if monomorphic"); + TNode is_monomorphic = IsWeakReferenceToObject(feedback, maybe_target); + GotoIf(is_monomorphic, &done); + + // Check if it is a megamorphic {target}. + Comment("check if megamorphic"); + TNode is_megamorphic = TaggedEqual( + feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate()))); + Branch(is_megamorphic, &done, &extra_checks); + + BIND(&extra_checks); + { + Label initialize(this), mark_megamorphic(this); + + Comment("check if weak reference"); + TNode is_uninitialized = TaggedEqual( + feedback, + HeapConstant(FeedbackVector::UninitializedSentinel(isolate()))); + GotoIf(is_uninitialized, &initialize); + CSA_ASSERT(this, IsWeakOrCleared(feedback)); + + // If the weak reference is cleared, we have a new chance to become + // monomorphic. + Comment("check if weak reference is cleared"); + Branch(IsCleared(feedback), &initialize, &mark_megamorphic); + + BIND(&initialize); + { + Comment("check if function in same native context"); + GotoIf(TaggedIsSmi(maybe_target), &mark_megamorphic); + TNode target = CAST(maybe_target); + // Check if the {target} is a JSFunction or JSBoundFunction + // in the current native context. + TVARIABLE(HeapObject, var_current, target); + Label loop(this, &var_current), done_loop(this); + Goto(&loop); + BIND(&loop); + { + Label if_boundfunction(this), if_function(this); + TNode current = var_current.value(); + TNode current_instance_type = LoadInstanceType(current); + GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE), + &if_boundfunction); + Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE), + &if_function, &mark_megamorphic); + + BIND(&if_function); + { + // Check that the JSFunction {current} is in the current native + // context. + TNode current_context = + CAST(LoadObjectField(current, JSFunction::kContextOffset)); + TNode current_native_context = + LoadNativeContext(current_context); + Branch( + TaggedEqual(LoadNativeContext(context), current_native_context), + &done_loop, &mark_megamorphic); + } + BIND(&if_boundfunction); + { + // Continue with the [[BoundTargetFunction]] of {target}. 
+ var_current = LoadObjectField( + current, JSBoundFunction::kBoundTargetFunctionOffset); + Goto(&loop); + } + } + BIND(&done_loop); + StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id, target); + ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize"); + Goto(&done); + } + + BIND(&mark_megamorphic); + { + // MegamorphicSentinel is an immortal immovable object so + // write-barrier is not needed. + Comment("transition to megamorphic"); + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol)); + StoreFeedbackVectorSlot( + feedback_vector, slot_id, + HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())), + SKIP_WRITE_BARRIER); + ReportFeedbackUpdate(feedback_vector, slot_id, + "Call:TransitionMegamorphic"); + Goto(&done); + } + } + + BIND(&done); +} + +void CodeStubAssembler::CollectCallFeedback( + TNode maybe_target, TNode context, + TNode maybe_feedback_vector, TNode slot_id) { + Label feedback_done(this); + // If feedback_vector is not valid, then nothing to do. + GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done); + + // Increment the call count. + TNode feedback_vector = CAST(maybe_feedback_vector); + IncrementCallCount(feedback_vector, slot_id); + + // Collect the callable {target} feedback. + CollectCallableFeedback(maybe_target, context, feedback_vector, slot_id); + Goto(&feedback_done); + + BIND(&feedback_done); +} + +void CodeStubAssembler::IncrementCallCount( + TNode feedback_vector, TNode slot_id) { + Comment("increment call count"); + TNode call_count = + CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); + // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call + // count are used as flags. To increment the call count by 1 we hence + // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. + TNode new_count = SmiAdd( + call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); + // Count is Smi, so we don't need a write barrier. 
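The IncrementCallCount comment above describes a count-above-flags layout: the low FeedbackNexus::CallCountField::kShift bits of the slot are flag bits, so bumping the count by one means adding 1 << kShift. A tiny sketch of just that bit layout, ignoring Smi tagging and using a hypothetical shift value:

#include <cassert>
#include <cstdint>

constexpr int kCallCountShift = 1;  // assumed flag-bit count, not V8's value

uint32_t IncrementCallCount(uint32_t stored) {
  // Adds one to the count field while leaving the low flag bits untouched
  // (assuming the count itself does not overflow into them).
  return stored + (1u << kCallCountShift);
}

int main() {
  uint32_t stored = (5u << kCallCountShift) | 1u;  // count 5, flag bit set
  stored = IncrementCallCount(stored);
  assert((stored >> kCallCountShift) == 6u);  // count incremented
  assert((stored & 1u) == 1u);                // flag preserved
}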
+ StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, + SKIP_WRITE_BARRIER, kTaggedSize); +} + void CodeStubAssembler::FastCheck(TNode condition) { Label ok(this), not_ok(this, Label::kDeferred); Branch(condition, &ok, ¬_ok); @@ -221,7 +362,7 @@ TNode CodeStubAssembler::NoContextConstant() { } #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode().rootAccessorName())>::type>::type> \ CodeStubAssembler::name##Constant() { \ return UncheckedCast().rootAccessorName())>::type>::type> \ CodeStubAssembler::name##Constant() { \ return UncheckedCast CodeStubAssembler::Is##name( \ - SloppyTNode value) { \ - return TaggedEqual(value, name##Constant()); \ - } \ - compiler::TNode CodeStubAssembler::IsNot##name( \ - SloppyTNode value) { \ - return TaggedNotEqual(value, name##Constant()); \ +#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ + TNode CodeStubAssembler::Is##name(SloppyTNode value) { \ + return TaggedEqual(value, name##Constant()); \ + } \ + TNode CodeStubAssembler::IsNot##name(SloppyTNode value) { \ + return TaggedNotEqual(value, name##Constant()); \ } HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) #undef HEAP_CONSTANT_TEST @@ -264,6 +403,21 @@ TNode CodeStubAssembler::BIntConstant(int value) { #endif } +template <> +TNode CodeStubAssembler::IntPtrOrSmiConstant(int value) { + return SmiConstant(value); +} + +template <> +TNode CodeStubAssembler::IntPtrOrSmiConstant(int value) { + return IntPtrConstant(value); +} + +template <> +TNode CodeStubAssembler::IntPtrOrSmiConstant(int value) { + return ReinterpretCast(IntPtrConstant(value)); +} + Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { if (mode == SMI_PARAMETERS) { return SmiConstant(value); @@ -273,41 +427,29 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { } } -TNode CodeStubAssembler::IntPtrOrSmiEqual(Node* left, Node* right, - ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiEqual(CAST(left), CAST(right)); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return IntPtrEqual(UncheckedCast(left), - UncheckedCast(right)); +bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode test) { + Smi smi_test; + if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { + return true; } + return false; } -TNode CodeStubAssembler::IntPtrOrSmiNotEqual(Node* left, Node* right, - ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiNotEqual(CAST(left), CAST(right)); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordNotEqual(UncheckedCast(left), - UncheckedCast(right)); +bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode test) { + int32_t constant_test; + if (ToInt32Constant(test, &constant_test) && constant_test == 0) { + return true; } + return false; } bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode) { - int32_t constant_test; - Smi smi_test; if (mode == INTPTR_PARAMETERS) { - if (ToInt32Constant(test, &constant_test) && constant_test == 0) { - return true; - } + return IsIntPtrOrSmiConstantZero(UncheckedCast(test)); } else { DCHECK_EQ(mode, SMI_PARAMETERS); - if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { - return true; - } + return IsIntPtrOrSmiConstantZero(UncheckedCast(test)); } return false; } @@ -352,6 +494,10 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) { } TNode CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode value) { + intptr_t constant; + if (ToIntPtrConstant(value, &constant)) { + 
return BoolConstant(base::bits::IsPowerOfTwo(constant)); + } // value && !(value & (value - 1)) return IntPtrEqual( Select( @@ -578,21 +724,44 @@ TNode CodeStubAssembler::Float64Trunc(SloppyTNode x) { TNode CodeStubAssembler::IsValidSmi(TNode smi) { if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) { // Check that the Smi value is zero in the lower bits. - TNode value = BitcastTaggedSignedToWord(smi); + TNode value = BitcastTaggedToWordForTagAndSmiBits(smi); return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value)); } return Int32TrueConstant(); } -Node* CodeStubAssembler::SmiShiftBitsConstant() { - return IntPtrConstant(kSmiShiftSize + kSmiTagSize); +TNode CodeStubAssembler::IsValidSmiIndex(TNode smi) { + if (COMPRESS_POINTERS_BOOL) { + return WordEqual( + BitcastTaggedToWordForTagAndSmiBits(smi), + BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi))); + } + return Int32TrueConstant(); +} + +TNode CodeStubAssembler::NormalizeSmiIndex(TNode smi_index) { + if (COMPRESS_POINTERS_BOOL) { + TNode raw = + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index)); + smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)); + } + return smi_index; } TNode CodeStubAssembler::SmiFromInt32(SloppyTNode value) { - TNode value_intptr = ChangeInt32ToIntPtr(value); - TNode smi = - BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant())); - return smi; + if (COMPRESS_POINTERS_BOOL) { + static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1), + "Use shifting instead of add"); + return BitcastWordToTaggedSigned( + ChangeUint32ToWord(Int32Add(value, value))); + } + return SmiTag(ChangeInt32ToIntPtr(value)); +} + +TNode CodeStubAssembler::SmiFromUint32(TNode value) { + CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value), + IntPtrConstant(Smi::kMaxValue))); + return SmiFromInt32(Signed(value)); } TNode CodeStubAssembler::IsValidPositiveSmi(TNode value) { @@ -612,6 +781,9 @@ TNode CodeStubAssembler::SmiTag(SloppyTNode value) { if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) { return SmiConstant(constant_value); } + if (COMPRESS_POINTERS_BOOL) { + return SmiFromInt32(TruncateIntPtrToInt32(value)); + } TNode smi = BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); return smi; @@ -622,11 +794,19 @@ TNode CodeStubAssembler::SmiUntag(SloppyTNode value) { if (ToIntPtrConstant(value, &constant_value)) { return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); } - return Signed( - WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant())); + if (COMPRESS_POINTERS_BOOL) { + return ChangeInt32ToIntPtr(SmiToInt32(value)); + } + return Signed(WordSar(BitcastTaggedToWordForTagAndSmiBits(value), + SmiShiftBitsConstant())); } TNode CodeStubAssembler::SmiToInt32(SloppyTNode value) { + if (COMPRESS_POINTERS_BOOL) { + return Signed(Word32Sar( + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), + SmiShiftBitsConstant32())); + } TNode result = SmiUntag(value); return TruncateIntPtrToInt32(result); } @@ -673,13 +853,13 @@ TNode CodeStubAssembler::TrySmiAdd(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { return BitcastWordToTaggedSigned( - TryIntPtrAdd(BitcastTaggedSignedToWord(lhs), - BitcastTaggedSignedToWord(rhs), if_overflow)); + TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs), + BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow)); } else { DCHECK(SmiValuesAre31Bits()); TNode> pair = Int32AddWithOverflow( - 
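The pointer-compression path of SmiFromInt32 above relies on kSmiShiftSize + kSmiTagSize == 1, so tagging is value + value (a left shift by one done with an add) and untagging is an arithmetic shift right by one. A standalone sketch of that 31-bit Smi encoding with simplified constants, not V8's API:

#include <cassert>
#include <cstdint>

// kSmiTagSize == 1, kSmiShiftSize == 0, kSmiTag == 0: payload in the upper
// 31 bits, tag bit 0 in the lowest bit.
uint32_t SmiFromInt32(int32_t value) {
  // value + value == value << 1, computed in unsigned arithmetic.
  return static_cast<uint32_t>(value) + static_cast<uint32_t>(value);
}

int32_t SmiToInt32(uint32_t smi) {
  // Arithmetic right shift by the tag size recovers the signed payload
  // (the CSA code emits an explicit Word32Sar for this).
  return static_cast<int32_t>(smi) >> 1;
}

int main() {
  assert(SmiToInt32(SmiFromInt32(42)) == 42);
  assert(SmiToInt32(SmiFromInt32(-7)) == -7);
  assert((SmiFromInt32(42) & 1u) == 0u);  // Smi tag bit stays 0
}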
TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -690,8 +870,9 @@ TNode CodeStubAssembler::TrySmiAdd(TNode lhs, TNode rhs, TNode CodeStubAssembler::TrySmiSub(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { - TNode> pair = IntPtrSubWithOverflow( - BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs)); + TNode> pair = + IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs), + BitcastTaggedToWordForTagAndSmiBits(rhs)); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -699,8 +880,8 @@ TNode CodeStubAssembler::TrySmiSub(TNode lhs, TNode rhs, } else { DCHECK(SmiValuesAre31Bits()); TNode> pair = Int32SubWithOverflow( - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -878,7 +1059,7 @@ TNode CodeStubAssembler::SmiMul(TNode a, TNode b) { } BIND(&answer_zero); { - TNode or_result = Word32Or(lhs32, rhs32); + TNode or_result = Word32Or(lhs32, rhs32); Label if_should_be_negative_zero(this), if_should_be_zero(this); Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, &if_should_be_zero); @@ -982,41 +1163,27 @@ TNode CodeStubAssembler::TruncateIntPtrToInt32( return ReinterpretCast(value); } -TNode CodeStubAssembler::TaggedIsSmi(SloppyTNode a) { - STATIC_ASSERT(kSmiTagMask < kMaxUInt32); - return Word32Equal(Word32And(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), - Int32Constant(kSmiTagMask)), - Int32Constant(0)); -} - TNode CodeStubAssembler::TaggedIsSmi(TNode a) { STATIC_ASSERT(kSmiTagMask < kMaxUInt32); return Word32Equal( - Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(a)), + Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), Int32Constant(kSmiTagMask)), Int32Constant(0)); } -TNode CodeStubAssembler::TaggedIsNotSmi(SloppyTNode a) { - // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we - // can nonetheless use it to inspect the Smi tag. The assumption here is that - // the GC will not exchange Smis for HeapObjects or vice-versa. 
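TrySmiAdd above adds the raw tagged bit patterns and bails out to if_overflow when the machine addition overflows; because both operands carry a zero tag bit, adding the tagged words adds the payloads directly. A sketch of the 31-bit case using the GCC/Clang __builtin_add_overflow intrinsic as a stand-in for the Int32AddWithOverflow node (assumption: that intrinsic is available):

#include <cassert>
#include <cstdint>

bool TrySmiAdd(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result) {
  // Returns false on signed 32-bit overflow, mirroring the bailout label.
  return !__builtin_add_overflow(lhs_tagged, rhs_tagged, result);
}

int main() {
  int32_t out;
  assert(TrySmiAdd(10 << 1, 32 << 1, &out) && out == (42 << 1));
  assert(!TrySmiAdd(INT32_MAX - 1, 4, &out));  // overflow detected
}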
- TNode a_bitcast = BitcastTaggedSignedToWord(UncheckedCast(a)); - STATIC_ASSERT(kSmiTagMask < kMaxUInt32); - return Word32NotEqual( - Word32And(TruncateIntPtrToInt32(a_bitcast), Int32Constant(kSmiTagMask)), - Int32Constant(0)); +TNode CodeStubAssembler::TaggedIsNotSmi(TNode a) { + return Word32BinaryNot(TaggedIsSmi(a)); } TNode CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode a) { #if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) return Word32Equal( Word32And( - TruncateIntPtrToInt32(BitcastTaggedToWord(a)), - Uint32Constant(kSmiTagMask | static_cast(kSmiSignMask))), + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + Uint32Constant(static_cast(kSmiTagMask | kSmiSignMask))), Int32Constant(0)); #else - return WordEqual(WordAnd(BitcastTaggedToWord(a), + return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a), IntPtrConstant(kSmiTagMask | kSmiSignMask)), IntPtrConstant(0)); #endif @@ -1052,55 +1219,6 @@ TNode CodeStubAssembler::LoadDoubleWithHoleCheck( INTPTR_PARAMETERS, if_hole); } -void CodeStubAssembler::BranchIfPrototypesHaveNoElements( - Node* receiver_map, Label* definitely_no_elements, - Label* possibly_elements) { - CSA_SLOW_ASSERT(this, IsMap(receiver_map)); - VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); - Label loop_body(this, &var_map); - TNode empty_fixed_array = EmptyFixedArrayConstant(); - TNode empty_slow_element_dictionary = - EmptySlowElementDictionaryConstant(); - Goto(&loop_body); - - BIND(&loop_body); - { - Node* map = var_map.value(); - TNode prototype = LoadMapPrototype(map); - GotoIf(IsNull(prototype), definitely_no_elements); - TNode prototype_map = LoadMap(prototype); - TNode prototype_instance_type = LoadMapInstanceType(prototype_map); - - // Pessimistically assume elements if a Proxy, Special API Object, - // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this - // instance type check, it's not necessary to check for interceptors or - // access checks. - Label if_custom(this, Label::kDeferred), if_notcustom(this); - Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type), - &if_custom, &if_notcustom); - - BIND(&if_custom); - { - // For string JSPrimitiveWrapper wrappers we still support the checks as - // long as they wrap the empty string. 
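TaggedIsPositiveSmi above folds two checks into one masked compare: a positive Smi has both the Smi tag bit and the sign bit clear, so testing value & (kSmiTagMask | kSmiSignMask) against zero covers both. A sketch over a raw 32-bit tagged word with simplified constants:

#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagMask = 1u;            // low tag bit
constexpr uint32_t kSmiSignMask = 0x80000000u;  // sign bit of the payload

bool TaggedIsPositiveSmi(uint32_t tagged) {
  return (tagged & (kSmiTagMask | kSmiSignMask)) == 0;
}

int main() {
  assert(TaggedIsPositiveSmi(42u << 1));                         // positive Smi
  assert(!TaggedIsPositiveSmi(static_cast<uint32_t>(-7) << 1));  // negative Smi
  assert(!TaggedIsPositiveSmi((42u << 1) | 1u));                 // heap object
}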
- GotoIfNot( - InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), - possibly_elements); - Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype); - Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); - } - - BIND(&if_notcustom); - { - TNode prototype_elements = LoadElements(CAST(prototype)); - var_map.Bind(prototype_map); - GotoIf(TaggedEqual(prototype_elements, empty_fixed_array), &loop_body); - Branch(TaggedEqual(prototype_elements, empty_slow_element_dictionary), - &loop_body, possibly_elements); - } - } -} - void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode object, Label* if_true, Label* if_false) { GotoIf(TaggedIsSmi(object), if_false); @@ -1118,19 +1236,6 @@ void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { #endif } -void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects( - Label* if_true) { - STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t)); - - TNode execution_mode_address = ExternalConstant( - ExternalReference::debug_execution_mode_address(isolate())); - TNode execution_mode = - UncheckedCast(Load(MachineType::Int32(), execution_mode_address)); - - GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)), - if_true); -} - TNode CodeStubAssembler::AllocateRaw(TNode size_in_bytes, AllocationFlags flags, TNode top_address, @@ -1557,7 +1662,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode map, } TNode CodeStubAssembler::LoadFastProperties( - SloppyTNode object) { + SloppyTNode object) { CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); TNode properties = LoadJSReceiverPropertiesOrHash(object); return Select( @@ -1566,7 +1671,7 @@ TNode CodeStubAssembler::LoadFastProperties( } TNode CodeStubAssembler::LoadSlowProperties( - SloppyTNode object) { + SloppyTNode object) { CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object))); TNode properties = LoadJSReceiverPropertiesOrHash(object); return Select( @@ -1862,18 +1967,8 @@ TNode CodeStubAssembler::LoadStringLengthAsWord32( return LoadObjectField(string, String::kLengthOffset); } -Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { - CSA_ASSERT(this, IsString(seq_string)); - CSA_ASSERT(this, - IsSequentialStringInstanceType(LoadInstanceType(seq_string))); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); - return IntPtrAdd( - BitcastTaggedToWord(seq_string), - IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); -} - -Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) { - CSA_ASSERT(this, IsJSPrimitiveWrapper(object)); +TNode CodeStubAssembler::LoadJSPrimitiveWrapperValue( + TNode object) { return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); } @@ -1887,15 +1982,9 @@ void CodeStubAssembler::DispatchMaybeObject(TNode maybe_object, GotoIf(IsCleared(maybe_object), if_cleared); - GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32( - BitcastMaybeObjectToWord(maybe_object)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kHeapObjectTag)), - &inner_if_strong); + GotoIf(IsStrong(maybe_object), &inner_if_strong); - *extracted = - BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object), - IntPtrConstant(~kWeakHeapObjectMask))); + *extracted = GetHeapObjectAssumeWeak(maybe_object); Goto(if_weak); BIND(&inner_if_smi); @@ -1908,10 +1997,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode maybe_object, } TNode CodeStubAssembler::IsStrong(TNode value) { - return Word32Equal( - 
Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kHeapObjectTag)); + return Word32Equal(Word32And(TruncateIntPtrToInt32( + BitcastTaggedToWordForTagAndSmiBits(value)), + Int32Constant(kHeapObjectTagMask)), + Int32Constant(kHeapObjectTag)); } TNode CodeStubAssembler::GetHeapObjectIfStrong( @@ -1921,10 +2010,10 @@ TNode CodeStubAssembler::GetHeapObjectIfStrong( } TNode CodeStubAssembler::IsWeakOrCleared(TNode value) { - return Word32Equal( - Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kHeapObjectTagMask)), - Int32Constant(kWeakHeapObjectTag)); + return Word32Equal(Word32And(TruncateIntPtrToInt32( + BitcastTaggedToWordForTagAndSmiBits(value)), + Int32Constant(kHeapObjectTagMask)), + Int32Constant(kWeakHeapObjectTag)); } TNode CodeStubAssembler::IsCleared(TNode value) { @@ -1932,11 +2021,6 @@ TNode CodeStubAssembler::IsCleared(TNode value) { Int32Constant(kClearedWeakHeapObjectLower32)); } -TNode CodeStubAssembler::IsNotCleared(TNode value) { - return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), - Int32Constant(kClearedWeakHeapObjectLower32)); -} - TNode CodeStubAssembler::GetHeapObjectAssumeWeak( TNode value) { CSA_ASSERT(this, IsWeakOrCleared(value)); @@ -1951,43 +2035,41 @@ TNode CodeStubAssembler::GetHeapObjectAssumeWeak( return GetHeapObjectAssumeWeak(value); } -TNode CodeStubAssembler::IsWeakReferenceTo(TNode object, - TNode value) { -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) - STATIC_ASSERT(kTaggedSize == kInt32Size); - return Word32Equal( - Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)), - Uint32Constant( - static_cast(~kWeakHeapObjectMask & kMaxUInt32))), - TruncateWordToInt32(BitcastTaggedToWord(value))); -#else - return WordEqual(WordAnd(BitcastMaybeObjectToWord(object), - IntPtrConstant(~kWeakHeapObjectMask)), - BitcastTaggedToWord(value)); - -#endif -} - -TNode CodeStubAssembler::IsStrongReferenceTo(TNode object, - TNode value) { - return TaggedEqual(BitcastWordToTagged(BitcastMaybeObjectToWord(object)), - value); -} - -TNode CodeStubAssembler::IsNotWeakReferenceTo(TNode object, - TNode value) { -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) - return Word32NotEqual( - Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(object)), - Uint32Constant( - static_cast(~kWeakHeapObjectMask & kMaxUInt32))), - TruncateWordToInt32(BitcastTaggedToWord(value))); -#else - return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object), - IntPtrConstant(~kWeakHeapObjectMask)), - BitcastTaggedToWord(value)); - -#endif +// This version generates +// (maybe_object & ~mask) == value +// It works for non-Smi |maybe_object| and for both Smi and HeapObject values +// but requires a big constant for ~mask. +TNode CodeStubAssembler::IsWeakReferenceToObject( + TNode maybe_object, TNode value) { + CSA_ASSERT(this, TaggedIsNotSmi(maybe_object)); + if (COMPRESS_POINTERS_BOOL) { + return Word32Equal( + Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), + Uint32Constant(~static_cast(kWeakHeapObjectMask))), + TruncateWordToInt32(BitcastTaggedToWord(value))); + } else { + return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object), + IntPtrConstant(~kWeakHeapObjectMask)), + BitcastTaggedToWord(value)); + } +} + +// This version generates +// maybe_object == (heap_object | mask) +// It works for any |maybe_object| values and generates a better code because it +// uses a small constant for mask. 
+TNode CodeStubAssembler::IsWeakReferenceTo( + TNode maybe_object, TNode heap_object) { + if (COMPRESS_POINTERS_BOOL) { + return Word32Equal( + TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), + Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)), + Int32Constant(kWeakHeapObjectMask))); + } else { + return WordEqual(BitcastMaybeObjectToWord(maybe_object), + WordOr(BitcastTaggedToWord(heap_object), + IntPtrConstant(kWeakHeapObjectMask))); + } } TNode CodeStubAssembler::MakeWeak(TNode value) { @@ -2123,16 +2205,27 @@ TNode CodeStubAssembler::LoadPropertyArrayLength( return Signed(DecodeWord(value)); } -TNode CodeStubAssembler::LoadJSTypedArrayBackingStore( +TNode CodeStubAssembler::LoadJSTypedArrayDataPtr( TNode typed_array) { - // Backing store = external_pointer + base_pointer. - Node* external_pointer = - LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset, - MachineType::Pointer()); - TNode base_pointer = - LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset); - return UncheckedCast( - IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer))); + // Data pointer = external_pointer + static_cast(base_pointer). + TNode external_pointer = LoadObjectField( + typed_array, JSTypedArray::kExternalPointerOffset); + + TNode base_pointer; + if (COMPRESS_POINTERS_BOOL) { + TNode compressed_base = + LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset); + // Zero-extend TaggedT to WordT according to current compression scheme + // so that the addition with |external_pointer| (which already contains + // compensated offset value) below will decompress the tagged value. + // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for + // details. + base_pointer = Signed(ChangeUint32ToWord(compressed_base)); + } else { + base_pointer = + LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset); + } + return RawPtrAdd(external_pointer, base_pointer); } TNode CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( @@ -2267,8 +2360,7 @@ TNode CodeStubAssembler::BigIntFromInt64(TNode value) { return var_result.value(); } -compiler::TNode -CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( +TNode CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( SloppyTNode data_pointer, SloppyTNode offset) { Label if_zero(this), done(this); if (Is64()) { @@ -2416,59 +2508,30 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( return var_result.value(); } -void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( - TNode context, TNode typed_array, - TNode index_node, TNode value, ElementsKind elements_kind) { - TNode data_pointer = LoadJSTypedArrayBackingStore(typed_array); - switch (elements_kind) { - case UINT8_ELEMENTS: - case UINT8_CLAMPED_ELEMENTS: - case INT8_ELEMENTS: - case UINT16_ELEMENTS: - case INT16_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - SmiToInt32(CAST(value)), SMI_PARAMETERS); - break; - case UINT32_ELEMENTS: - case INT32_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - TruncateTaggedToWord32(context, value), SMI_PARAMETERS); - break; - case FLOAT32_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), - SMI_PARAMETERS); - break; - case FLOAT64_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); - break; - case BIGUINT64_ELEMENTS: - case BIGINT64_ELEMENTS: - StoreElement(data_pointer, elements_kind, index_node, - 
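The two helpers above test the same property with different constant shapes: IsWeakReferenceToObject strips the weak bit from the slot value, which needs the wide ~kWeakHeapObjectMask constant, while IsWeakReferenceTo tags the expected object instead and only materializes the small mask. A sketch with simplified tag constants as stand-ins for V8's scheme:

#include <cassert>
#include <cstdint>

constexpr uint64_t kHeapObjectTag = 1;       // strong heap-object tag bit
constexpr uint64_t kWeakHeapObjectMask = 2;  // weak tag bit

bool IsWeakReferenceToObject(uint64_t maybe_object, uint64_t object) {
  return (maybe_object & ~kWeakHeapObjectMask) == object;  // big ~mask constant
}

bool IsWeakReferenceTo(uint64_t maybe_object, uint64_t object) {
  return maybe_object == (object | kWeakHeapObjectMask);   // small mask constant
}

int main() {
  uint64_t object = 0x1000 | kHeapObjectTag;  // strongly tagged "pointer"
  uint64_t weak_slot = object | kWeakHeapObjectMask;
  assert(IsWeakReferenceToObject(weak_slot, object));
  assert(IsWeakReferenceTo(weak_slot, object));
  assert(!IsWeakReferenceTo(object, object));  // a strong slot is not weak
}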
UncheckedCast(value), SMI_PARAMETERS); - break; - default: - UNREACHABLE(); - } -} - +template TNode CodeStubAssembler::LoadFeedbackVectorSlot( - Node* object, Node* slot_index_node, int additional_offset, - ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, IsFeedbackVector(object)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode)); + TNode feedback_vector, TNode slot, + int additional_offset) { int32_t header_size = FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag; - TNode offset = ElementOffsetFromIndex( - slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size); + TNode offset = + ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size); CSA_SLOW_ASSERT( - this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)), + this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), FeedbackVector::kHeaderSize)); - return UncheckedCast( - Load(MachineType::AnyTagged(), object, offset)); + return Load(feedback_vector, offset); } +template TNode CodeStubAssembler::LoadFeedbackVectorSlot( + TNode feedback_vector, TNode slot, + int additional_offset); +template TNode CodeStubAssembler::LoadFeedbackVectorSlot( + TNode feedback_vector, TNode slot, + int additional_offset); +template TNode CodeStubAssembler::LoadFeedbackVectorSlot( + TNode feedback_vector, TNode slot, + int additional_offset); + template TNode CodeStubAssembler::LoadAndUntagToWord32ArrayElement( TNode object, int array_header_size, Node* index_node, @@ -2617,6 +2680,13 @@ TNode CodeStubAssembler::LoadDoubleWithHoleCheck( return UncheckedCast(Load(machine_type, base, offset)); } +TNode CodeStubAssembler::LoadContextHasExtensionField( + SloppyTNode context) { + TNode value = + LoadAndUntagObjectField(context, Context::kLengthOffset); + return IsSetWord(value); +} + TNode CodeStubAssembler::LoadContextElement( SloppyTNode context, int slot_index) { int offset = Context::SlotOffset(slot_index); @@ -2626,15 +2696,15 @@ TNode CodeStubAssembler::LoadContextElement( TNode CodeStubAssembler::LoadContextElement( SloppyTNode context, SloppyTNode slot_index) { - TNode offset = ElementOffsetFromIndex( - slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0)); + TNode offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, + Context::SlotOffset(0)); return UncheckedCast(Load(MachineType::AnyTagged(), context, offset)); } TNode CodeStubAssembler::LoadContextElement(TNode context, TNode slot_index) { - TNode offset = ElementOffsetFromIndex( - slot_index, PACKED_ELEMENTS, SMI_PARAMETERS, Context::SlotOffset(0)); + TNode offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, + Context::SlotOffset(0)); return UncheckedCast(Load(MachineType::AnyTagged(), context, offset)); } @@ -2949,33 +3019,30 @@ void CodeStubAssembler::StoreFixedDoubleArrayElement( StoreNoWriteBarrier(rep, object, offset, value_silenced); } -void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object, - Node* slot_index_node, - Node* value, - WriteBarrierMode barrier_mode, - int additional_offset, - ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, IsFeedbackVector(object)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode)); +void CodeStubAssembler::StoreFeedbackVectorSlot( + TNode feedback_vector, TNode slot, + TNode value, WriteBarrierMode barrier_mode, + int additional_offset) { DCHECK(IsAligned(additional_offset, kTaggedSize)); DCHECK(barrier_mode == SKIP_WRITE_BARRIER || barrier_mode == UNSAFE_SKIP_WRITE_BARRIER || 
barrier_mode == UPDATE_WRITE_BARRIER); int header_size = FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag; - TNode offset = ElementOffsetFromIndex( - slot_index_node, HOLEY_ELEMENTS, parameter_mode, header_size); - // Check that slot_index_node <= object.length. + TNode offset = + ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size); + // Check that slot <= feedback_vector.length. CSA_ASSERT(this, - IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)), + IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), FeedbackVector::kHeaderSize)); if (barrier_mode == SKIP_WRITE_BARRIER) { - StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value); + StoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, offset, + value); } else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) { - UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, - value); + UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, feedback_vector, + offset, value); } else { - Store(object, offset, value); + Store(feedback_vector, offset, value); } } @@ -3045,33 +3112,29 @@ TNode CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Label success(this); TVARIABLE(Smi, var_tagged_length); ParameterMode mode = OptimalParameterMode(); - VARIABLE(var_length, OptimalParameterRepresentation(), - TaggedToParameter(LoadFastJSArrayLength(array), mode)); - VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); + TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); + TVARIABLE(FixedArrayBase, var_elements, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. TNode first = arg_index->value(); - Node* growth = IntPtrToParameter( - IntPtrSub(UncheckedCast(args->GetLength(INTPTR_PARAMETERS)), - first), - mode); + TNode growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first)); PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), &var_elements, growth, &pre_bailout); // Push each argument onto the end of the array now that there is enough // capacity. CodeStubAssembler::VariableList push_vars({&var_length}, zone()); - Node* elements = var_elements.value(); + TNode elements = var_elements.value(); args->ForEach( push_vars, - [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) { + [&](TNode arg) { TryStoreArrayElement(kind, mode, &pre_bailout, elements, var_length.value(), arg); - Increment(&var_length, 1, mode); + Increment(&var_length); }, - first, nullptr); + first); { - TNode length = ParameterToTagged(var_length.value(), mode); + TNode length = BIntToSmi(var_length.value()); var_tagged_length = length; StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); Goto(&success); @@ -3111,8 +3174,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, CSA_SLOW_ASSERT(this, IsJSArray(array)); Comment("BuildAppendJSArray: ", ElementsKindToString(kind)); ParameterMode mode = OptimalParameterMode(); - VARIABLE(var_length, OptimalParameterRepresentation(), - TaggedToParameter(LoadFastJSArrayLength(array), mode)); + TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. @@ -3124,9 +3186,9 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, // capacity. 
TryStoreArrayElement(kind, mode, bailout, var_elements.value(), var_length.value(), value); - Increment(&var_length, 1, mode); + Increment(&var_length); - TNode length = ParameterToTagged(var_length.value(), mode); + TNode length = BIntToSmi(var_length.value()); StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); } @@ -3138,7 +3200,7 @@ Node* CodeStubAssembler::AllocateCellWithValue(Node* value, return result; } -Node* CodeStubAssembler::LoadCellValue(Node* cell) { +TNode CodeStubAssembler::LoadCellValue(Node* cell) { CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE)); return LoadObjectField(cell, Cell::kValueOffset); } @@ -3278,7 +3340,8 @@ TNode CodeStubAssembler::AllocateByteArray(TNode length, TNode raw_size = GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS, ByteArray::kHeaderSize + kObjectAlignmentMask); - TNode size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3352,7 +3415,8 @@ TNode CodeStubAssembler::AllocateSeqOneByteString( TNode raw_size = GetArrayAllocationSize( Signed(ChangeUint32ToWord(length)), UINT8_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); - TNode size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3423,7 +3487,8 @@ TNode CodeStubAssembler::AllocateSeqTwoByteString( TNode raw_size = GetArrayAllocationSize( Signed(ChangeUint32ToWord(length)), UINT16_ELEMENTS, INTPTR_PARAMETERS, SeqOneByteString::kHeaderSize + kObjectAlignmentMask); - TNode size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); + TNode size = + WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), &if_sizeissmall, &if_notsizeissmall); @@ -3496,35 +3561,6 @@ TNode CodeStubAssembler::AllocateSlicedTwoByteString( offset); } -TNode CodeStubAssembler::AllocateConsString(TNode length, - TNode left, - TNode right) { - // Added string can be a cons string. - Comment("Allocating ConsString"); - TNode left_instance_type = LoadInstanceType(left); - TNode right_instance_type = LoadInstanceType(right); - - // Determine the resulting ConsString map to use depending on whether - // any of {left} or {right} has two byte encoding. 
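The AllocateByteArray / AllocateSeqOneByteString / AllocateSeqTwoByteString hunks above compute raw_size with kObjectAlignmentMask already added in, so masking off the low bits rounds the allocation size up to the object alignment. A sketch of that rounding with an assumed 8-byte alignment:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kObjectAlignment = 8;  // illustrative, not V8's constant
constexpr uintptr_t kObjectAlignmentMask = kObjectAlignment - 1;

uintptr_t AlignedAllocationSize(uintptr_t header_size, uintptr_t payload_size) {
  uintptr_t raw_size = header_size + payload_size + kObjectAlignmentMask;
  return raw_size & ~kObjectAlignmentMask;  // round up to the alignment
}

int main() {
  assert(AlignedAllocationSize(16, 1) == 24);
  assert(AlignedAllocationSize(16, 8) == 24);
  assert(AlignedAllocationSize(16, 9) == 32);
}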
- STATIC_ASSERT(kOneByteStringTag != 0); - STATIC_ASSERT(kTwoByteStringTag == 0); - TNode combined_instance_type = - Word32And(left_instance_type, right_instance_type); - TNode result_map = CAST(Select( - IsSetWord32(combined_instance_type, kStringEncodingMask), - [=] { return ConsOneByteStringMapConstant(); }, - [=] { return ConsStringMapConstant(); })); - TNode result = AllocateInNewSpace(ConsString::kSize); - StoreMapNoWriteBarrier(result, result_map); - StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, - MachineRepresentation::kWord32); - StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset, - Int32Constant(String::kEmptyHashField), - MachineRepresentation::kWord32); - StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, left); - StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, right); - return CAST(result); -} TNode CodeStubAssembler::AllocateNameDictionary( int at_least_space_for) { @@ -3762,106 +3798,26 @@ template V8_EXPORT_PRIVATE TNode CodeStubAssembler::AllocateSmallOrderedHashTable( TNode capacity); -template -void CodeStubAssembler::FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found) { - // Get the index of the bucket. - TNode const number_of_buckets = - SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), CollectionType::NumberOfBucketsIndex()))); - TNode const bucket = - WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); - TNode const first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), bucket, - CollectionType::HashTableStartIndex() * kTaggedSize))); - - // Walk the bucket chain. - TNode entry_start; - Label if_key_found(this); - { - TVARIABLE(IntPtrT, var_entry, first_entry); - Label loop(this, {&var_entry, entry_start_position}), - continue_next_entry(this); - Goto(&loop); - BIND(&loop); - - // If the entry index is the not-found sentinel, we are done. - GotoIf(IntPtrEqual(var_entry.value(), - IntPtrConstant(CollectionType::kNotFound)), - not_found); - - // Make sure the entry index is within range. - CSA_ASSERT( - this, - UintPtrLessThan( - var_entry.value(), - SmiUntag(SmiAdd( - CAST(UnsafeLoadFixedArrayElement( - CAST(table), CollectionType::NumberOfElementsIndex())), - CAST(UnsafeLoadFixedArrayElement( - CAST(table), - CollectionType::NumberOfDeletedElementsIndex())))))); - - // Compute the index of the entry relative to kHashTableStartIndex. - entry_start = - IntPtrAdd(IntPtrMul(var_entry.value(), - IntPtrConstant(CollectionType::kEntrySize)), - number_of_buckets); - - // Load the key from the entry. - TNode const candidate_key = UnsafeLoadFixedArrayElement( - CAST(table), entry_start, - CollectionType::HashTableStartIndex() * kTaggedSize); - - key_compare(candidate_key, &if_key_found, &continue_next_entry); - - BIND(&continue_next_entry); - // Load the index of the next entry in the bucket chain. 
- var_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( - CAST(table), entry_start, - (CollectionType::HashTableStartIndex() + CollectionType::kChainOffset) * - kTaggedSize))); - - Goto(&loop); - } - - BIND(&if_key_found); - entry_start_position->Bind(entry_start); - Goto(entry_found); -} - -template void CodeStubAssembler::FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); -template void CodeStubAssembler::FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); - Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) { Comment("AllocateStruct"); CSA_ASSERT(this, IsMap(map)); TNode size = TimesTaggedSize(LoadMapInstanceSizeInWords(map)); TNode object = Allocate(size, flags); StoreMapNoWriteBarrier(object, map); - InitializeStructBody(object, map, size, Struct::kHeaderSize); + InitializeStructBody(object, size, Struct::kHeaderSize); return object; } -void CodeStubAssembler::InitializeStructBody(Node* object, Node* map, - Node* size, int start_offset) { - CSA_SLOW_ASSERT(this, IsMap(map)); +void CodeStubAssembler::InitializeStructBody(TNode object, + TNode size, + int start_offset) { Comment("InitializeStructBody"); TNode filler = UndefinedConstant(); // Calculate the untagged field addresses. - object = BitcastTaggedToWord(object); - TNode start_address = - IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag)); - TNode end_address = - IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag)); + TNode start_address = + IntPtrAdd(BitcastTaggedToWord(object), + IntPtrConstant(start_offset - kHeapObjectTag)); + TNode end_address = IntPtrAdd(start_address, size); StoreFieldsNoWriteBarrier(start_address, end_address, filler); } @@ -3883,8 +3839,9 @@ TNode CodeStubAssembler::AllocateJSObjectFromMap( } void CodeStubAssembler::InitializeJSObjectFromMap( - Node* object, Node* map, Node* instance_size, Node* properties, - Node* elements, SlackTrackingMode slack_tracking_mode) { + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size, Node* properties, Node* elements, + SlackTrackingMode slack_tracking_mode) { CSA_SLOW_ASSERT(this, IsMap(map)); // This helper assumes that the object is in new-space, as guarded by the // check in AllocatedJSObjectFromMap. @@ -3915,7 +3872,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap( } void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking( - Node* object, Node* map, Node* instance_size, int start_offset) { + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size, int start_offset) { STATIC_ASSERT(Map::kNoSlackTracking == 0); CSA_ASSERT( this, IsClearWord32(LoadMapBitField3(map))); @@ -3924,8 +3882,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking( } void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( - Node* object, Node* map, Node* instance_size) { - CSA_SLOW_ASSERT(this, IsMap(map)); + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size) { Comment("InitializeJSObjectBodyNoSlackTracking"); // Perform in-object slack tracking if requested. @@ -3953,9 +3911,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( // The object still has in-object slack therefore the |unsed_or_unused| // field contain the "used" value. 
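The removed FindOrderedHashTableEntry above implements the usual ordered-hash-table probe: the bucket index is hash & (number_of_buckets - 1), the bucket slot holds the first entry index, and each entry stores the index of the next entry in its chain until the kNotFound sentinel. A simplified sketch with a hypothetical in-memory layout, not V8's FixedArray layout:

#include <cassert>
#include <cstdint>
#include <vector>

struct Entry {
  uint64_t key;
  int next;  // index of the next entry in the same bucket, or -1
};

struct OrderedHashTable {
  std::vector<int> buckets;   // power-of-two number of buckets
  std::vector<Entry> entries;
};

int FindEntry(const OrderedHashTable& table, uint64_t key, uint64_t hash) {
  int entry = table.buckets[hash & (table.buckets.size() - 1)];
  while (entry != -1) {
    if (table.entries[entry].key == key) return entry;
    entry = table.entries[entry].next;  // walk the bucket chain
  }
  return -1;  // the kNotFound sentinel
}

int main() {
  OrderedHashTable table{{0, -1}, {{10, 1}, {12, -1}}};
  assert(FindEntry(table, 12, 0) == 1);   // second entry in bucket 0's chain
  assert(FindEntry(table, 99, 0) == -1);  // not found
}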
- TNode used_size = TimesTaggedSize(ChangeUint32ToWord( + TNode used_size = Signed(TimesTaggedSize(ChangeUint32ToWord( LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset, - MachineType::Uint8()))); + MachineType::Uint8())))); Comment("iInitialize filler fields"); InitializeFieldsWithRoot(object, used_size, instance_size, @@ -3984,19 +3942,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking( BIND(&end); } -void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address, - Node* end_address, - Node* value) { +void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode start_address, + TNode end_address, + TNode value) { Comment("StoreFieldsNoWriteBarrier"); CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize)); CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize)); - BuildFastLoop( + BuildFastLoop( start_address, end_address, - [this, value](Node* current) { + [=](TNode current) { UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current, value); }, - kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + kTaggedSize, IndexAdvanceMode::kPost); } TNode CodeStubAssembler::IsValidFastJSArrayCapacity( @@ -4008,12 +3966,12 @@ TNode CodeStubAssembler::IsValidFastJSArrayCapacity( TNode CodeStubAssembler::AllocateJSArray( TNode array_map, TNode elements, TNode length, - Node* allocation_site, int array_header_size) { + TNode allocation_site, int array_header_size) { Comment("begin allocation of JSArray passing in elements"); CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); int base_size = array_header_size; - if (allocation_site != nullptr) { + if (!allocation_site.is_null()) { base_size += AllocationMemento::kSize; } @@ -4027,8 +3985,9 @@ TNode CodeStubAssembler::AllocateJSArray( std::pair, TNode> CodeStubAssembler::AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode array_map, TNode length, - Node* allocation_site, Node* capacity, ParameterMode capacity_mode, - AllocationFlags allocation_flags, int array_header_size) { + TNode allocation_site, Node* capacity, + ParameterMode capacity_mode, AllocationFlags allocation_flags, + int array_header_size) { Comment("begin allocation of JSArray with elements"); CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0); CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); @@ -4065,7 +4024,9 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( BIND(&nonempty); { int base_size = array_header_size; - if (allocation_site != nullptr) base_size += AllocationMemento::kSize; + if (!allocation_site.is_null()) { + base_size += AllocationMemento::kSize; + } const int elements_offset = base_size; @@ -4138,8 +4099,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( } TNode CodeStubAssembler::AllocateUninitializedJSArray( - TNode array_map, TNode length, Node* allocation_site, - TNode size_in_bytes) { + TNode array_map, TNode length, + TNode allocation_site, TNode size_in_bytes) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); // Allocate space for the JSArray and the elements FixedArray in one go. 
@@ -4150,7 +4111,7 @@ TNode CodeStubAssembler::AllocateUninitializedJSArray( StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); - if (allocation_site != nullptr) { + if (!allocation_site.is_null()) { InitializeAllocationMemento(array, IntPtrConstant(JSArray::kSize), allocation_site); } @@ -4160,7 +4121,7 @@ TNode CodeStubAssembler::AllocateUninitializedJSArray( TNode CodeStubAssembler::AllocateJSArray( ElementsKind kind, TNode array_map, Node* capacity, TNode length, - Node* allocation_site, ParameterMode capacity_mode, + TNode allocation_site, ParameterMode capacity_mode, AllocationFlags allocation_flags) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, capacity_mode)); @@ -4189,10 +4150,9 @@ TNode CodeStubAssembler::AllocateJSArray( return array; } -Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array, - Node* begin, Node* count, - ParameterMode mode, Node* capacity, - Node* allocation_site) { +Node* CodeStubAssembler::ExtractFastJSArray( + TNode context, TNode array, Node* begin, Node* count, + ParameterMode mode, Node* capacity, TNode allocation_site) { TNode original_array_map = LoadMap(array); TNode elements_kind = LoadMapElementsKind(original_array_map); @@ -4209,18 +4169,16 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array, return result; } -Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, - ParameterMode mode, - Node* allocation_site, - HoleConversionMode convert_holes) { +TNode CodeStubAssembler::CloneFastJSArray( + TNode context, TNode array, ParameterMode mode, + TNode allocation_site, HoleConversionMode convert_holes) { // TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this // function is also used to copy boilerplates even when the no-elements // protector is invalid. This function should be renamed to reflect its uses. - CSA_ASSERT(this, IsJSArray(array)); TNode length = LoadJSArrayLength(array); - Node* new_elements = nullptr; - VARIABLE(var_new_elements, MachineRepresentation::kTagged); + TNode new_elements; + TVARIABLE(FixedArrayBase, var_new_elements); TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array))); Label allocate_jsarray(this), holey_extract(this), @@ -4240,7 +4198,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, TaggedToParameter(CAST(length), mode), nullptr, ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode, nullptr, var_elements_kind.value()); - var_new_elements.Bind(new_elements); + var_new_elements = new_elements; Goto(&allocate_jsarray); if (need_conversion) { @@ -4257,7 +4215,7 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, LoadElements(array), IntPtrOrSmiConstant(0, mode), TaggedToParameter(CAST(length), mode), nullptr, ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted); - var_new_elements.Bind(new_elements); + var_new_elements = new_elements; // If the array type didn't change, use the original elements kind. GotoIfNot(var_holes_converted.value(), &allocate_jsarray); // Otherwise use PACKED_ELEMENTS for the target's elements kind. 
@@ -4283,8 +4241,8 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array, TNode array_map = LoadJSArrayElementsMap(var_elements_kind.value(), native_context); - TNode result = AllocateJSArray( - array_map, CAST(var_new_elements.value()), CAST(length), allocation_site); + TNode result = AllocateJSArray(array_map, var_new_elements.value(), + CAST(length), allocation_site); return result; } @@ -4555,14 +4513,14 @@ TNode CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode first_from_element_offset = ElementOffsetFromIndex(first, kind, mode, 0); - TNode limit_offset = IntPtrAdd(first_from_element_offset, - IntPtrConstant(first_element_offset)); + TNode limit_offset = IntPtrAdd(first_from_element_offset, + IntPtrConstant(first_element_offset)); TVARIABLE(IntPtrT, var_from_offset, ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind, mode, first_element_offset)); Label decrement(this, {&var_from_offset}), done(this); - TNode to_array_adjusted = + TNode to_array_adjusted = IntPtrSub(BitcastTaggedToWord(to_elements), first_from_element_offset); Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement); @@ -4908,12 +4866,10 @@ void CodeStubAssembler::MoveElements(ElementsKind kind, TNode elements_intptr = BitcastTaggedToWord(elements); TNode target_data_ptr = IntPtrAdd(elements_intptr, - ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS, - fa_base_data_offset)); + ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset)); TNode source_data_ptr = IntPtrAdd(elements_intptr, - ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS, - fa_base_data_offset)); + ElementOffsetFromIndex(src_index, kind, fa_base_data_offset)); TNode memmove = ExternalConstant(ExternalReference::libc_memmove_function()); CallCFunction(memmove, MachineType::Pointer(), @@ -4997,10 +4953,10 @@ void CodeStubAssembler::CopyElements(ElementsKind kind, IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind))); static const int32_t fa_base_data_offset = FixedArrayBase::kHeaderSize - kHeapObjectTag; - TNode src_offset_start = ElementOffsetFromIndex( - src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset); - TNode dst_offset_start = ElementOffsetFromIndex( - dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset); + TNode src_offset_start = + ElementOffsetFromIndex(src_index, kind, fa_base_data_offset); + TNode dst_offset_start = + ElementOffsetFromIndex(dst_index, kind, fa_base_data_offset); TNode src_elements_intptr = BitcastTaggedToWord(src_elements); TNode source_data_ptr = IntPtrAdd(src_elements_intptr, src_offset_start); @@ -5283,65 +5239,6 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array, Comment("] CopyPropertyArrayValues"); } -void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string, - TNode from_index, - TNode to_index, - TNode character_count, - String::Encoding from_encoding, - String::Encoding to_encoding) { - // Cannot assert IsString(from_string) and IsString(to_string) here because - // CSA::SubString can pass in faked sequential strings when handling external - // subject strings. - bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING; - bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING; - DCHECK_IMPLIES(to_one_byte, from_one_byte); - Comment("CopyStringCharacters ", - from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", " -> ", - to_one_byte ? 
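The MoveElements / CopyElements hunks above turn element indices into byte offsets (ElementOffsetFromIndex) and hand the overlapping copy to libc memmove. A standalone sketch of that offset arithmetic on a plain array rather than a FixedArray on the V8 heap:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

void MoveTaggedElements(uint64_t* elements, size_t dst_index, size_t src_index,
                        size_t length) {
  const size_t kTaggedSize = sizeof(uint64_t);  // assumed 8-byte tagged slots
  std::memmove(
      reinterpret_cast<uint8_t*>(elements) + dst_index * kTaggedSize,
      reinterpret_cast<uint8_t*>(elements) + src_index * kTaggedSize,
      length * kTaggedSize);  // memmove handles the overlap
}

int main() {
  uint64_t elements[5] = {1, 2, 3, 4, 5};
  MoveTaggedElements(elements, 0, 1, 4);  // shift left by one slot
  assert(elements[0] == 2 && elements[3] == 5);
}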
"ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING"); - - ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; - ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); - int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag; - TNode from_offset = ElementOffsetFromIndex( - from_index, from_kind, INTPTR_PARAMETERS, header_size); - TNode to_offset = - ElementOffsetFromIndex(to_index, to_kind, INTPTR_PARAMETERS, header_size); - TNode byte_count = - ElementOffsetFromIndex(character_count, from_kind, INTPTR_PARAMETERS); - TNode limit_offset = IntPtrAdd(from_offset, byte_count); - - // Prepare the fast loop - MachineType type = - from_one_byte ? MachineType::Uint8() : MachineType::Uint16(); - MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8 - : MachineRepresentation::kWord16; - int from_increment = 1 << ElementsKindToShiftSize(from_kind); - int to_increment = 1 << ElementsKindToShiftSize(to_kind); - - VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset); - VariableList vars({¤t_to_offset}, zone()); - int to_index_constant = 0, from_index_constant = 0; - bool index_same = (from_encoding == to_encoding) && - (from_index == to_index || - (ToInt32Constant(from_index, &from_index_constant) && - ToInt32Constant(to_index, &to_index_constant) && - from_index_constant == to_index_constant)); - BuildFastLoop( - vars, from_offset, limit_offset, - [this, from_string, to_string, ¤t_to_offset, to_increment, type, - rep, index_same](Node* offset) { - Node* value = Load(type, from_string, offset); - StoreNoWriteBarrier(rep, to_string, - index_same ? offset : current_to_offset.value(), - value); - if (!index_same) { - Increment(¤t_to_offset, to_increment); - } - }, - from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); -} - Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, Node* offset, ElementsKind from_kind, @@ -5381,9 +5278,9 @@ Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity, return IntPtrOrSmiAdd(new_capacity, padding, mode); } -Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, - ElementsKind kind, Node* key, - Label* bailout) { +TNode CodeStubAssembler::TryGrowElementsCapacity( + Node* object, Node* elements, ElementsKind kind, Node* key, + Label* bailout) { CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); CSA_SLOW_ASSERT(this, TaggedIsSmi(key)); @@ -5395,11 +5292,9 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, TaggedToParameter(capacity, mode), mode, bailout); } -Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, - ElementsKind kind, Node* key, - Node* capacity, - ParameterMode mode, - Label* bailout) { +TNode CodeStubAssembler::TryGrowElementsCapacity( + Node* object, Node* elements, ElementsKind kind, Node* key, Node* capacity, + ParameterMode mode, Label* bailout) { Comment("TryGrowElementsCapacity"); CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); @@ -5418,7 +5313,7 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, new_capacity, mode, bailout); } -Node* CodeStubAssembler::GrowElementsCapacity( +TNode CodeStubAssembler::GrowElementsCapacity( Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind, Node* capacity, 
Node* new_capacity, ParameterMode mode, Label* bailout) { Comment("[ GrowElementsCapacity"); @@ -5471,45 +5366,22 @@ void CodeStubAssembler::InitializeAllocationMemento(Node* base, Comment("]"); } -Node* CodeStubAssembler::TryTaggedToFloat64(Node* value, - Label* if_valueisnotnumber) { - Label out(this); - VARIABLE(var_result, MachineRepresentation::kFloat64); - - // Check if the {value} is a Smi or a HeapObject. - Label if_valueissmi(this), if_valueisnotsmi(this); - Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi); - - BIND(&if_valueissmi); - { - // Convert the Smi {value}. - var_result.Bind(SmiToFloat64(value)); - Goto(&out); - } - - BIND(&if_valueisnotsmi); - { - // Check if {value} is a HeapNumber. - Label if_valueisheapnumber(this); - Branch(IsHeapNumber(value), &if_valueisheapnumber, if_valueisnotnumber); - - BIND(&if_valueisheapnumber); - { - // Load the floating point value. - var_result.Bind(LoadHeapNumberValue(value)); - Goto(&out); - } - } - BIND(&out); - return var_result.value(); +TNode CodeStubAssembler::TryTaggedToFloat64( + TNode value, Label* if_valueisnotnumber) { + return Select( + TaggedIsSmi(value), [&]() { return SmiToFloat64(CAST(value)); }, + [&]() { + GotoIfNot(IsHeapNumber(CAST(value)), if_valueisnotnumber); + return LoadHeapNumberValue(CAST(value)); + }); } -Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { +TNode CodeStubAssembler::TruncateTaggedToFloat64( + SloppyTNode context, SloppyTNode value) { // We might need to loop once due to ToNumber conversion. - VARIABLE(var_value, MachineRepresentation::kTagged); - VARIABLE(var_result, MachineRepresentation::kFloat64); + TVARIABLE(Object, var_value, value); + TVARIABLE(Float64T, var_result); Label loop(this, &var_value), done_loop(this, &var_result); - var_value.Bind(value); Goto(&loop); BIND(&loop); { @@ -5520,14 +5392,13 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { // Convert {value} to Float64 if it is a number and convert it to a number // otherwise. - Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber); - var_result.Bind(result); + var_result = TryTaggedToFloat64(value, &if_valueisnotnumber); Goto(&done_loop); BIND(&if_valueisnotnumber); { // Convert the {value} to a Number first. - var_value.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, value)); + var_value = CallBuiltin(Builtins::kNonNumberToNumber, context, value); Goto(&loop); } } @@ -5535,8 +5406,9 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { return var_result.value(); } -Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) { - VARIABLE(var_result, MachineRepresentation::kWord32); +TNode CodeStubAssembler::TruncateTaggedToWord32( + SloppyTNode context, SloppyTNode value) { + TVARIABLE(Word32T, var_result); Label done(this); TaggedToWord32OrBigIntImpl(context, value, &done, &var_result); @@ -5546,38 +5418,33 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) { // Truncate {value} to word32 and jump to {if_number} if it is a Number, // or find that it is a BigInt and jump to {if_bigint}. 
-void CodeStubAssembler::TaggedToWord32OrBigInt(Node* context, Node* value, - Label* if_number, - Variable* var_word32, - Label* if_bigint, - Variable* var_bigint) { +void CodeStubAssembler::TaggedToWord32OrBigInt( + TNode context, TNode value, Label* if_number, + TVariable* var_word32, Label* if_bigint, + TVariable* var_maybe_bigint) { TaggedToWord32OrBigIntImpl( - context, value, if_number, var_word32, if_bigint, var_bigint); + context, value, if_number, var_word32, if_bigint, var_maybe_bigint); } // Truncate {value} to word32 and jump to {if_number} if it is a Number, // or find that it is a BigInt and jump to {if_bigint}. In either case, // store the type feedback in {var_feedback}. void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback) { + TNode context, TNode value, Label* if_number, + TVariable* var_word32, Label* if_bigint, + TVariable* var_maybe_bigint, TVariable* var_feedback) { TaggedToWord32OrBigIntImpl( - context, value, if_number, var_word32, if_bigint, var_bigint, + context, value, if_number, var_word32, if_bigint, var_maybe_bigint, var_feedback); } template void CodeStubAssembler::TaggedToWord32OrBigIntImpl( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback) { - DCHECK(var_word32->rep() == MachineRepresentation::kWord32); - DCHECK(var_bigint == nullptr || - var_bigint->rep() == MachineRepresentation::kTagged); - DCHECK(var_feedback == nullptr || - var_feedback->rep() == MachineRepresentation::kTaggedSigned); - + TNode context, TNode value, Label* if_number, + TVariable* var_word32, Label* if_bigint, + TVariable* var_maybe_bigint, TVariable* var_feedback) { // We might need to loop after conversion. - VARIABLE(var_value, MachineRepresentation::kTagged, value); + TVARIABLE(Object, var_value, value); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone); Variable* loop_vars[] = {&var_value, var_feedback}; int num_vars = @@ -5592,12 +5459,13 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( GotoIf(TaggedIsNotSmi(value), &not_smi); // {value} is a Smi. - var_word32->Bind(SmiToInt32(value)); + *var_word32 = SmiToInt32(CAST(value)); CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall); Goto(if_number); BIND(&not_smi); - TNode map = LoadMap(value); + TNode value_heap_object = CAST(value); + TNode map = LoadMap(value_heap_object); GotoIf(IsHeapNumberMap(map), &is_heap_number); TNode instance_type = LoadMapInstanceType(map); if (conversion == Object::Conversion::kToNumeric) { @@ -5610,7 +5478,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( // We do not require an Or with earlier feedback here because once we // convert the value to a Numeric, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. - CSA_ASSERT(this, SmiEqual(CAST(var_feedback->value()), + CSA_ASSERT(this, SmiEqual(var_feedback->value(), SmiConstant(BinaryOperationFeedback::kNone))); } GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball); @@ -5618,25 +5486,25 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( auto builtin = conversion == Object::Conversion::kToNumeric ? 
Builtins::kNonNumberToNumeric : Builtins::kNonNumberToNumber; - var_value.Bind(CallBuiltin(builtin, context, value)); + var_value = CallBuiltin(builtin, context, value); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny); Goto(&loop); BIND(&is_oddball); - var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset)); + var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNumberOrOddball); Goto(&loop); } BIND(&is_heap_number); - var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value))); + *var_word32 = TruncateHeapNumberValueToWord32(CAST(value)); CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber); Goto(if_number); if (conversion == Object::Conversion::kToNumeric) { BIND(&is_bigint); - var_bigint->Bind(value); + *var_maybe_bigint = value; CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt); Goto(if_bigint); } @@ -5650,14 +5518,14 @@ TNode CodeStubAssembler::TruncateHeapNumberValueToWord32( } void CodeStubAssembler::TryHeapNumberToSmi(TNode number, - TVariable& var_result_smi, + TVariable* var_result_smi, Label* if_smi) { TNode value = LoadHeapNumberValue(number); TryFloat64ToSmi(value, var_result_smi, if_smi); } void CodeStubAssembler::TryFloat64ToSmi(TNode value, - TVariable& var_result_smi, + TVariable* var_result_smi, Label* if_smi) { TNode value32 = RoundFloat64ToInt32(value); TNode value64 = ChangeInt32ToFloat64(value32); @@ -5674,13 +5542,13 @@ void CodeStubAssembler::TryFloat64ToSmi(TNode value, BIND(&if_int32); { if (SmiValuesAre32Bits()) { - var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32)); + *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32)); } else { DCHECK(SmiValuesAre31Bits()); TNode> pair = Int32AddWithOverflow(value32, value32); TNode overflow = Projection<1>(pair); GotoIf(overflow, &if_heap_number); - var_result_smi = + *var_result_smi = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); } Goto(if_smi); @@ -5693,7 +5561,7 @@ TNode CodeStubAssembler::ChangeFloat64ToTagged( Label if_smi(this), done(this); TVARIABLE(Smi, var_smi_result); TVARIABLE(Number, var_result); - TryFloat64ToSmi(value, var_smi_result, &if_smi); + TryFloat64ToSmi(value, &var_smi_result, &if_smi); var_result = AllocateHeapNumberWithValue(value); Goto(&done); @@ -6144,42 +6012,42 @@ TNode CodeStubAssembler::IsUndetectableMap(SloppyTNode map) { } TNode CodeStubAssembler::IsNoElementsProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = NoElementsProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = ArrayIteratorProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); - TNode cell = PromiseResolveProtectorConstant(); - TNode cell_value = LoadObjectField(cell, Cell::kValueOffset); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode cell = PromiseResolveProtectorConstant(); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return 
TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseThenProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = PromiseThenProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = ArraySpeciesProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = TypedArraySpeciesProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); @@ -6190,12 +6058,12 @@ TNode CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid( TNode cell = CAST(LoadContextElement( native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX)); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); return TaggedEqual(cell_value, invalid); } TNode CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() { - TNode invalid = SmiConstant(Isolate::kProtectorInvalid); + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = PromiseSpeciesProtectorConstant(); TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); return TaggedEqual(cell_value, invalid); @@ -6394,6 +6262,10 @@ TNode CodeStubAssembler::IsJSGlobalProxy( return IsJSGlobalProxyMap(LoadMap(object)); } +TNode CodeStubAssembler::IsJSGeneratorMap(TNode map) { + return InstanceTypeEqual(LoadMapInstanceType(map), JS_GENERATOR_OBJECT_TYPE); +} + TNode CodeStubAssembler::IsJSObjectInstanceType( SloppyTNode instance_type) { STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); @@ -6428,6 +6300,11 @@ TNode CodeStubAssembler::IsJSStringIterator( return HasInstanceType(object, JS_STRING_ITERATOR_TYPE); } +TNode CodeStubAssembler::IsJSRegExpStringIterator( + SloppyTNode object) { + return HasInstanceType(object, JS_REG_EXP_STRING_ITERATOR_TYPE); +} + TNode CodeStubAssembler::IsMap(SloppyTNode map) { return IsMetaMap(LoadMap(map)); } @@ -6656,7 +6533,7 @@ TNode CodeStubAssembler::IsBigInt(SloppyTNode object) { TNode CodeStubAssembler::IsPrimitiveInstanceType( SloppyTNode instance_type) { return Int32LessThanOrEqual(instance_type, - Int32Constant(LAST_PRIMITIVE_TYPE)); + Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE)); } TNode CodeStubAssembler::IsPrivateSymbol( @@ -6716,8 +6593,7 @@ TNode CodeStubAssembler::IsNumberDictionary( return HasInstanceType(object, NUMBER_DICTIONARY_TYPE); } -TNode CodeStubAssembler::IsJSGeneratorObject( - SloppyTNode object) { +TNode CodeStubAssembler::IsJSGeneratorObject(TNode object) { return HasInstanceType(object, JS_GENERATOR_OBJECT_TYPE); } @@ -6762,7 +6638,7 @@ TNode CodeStubAssembler::IsJSDataView(TNode object) { } TNode CodeStubAssembler::IsJSRegExp(SloppyTNode object) { - return HasInstanceType(object, JS_REGEXP_TYPE); + return HasInstanceType(object, JS_REG_EXP_TYPE); } TNode CodeStubAssembler::IsNumber(SloppyTNode object) { @@ -7011,201 +6887,17 @@ TNode 
CodeStubAssembler::StringFromSingleCharCode(TNode code) { return CAST(var_result.value()); } -// A wrapper around CopyStringCharacters which determines the correct string -// encoding, allocates a corresponding sequential string, and then copies the -// given character range using CopyStringCharacters. -// |from_string| must be a sequential string. -// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length. -TNode CodeStubAssembler::AllocAndCopyStringCharacters( - Node* from, Node* from_instance_type, TNode from_index, - TNode character_count) { - Label end(this), one_byte_sequential(this), two_byte_sequential(this); - TVARIABLE(String, var_result); - - Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential, - &two_byte_sequential); - - // The subject string is a sequential one-byte string. - BIND(&one_byte_sequential); - { - TNode result = AllocateSeqOneByteString( - Unsigned(TruncateIntPtrToInt32(character_count))); - CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - character_count, String::ONE_BYTE_ENCODING, - String::ONE_BYTE_ENCODING); - var_result = result; - Goto(&end); - } - - // The subject string is a sequential two-byte string. - BIND(&two_byte_sequential); - { - TNode result = AllocateSeqTwoByteString( - Unsigned(TruncateIntPtrToInt32(character_count))); - CopyStringCharacters(from, result, from_index, IntPtrConstant(0), - character_count, String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - var_result = result; - Goto(&end); - } - - BIND(&end); - return var_result.value(); -} - -TNode CodeStubAssembler::SubString(TNode string, - TNode from, - TNode to) { - TVARIABLE(String, var_result); - ToDirectStringAssembler to_direct(state(), string); - Label end(this), runtime(this); - - TNode const substr_length = IntPtrSub(to, from); - TNode const string_length = LoadStringLengthAsWord(string); - - // Begin dispatching based on substring length. - - Label original_string_or_invalid_length(this); - GotoIf(UintPtrGreaterThanOrEqual(substr_length, string_length), - &original_string_or_invalid_length); - - // A real substring (substr_length < string_length). - Label empty(this); - GotoIf(IntPtrEqual(substr_length, IntPtrConstant(0)), &empty); - - Label single_char(this); - GotoIf(IntPtrEqual(substr_length, IntPtrConstant(1)), &single_char); - - // Deal with different string types: update the index if necessary - // and extract the underlying string. - - TNode direct_string = to_direct.TryToDirect(&runtime); - TNode offset = IntPtrAdd(from, to_direct.offset()); - TNode const instance_type = to_direct.instance_type(); - - // The subject string can only be external or sequential string of either - // encoding at this point. - Label external_string(this); - { - if (FLAG_string_slices) { - Label next(this); - - // Short slice. Copy instead of slicing. - GotoIf(IntPtrLessThan(substr_length, - IntPtrConstant(SlicedString::kMinLength)), - &next); - - // Allocate new sliced string. 
- - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Label one_byte_slice(this), two_byte_slice(this); - Branch(IsOneByteStringInstanceType(to_direct.instance_type()), - &one_byte_slice, &two_byte_slice); - - BIND(&one_byte_slice); - { - var_result = AllocateSlicedOneByteString( - Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, - SmiTag(offset)); - Goto(&end); - } - - BIND(&two_byte_slice); - { - var_result = AllocateSlicedTwoByteString( - Unsigned(TruncateIntPtrToInt32(substr_length)), direct_string, - SmiTag(offset)); - Goto(&end); - } - - BIND(&next); - } - - // The subject string can only be external or sequential string of either - // encoding at this point. - GotoIf(to_direct.is_external(), &external_string); - - var_result = AllocAndCopyStringCharacters(direct_string, instance_type, - offset, substr_length); - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Goto(&end); - } - - // Handle external string. - BIND(&external_string); - { - TNode const fake_sequential_string = - to_direct.PointerToString(&runtime); - - var_result = AllocAndCopyStringCharacters( - fake_sequential_string, instance_type, offset, substr_length); - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - Goto(&end); - } - - BIND(&empty); - { - var_result = EmptyStringConstant(); - Goto(&end); - } - - // Substrings of length 1 are generated through CharCodeAt and FromCharCode. - BIND(&single_char); - { - TNode char_code = StringCharCodeAt(string, from); - var_result = StringFromSingleCharCode(char_code); - Goto(&end); - } - - BIND(&original_string_or_invalid_length); - { - CSA_ASSERT(this, IntPtrEqual(substr_length, string_length)); - - // Equal length - check if {from, to} == {0, str.length}. - GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime); - - // Return the original string (substr_length == string_length). - - Counters* counters = isolate()->counters(); - IncrementCounter(counters->sub_string_native(), 1); - - var_result = string; - Goto(&end); - } - - // Fall back to a runtime call. 
- BIND(&runtime); - { - var_result = - CAST(CallRuntime(Runtime::kStringSubstring, NoContextConstant(), string, - SmiTag(from), SmiTag(to))); - Goto(&end); - } - - BIND(&end); - return var_result.value(); -} - -ToDirectStringAssembler::ToDirectStringAssembler( - compiler::CodeAssemblerState* state, TNode string, Flags flags) - : CodeStubAssembler(state), - var_string_(string, this), - var_instance_type_(LoadInstanceType(string), this), - var_offset_(IntPtrConstant(0), this), - var_is_external_(Int32Constant(0), this), - flags_(flags) {} +ToDirectStringAssembler::ToDirectStringAssembler( + compiler::CodeAssemblerState* state, TNode string, Flags flags) + : CodeStubAssembler(state), + var_string_(string, this), + var_instance_type_(LoadInstanceType(string), this), + var_offset_(IntPtrConstant(0), this), + var_is_external_(Int32Constant(0), this), + flags_(flags) {} TNode ToDirectStringAssembler::TryToDirect(Label* if_bailout) { - VariableList vars({&var_string_, &var_offset_, &var_instance_type_}, zone()); - Label dispatch(this, vars); + Label dispatch(this, {&var_string_, &var_offset_, &var_instance_type_}); Label if_iscons(this); Label if_isexternal(this); Label if_issliced(this); @@ -7333,232 +7025,6 @@ TNode ToDirectStringAssembler::TryToSequential( return var_result.value(); } -void CodeStubAssembler::BranchIfCanDerefIndirectString( - TNode string, TNode instance_type, Label* can_deref, - Label* cannot_deref) { - TNode representation = - Word32And(instance_type, Int32Constant(kStringRepresentationMask)); - GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), can_deref); - GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), - cannot_deref); - // Cons string. - TNode rhs = - LoadObjectField(string, ConsString::kSecondOffset); - GotoIf(IsEmptyString(rhs), can_deref); - Goto(cannot_deref); -} - -TNode CodeStubAssembler::DerefIndirectString( - TNode string, TNode instance_type, Label* cannot_deref) { - Label deref(this); - BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref); - BIND(&deref); - STATIC_ASSERT(static_cast(ThinString::kActualOffset) == - static_cast(ConsString::kFirstOffset)); - return LoadObjectField(string, ThinString::kActualOffset); -} - -void CodeStubAssembler::DerefIndirectString(TVariable* var_string, - TNode instance_type) { -#ifdef DEBUG - Label can_deref(this), cannot_deref(this); - BranchIfCanDerefIndirectString(var_string->value(), instance_type, &can_deref, - &cannot_deref); - BIND(&cannot_deref); - DebugBreak(); // Should be able to dereference string. 
- Goto(&can_deref); - BIND(&can_deref); -#endif // DEBUG - - STATIC_ASSERT(static_cast(ThinString::kActualOffset) == - static_cast(ConsString::kFirstOffset)); - *var_string = - LoadObjectField(var_string->value(), ThinString::kActualOffset); -} - -void CodeStubAssembler::MaybeDerefIndirectString(TVariable* var_string, - TNode instance_type, - Label* did_deref, - Label* cannot_deref) { - Label deref(this); - BranchIfCanDerefIndirectString(var_string->value(), instance_type, &deref, - cannot_deref); - - BIND(&deref); - { - DerefIndirectString(var_string, instance_type); - Goto(did_deref); - } -} - -void CodeStubAssembler::MaybeDerefIndirectStrings( - TVariable* var_left, TNode left_instance_type, - TVariable* var_right, TNode right_instance_type, - Label* did_something) { - Label did_nothing_left(this), did_something_left(this), - didnt_do_anything(this); - MaybeDerefIndirectString(var_left, left_instance_type, &did_something_left, - &did_nothing_left); - - BIND(&did_something_left); - { - MaybeDerefIndirectString(var_right, right_instance_type, did_something, - did_something); - } - - BIND(&did_nothing_left); - { - MaybeDerefIndirectString(var_right, right_instance_type, did_something, - &didnt_do_anything); - } - - BIND(&didnt_do_anything); - // Fall through if neither string was an indirect string. -} - -TNode CodeStubAssembler::StringAdd(Node* context, TNode left, - TNode right) { - TVARIABLE(String, result); - Label check_right(this), runtime(this, Label::kDeferred), cons(this), - done(this, &result), done_native(this, &result); - Counters* counters = isolate()->counters(); - - TNode left_length = LoadStringLengthAsWord32(left); - GotoIfNot(Word32Equal(left_length, Uint32Constant(0)), &check_right); - result = right; - Goto(&done_native); - - BIND(&check_right); - TNode right_length = LoadStringLengthAsWord32(right); - GotoIfNot(Word32Equal(right_length, Uint32Constant(0)), &cons); - result = left; - Goto(&done_native); - - BIND(&cons); - { - TNode new_length = Uint32Add(left_length, right_length); - - // If new length is greater than String::kMaxLength, goto runtime to - // throw. Note: we also need to invalidate the string length protector, so - // can't just throw here directly. - GotoIf(Uint32GreaterThan(new_length, Uint32Constant(String::kMaxLength)), - &runtime); - - TVARIABLE(String, var_left, left); - TVARIABLE(String, var_right, right); - Variable* input_vars[2] = {&var_left, &var_right}; - Label non_cons(this, 2, input_vars); - Label slow(this, Label::kDeferred); - GotoIf(Uint32LessThan(new_length, Uint32Constant(ConsString::kMinLength)), - &non_cons); - - result = - AllocateConsString(new_length, var_left.value(), var_right.value()); - Goto(&done_native); - - BIND(&non_cons); - - Comment("Full string concatenate"); - TNode left_instance_type = LoadInstanceType(var_left.value()); - TNode right_instance_type = LoadInstanceType(var_right.value()); - // Compute intersection and difference of instance types. - - TNode ored_instance_types = - Word32Or(left_instance_type, right_instance_type); - TNode xored_instance_types = - Word32Xor(left_instance_type, right_instance_type); - - // Check if both strings have the same encoding and both are sequential. 
- GotoIf(IsSetWord32(xored_instance_types, kStringEncodingMask), &runtime); - GotoIf(IsSetWord32(ored_instance_types, kStringRepresentationMask), &slow); - - TNode word_left_length = Signed(ChangeUint32ToWord(left_length)); - TNode word_right_length = Signed(ChangeUint32ToWord(right_length)); - - Label two_byte(this); - GotoIf(Word32Equal(Word32And(ored_instance_types, - Int32Constant(kStringEncodingMask)), - Int32Constant(kTwoByteStringTag)), - &two_byte); - // One-byte sequential string case - result = AllocateSeqOneByteString(new_length); - CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), - IntPtrConstant(0), word_left_length, - String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); - CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), - word_left_length, word_right_length, - String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING); - Goto(&done_native); - - BIND(&two_byte); - { - // Two-byte sequential string case - result = AllocateSeqTwoByteString(new_length); - CopyStringCharacters(var_left.value(), result.value(), IntPtrConstant(0), - IntPtrConstant(0), word_left_length, - String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - CopyStringCharacters(var_right.value(), result.value(), IntPtrConstant(0), - word_left_length, word_right_length, - String::TWO_BYTE_ENCODING, - String::TWO_BYTE_ENCODING); - Goto(&done_native); - } - - BIND(&slow); - { - // Try to unwrap indirect strings, restart the above attempt on success. - MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right, - right_instance_type, &non_cons); - Goto(&runtime); - } - } - BIND(&runtime); - { - result = CAST(CallRuntime(Runtime::kStringAdd, context, left, right)); - Goto(&done); - } - - BIND(&done_native); - { - IncrementCounter(counters->string_add_native(), 1); - Goto(&done); - } - - BIND(&done); - return result.value(); -} - -TNode CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint( - TNode codepoint) { - VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); - - Label if_isword16(this), if_isword32(this), return_result(this); - - Branch(Uint32LessThan(codepoint, Int32Constant(0x10000)), &if_isword16, - &if_isword32); - - BIND(&if_isword16); - { - var_result.Bind(StringFromSingleCharCode(codepoint)); - Goto(&return_result); - } - - BIND(&if_isword32); - { - TNode value = AllocateSeqTwoByteString(2); - StoreNoWriteBarrier( - MachineRepresentation::kWord32, value, - IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), - codepoint); - var_result.Bind(value); - Goto(&return_result); - } - - BIND(&return_result); - return CAST(var_result.value()); -} - TNode CodeStubAssembler::StringToNumber(TNode input) { Label runtime(this, Label::kDeferred); Label end(this); @@ -7585,22 +7051,22 @@ TNode CodeStubAssembler::StringToNumber(TNode input) { return var_result.value(); } -TNode CodeStubAssembler::NumberToString(TNode input) { +TNode CodeStubAssembler::NumberToString(TNode input, + Label* bailout) { TVARIABLE(String, result); TVARIABLE(Smi, smi_input); - Label runtime(this, Label::kDeferred), if_smi(this), if_heap_number(this), - done(this, &result); + Label if_smi(this), if_heap_number(this), done(this, &result); // Load the number string cache. TNode number_string_cache = NumberStringCacheConstant(); // Make the hash mask from the length of the number string cache. It // contains two elements (number and string) for each cache entry. - // TODO(ishell): cleanup mask handling. 
- TNode mask = - BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache)); - TNode one = IntPtrConstant(1); - mask = IntPtrSub(mask, one); + TNode number_string_cache_length = + LoadAndUntagFixedArrayBaseLength(number_string_cache); + TNode one = Int32Constant(1); + TNode mask = Int32Sub( + Word32Shr(TruncateWordToInt32(number_string_cache_length), one), one); GotoIfNot(TaggedIsSmi(input), &if_heap_number); smi_input = CAST(input); @@ -7611,36 +7077,35 @@ TNode CodeStubAssembler::NumberToString(TNode input) { Comment("NumberToString - HeapNumber"); TNode heap_number_input = CAST(input); // Try normalizing the HeapNumber. - TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi); + TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi); // Make a hash from the two 32-bit values of the double. TNode low = LoadObjectField(heap_number_input, HeapNumber::kValueOffset); TNode high = LoadObjectField( heap_number_input, HeapNumber::kValueOffset + kIntSize); - TNode hash = Word32Xor(low, high); - TNode word_hash = WordShl(ChangeInt32ToIntPtr(hash), one); - TNode index = - WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant())); + TNode hash = Word32And(Word32Xor(low, high), mask); + TNode entry_index = + Signed(ChangeUint32ToWord(Int32Add(hash, hash))); // Cache entry's key must be a heap number TNode number_key = - UnsafeLoadFixedArrayElement(number_string_cache, index); - GotoIf(TaggedIsSmi(number_key), &runtime); + UnsafeLoadFixedArrayElement(number_string_cache, entry_index); + GotoIf(TaggedIsSmi(number_key), bailout); TNode number_key_heap_object = CAST(number_key); - GotoIfNot(IsHeapNumber(number_key_heap_object), &runtime); + GotoIfNot(IsHeapNumber(number_key_heap_object), bailout); // Cache entry's key must match the heap number value we're looking for. TNode low_compare = LoadObjectField( number_key_heap_object, HeapNumber::kValueOffset); TNode high_compare = LoadObjectField( number_key_heap_object, HeapNumber::kValueOffset + kIntSize); - GotoIfNot(Word32Equal(low, low_compare), &runtime); - GotoIfNot(Word32Equal(high, high_compare), &runtime); + GotoIfNot(Word32Equal(low, low_compare), bailout); + GotoIfNot(Word32Equal(high, high_compare), bailout); // Heap number match, return value from cache entry. - result = CAST( - UnsafeLoadFixedArrayElement(number_string_cache, index, kTaggedSize)); + result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, + kTaggedSize)); Goto(&done); } @@ -7648,17 +7113,28 @@ TNode CodeStubAssembler::NumberToString(TNode input) { { Comment("NumberToString - Smi"); // Load the smi key, make sure it matches the smi we're looking for. - TNode smi_index = BitcastWordToTagged(WordAnd( - WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask)); + TNode hash = Word32And(SmiToInt32(smi_input.value()), mask); + TNode entry_index = + Signed(ChangeUint32ToWord(Int32Add(hash, hash))); TNode smi_key = UnsafeLoadFixedArrayElement( - number_string_cache, smi_index, 0, SMI_PARAMETERS); - GotoIf(TaggedNotEqual(smi_key, smi_input.value()), &runtime); + number_string_cache, entry_index, 0, INTPTR_PARAMETERS); + GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout); // Smi match, return value from cache entry. 
- result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, smi_index, - kTaggedSize, SMI_PARAMETERS)); + result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, + kTaggedSize, INTPTR_PARAMETERS)); Goto(&done); } + BIND(&done); + return result.value(); +} + +TNode CodeStubAssembler::NumberToString(TNode input) { + TVARIABLE(String, result); + Label runtime(this, Label::kDeferred), done(this, &result); + + result = NumberToString(input, &runtime); + Goto(&done); BIND(&runtime); { @@ -8290,102 +7766,129 @@ void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) { } } -void CodeStubAssembler::Increment(Variable* variable, int value, - ParameterMode mode) { - DCHECK_IMPLIES(mode == INTPTR_PARAMETERS, - variable->rep() == MachineType::PointerRepresentation()); - DCHECK_IMPLIES(mode == SMI_PARAMETERS, CanBeTaggedSigned(variable->rep())); - variable->Bind(IntPtrOrSmiAdd(variable->value(), - IntPtrOrSmiConstant(value, mode), mode)); +template +void CodeStubAssembler::Increment(TVariable* variable, int value) { + *variable = + IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant(value)); } +// Instantiate Increment for Smi and IntPtrT. +// TODO(v8:9708): Consider renaming to [Smi|IntPtrT|RawPtrT]Increment. +template void CodeStubAssembler::Increment(TVariable* variable, + int value); +template void CodeStubAssembler::Increment( + TVariable* variable, int value); +template void CodeStubAssembler::Increment( + TVariable* variable, int value); + void CodeStubAssembler::Use(Label* label) { GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label); } -void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex, - Variable* var_index, Label* if_keyisunique, - Variable* var_unique, Label* if_bailout, +void CodeStubAssembler::TryToName(SloppyTNode key, Label* if_keyisindex, + TVariable* var_index, + Label* if_keyisunique, + TVariable* var_unique, + Label* if_bailout, Label* if_notinternalized) { - DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep()); - DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep()); Comment("TryToName"); - Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this), - if_keyisother(this, Label::kDeferred); + Label if_keyisnotindex(this); // Handle Smi and HeapNumber keys. - var_index->Bind(TryToIntptr(key, &if_keyisnotindex)); + *var_index = TryToIntptr(key, &if_keyisnotindex); Goto(if_keyisindex); BIND(&if_keyisnotindex); - TNode key_map = LoadMap(key); - var_unique->Bind(key); - // Symbols are unique. - GotoIf(IsSymbolMap(key_map), if_keyisunique); - TNode key_instance_type = LoadMapInstanceType(key_map); - // Miss if |key| is not a String. - STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); - GotoIfNot(IsStringInstanceType(key_instance_type), &if_keyisother); - - // |key| is a String. Check if it has a cached array index. - TNode hash = LoadNameHashField(key); - GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask), - &if_hascachedindex); - // No cached array index. If the string knows that it contains an index, - // then it must be an uncacheable index. Handle this case in the runtime. - GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout); - // Check if we have a ThinString. - GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE), - &if_thinstring); - GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE), - &if_thinstring); - // Finally, check if |key| is internalized. 
- STATIC_ASSERT(kNotInternalizedTag != 0); - GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask), - if_notinternalized != nullptr ? if_notinternalized : if_bailout); - Goto(if_keyisunique); + { + Label if_symbol(this), if_string(this), + if_keyisother(this, Label::kDeferred); + TNode key_heap_object = CAST(key); + TNode key_map = LoadMap(key_heap_object); - BIND(&if_thinstring); - var_unique->Bind( - LoadObjectField(CAST(key), ThinString::kActualOffset)); - Goto(if_keyisunique); + GotoIf(IsSymbolMap(key_map), &if_symbol); - BIND(&if_hascachedindex); - var_index->Bind(DecodeWordFromWord32(hash)); - Goto(if_keyisindex); + // Miss if |key| is not a String. + STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); + TNode key_instance_type = LoadMapInstanceType(key_map); + Branch(IsStringInstanceType(key_instance_type), &if_string, &if_keyisother); - BIND(&if_keyisother); - GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout); - var_unique->Bind(LoadObjectField(key, Oddball::kToStringOffset)); - Goto(if_keyisunique); + // Symbols are unique. + BIND(&if_symbol); + { + *var_unique = CAST(key); + Goto(if_keyisunique); + } + + BIND(&if_string); + { + Label if_hascachedindex(this), if_thinstring(this); + + // |key| is a String. Check if it has a cached array index. + TNode key_string = CAST(key); + TNode hash = LoadNameHashField(key_string); + GotoIf(IsClearWord32(hash, Name::kDoesNotContainCachedArrayIndexMask), + &if_hascachedindex); + // No cached array index. If the string knows that it contains an index, + // then it must be an uncacheable index. Handle this case in the runtime. + GotoIf(IsClearWord32(hash, Name::kIsNotArrayIndexMask), if_bailout); + // Check if we have a ThinString. + GotoIf(InstanceTypeEqual(key_instance_type, THIN_STRING_TYPE), + &if_thinstring); + GotoIf(InstanceTypeEqual(key_instance_type, THIN_ONE_BYTE_STRING_TYPE), + &if_thinstring); + // Finally, check if |key| is internalized. + STATIC_ASSERT(kNotInternalizedTag != 0); + GotoIf(IsSetWord32(key_instance_type, kIsNotInternalizedMask), + if_notinternalized != nullptr ? 
if_notinternalized : if_bailout); + + *var_unique = key_string; + Goto(if_keyisunique); + + BIND(&if_thinstring); + *var_unique = + LoadObjectField(key_string, ThinString::kActualOffset); + Goto(if_keyisunique); + + BIND(&if_hascachedindex); + *var_index = + Signed(DecodeWordFromWord32(hash)); + Goto(if_keyisindex); + } + + BIND(&if_keyisother); + { + GotoIfNot(InstanceTypeEqual(key_instance_type, ODDBALL_TYPE), if_bailout); + *var_unique = + LoadObjectField(key_heap_object, Oddball::kToStringOffset); + Goto(if_keyisunique); + } + } } void CodeStubAssembler::TryInternalizeString( - Node* string, Label* if_index, Variable* var_index, Label* if_internalized, - Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) { - DCHECK(var_index->rep() == MachineType::PointerRepresentation()); - DCHECK_EQ(var_internalized->rep(), MachineRepresentation::kTagged); - CSA_SLOW_ASSERT(this, IsString(string)); + SloppyTNode string, Label* if_index, TVariable* var_index, + Label* if_internalized, TVariable* var_internalized, + Label* if_not_internalized, Label* if_bailout) { TNode function = ExternalConstant(ExternalReference::try_internalize_string_function()); TNode const isolate_ptr = ExternalConstant(ExternalReference::isolate_address(isolate())); - Node* result = - CallCFunction(function, MachineType::AnyTagged(), - std::make_pair(MachineType::Pointer(), isolate_ptr), - std::make_pair(MachineType::AnyTagged(), string)); + TNode result = + CAST(CallCFunction(function, MachineType::AnyTagged(), + std::make_pair(MachineType::Pointer(), isolate_ptr), + std::make_pair(MachineType::AnyTagged(), string))); Label internalized(this); GotoIf(TaggedIsNotSmi(result), &internalized); - TNode word_result = SmiUntag(result); + TNode word_result = SmiUntag(CAST(result)); GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)), if_not_internalized); GotoIf(IntPtrEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)), if_bailout); - var_index->Bind(word_result); + *var_index = word_result; Goto(if_index); BIND(&internalized); - var_internalized->Bind(result); + *var_internalized = CAST(result); Goto(if_internalized); } @@ -8712,31 +8215,6 @@ TNode CodeStubAssembler::BasicLoadNumberDictionaryElement( return LoadValueByKeyIndex(dictionary, index); } -void CodeStubAssembler::BasicStoreNumberDictionaryElement( - TNode dictionary, TNode intptr_index, - TNode value, Label* not_data, Label* if_hole, Label* read_only) { - TVARIABLE(IntPtrT, var_entry); - Label if_found(this); - NumberDictionaryLookup(dictionary, intptr_index, &if_found, &var_entry, - if_hole); - BIND(&if_found); - - // Check that the value is a data property. - TNode index = EntryToIndex(var_entry.value()); - TNode details = - LoadDetailsByKeyIndex(dictionary, index); - TNode kind = DecodeWord32(details); - // TODO(jkummerow): Support accessors without missing? - GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data); - - // Check that the property is writeable. - GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), - read_only); - - // Finally, store the value. 
- StoreValueByKeyIndex(dictionary, index, value); -} - template void CodeStubAssembler::FindInsertionEntry(TNode dictionary, TNode key, @@ -8858,16 +8336,16 @@ void CodeStubAssembler::LookupLinear(TNode unique_name, first_inclusive, IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor)); - BuildFastLoop( + BuildFastLoop( last_exclusive, first_inclusive, - [=](SloppyTNode name_index) { + [=](TNode name_index) { TNode element = LoadArrayElement(array, Array::kHeaderSize, name_index); TNode candidate_name = CAST(element); *var_name_index = name_index; GotoIf(TaggedEqual(candidate_name, unique_name), if_found); }, - -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre); + -Array::kEntrySize, IndexAdvanceMode::kPre); Goto(if_not_found); } @@ -9029,7 +8507,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( TNode type = LoadMapInstanceType(map); TNode bit_field3 = EnsureOnlyHasSimpleProperties(map, type, bailout); - TNode descriptors = LoadMapDescriptors(map); + TVARIABLE(DescriptorArray, var_descriptors, LoadMapDescriptors(map)); TNode nof_descriptors = DecodeWord32(bit_field3); @@ -9044,25 +8522,23 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( // Note: var_end_key_index is exclusive for the loop TVARIABLE(IntPtrT, var_end_key_index, ToKeyIndex(nof_descriptors)); - VariableList list( - {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index}, - zone()); + VariableList list({&var_descriptors, &var_stable, &var_has_symbol, + &var_is_symbol_processing_loop, &var_start_key_index, + &var_end_key_index}, + zone()); Label descriptor_array_loop( - this, {&var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index}); + this, {&var_descriptors, &var_stable, &var_has_symbol, + &var_is_symbol_processing_loop, &var_start_key_index, + &var_end_key_index}); Goto(&descriptor_array_loop); BIND(&descriptor_array_loop); - BuildFastLoop( + BuildFastLoop( list, var_start_key_index.value(), var_end_key_index.value(), - [=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop, - &var_start_key_index, &var_end_key_index](Node* index) { - TNode descriptor_key_index = - TNode::UncheckedCast(index); + [&](TNode descriptor_key_index) { TNode next_key = - LoadKeyByKeyIndex(descriptors, descriptor_key_index); + LoadKeyByKeyIndex(var_descriptors.value(), descriptor_key_index); TVARIABLE(Object, var_value, SmiConstant(0)); Label callback(this), next_iteration(this); @@ -9117,7 +8593,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( // Directly decode from the descriptor array if |object| did not // change shape. var_map = map; - var_meta_storage = descriptors; + var_meta_storage = var_descriptors.value(); var_entry = Signed(descriptor_key_index); Goto(&if_found_fast); } @@ -9183,19 +8659,21 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( BIND(&callback); body(next_key, var_value.value()); - // Check if |object| is still stable, i.e. we can proceed using - // property details from preloaded |descriptors|. - var_stable = Select( - var_stable.value(), - [=] { return TaggedEqual(LoadMap(object), map); }, - [=] { return Int32FalseConstant(); }); + // Check if |object| is still stable, i.e. the descriptors in the + // preloaded |descriptors| are still the same modulo in-place + // representation changes. 
+ GotoIfNot(var_stable.value(), &next_iteration); + var_stable = TaggedEqual(LoadMap(object), map); + // Reload the descriptors just in case the actual array changed, and + // any of the field representations changed in-place. + var_descriptors = LoadMapDescriptors(map); Goto(&next_iteration); } } BIND(&next_iteration); }, - DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); + DescriptorArray::kEntrySize, IndexAdvanceMode::kPost); if (mode == kEnumerationOrder) { Label done(this); @@ -9205,14 +8683,73 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( var_is_symbol_processing_loop = Int32TrueConstant(); // Add DescriptorArray::kEntrySize to make the var_end_key_index exclusive // as BuildFastLoop() expects. - Increment(&var_end_key_index, DescriptorArray::kEntrySize, - INTPTR_PARAMETERS); + Increment(&var_end_key_index, DescriptorArray::kEntrySize); Goto(&descriptor_array_loop); BIND(&done); } } +TNode CodeStubAssembler::GetConstructor(TNode map) { + TVARIABLE(HeapObject, var_maybe_constructor); + var_maybe_constructor = map; + Label loop(this, &var_maybe_constructor), done(this); + GotoIfNot(IsMap(var_maybe_constructor.value()), &done); + Goto(&loop); + + BIND(&loop); + { + var_maybe_constructor = CAST(LoadObjectField( + var_maybe_constructor.value(), Map::kConstructorOrBackPointerOffset)); + GotoIf(IsMap(var_maybe_constructor.value()), &loop); + Goto(&done); + } + + BIND(&done); + return var_maybe_constructor.value(); +} + +TNode CodeStubAssembler::GetCreationContext( + TNode receiver, Label* if_bailout) { + TNode receiver_map = LoadMap(receiver); + TNode constructor = GetConstructor(receiver_map); + + TVARIABLE(JSFunction, var_function); + + Label done(this), if_jsfunction(this), if_jsgenerator(this); + GotoIf(TaggedIsSmi(constructor), if_bailout); + + TNode function_map = LoadMap(CAST(constructor)); + GotoIf(IsJSFunctionMap(function_map), &if_jsfunction); + GotoIf(IsJSGeneratorMap(function_map), &if_jsgenerator); + // Remote objects don't have a creation context. 
+ GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout); + + CSA_ASSERT(this, IsJSFunctionMap(receiver_map)); + var_function = CAST(receiver); + Goto(&done); + + BIND(&if_jsfunction); + { + var_function = CAST(constructor); + Goto(&done); + } + + BIND(&if_jsgenerator); + { + var_function = LoadJSGeneratorObjectFunction(CAST(receiver)); + Goto(&done); + } + + BIND(&done); + TNode context = LoadJSFunctionContext(var_function.value()); + + GotoIfNot(IsContext(context), if_bailout); + + TNode native_context = LoadNativeContext(context); + return native_context; +} + void CodeStubAssembler::DescriptorLookup( SloppyTNode unique_name, SloppyTNode descriptors, SloppyTNode bitfield3, Label* if_found, @@ -9302,7 +8839,7 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject( } void CodeStubAssembler::TryLookupProperty( - SloppyTNode object, SloppyTNode map, + SloppyTNode object, SloppyTNode map, SloppyTNode instance_type, SloppyTNode unique_name, Label* if_found_fast, Label* if_found_dict, Label* if_found_global, TVariable* var_meta_storage, TVariable* var_name_index, @@ -9310,7 +8847,7 @@ void CodeStubAssembler::TryLookupProperty( Label if_objectisspecial(this); GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial); - TryLookupPropertyInSimpleObject(object, map, unique_name, if_found_fast, + TryLookupPropertyInSimpleObject(CAST(object), map, unique_name, if_found_fast, if_found_dict, var_meta_storage, var_name_index, if_not_found); @@ -9547,25 +9084,44 @@ TNode CodeStubAssembler::CallGetterIfAccessor( // AccessorPair case. { if (mode == kCallJSGetter) { + Label if_callable(this), if_function_template_info(this); Node* accessor_pair = value; TNode getter = CAST(LoadObjectField(accessor_pair, AccessorPair::kGetterOffset)); TNode getter_map = LoadMap(getter); - TNode instance_type = LoadMapInstanceType(getter_map); - // FunctionTemplateInfo getters are not supported yet. - GotoIf(InstanceTypeEqual(instance_type, FUNCTION_TEMPLATE_INFO_TYPE), - if_bailout); + + GotoIf(IsCallableMap(getter_map), &if_callable); + GotoIf(IsFunctionTemplateInfoMap(getter_map), &if_function_template_info); // Return undefined if the {getter} is not callable. var_value.Bind(UndefinedConstant()); - GotoIfNot(IsCallableMap(getter_map), &done); + Goto(&done); - // Call the accessor. - Callable callable = CodeFactory::Call(isolate()); - Node* result = CallJS(callable, context, getter, receiver); - var_value.Bind(result); + BIND(&if_callable); + { + // Call the accessor. + Callable callable = CodeFactory::Call(isolate()); + Node* result = CallJS(callable, context, getter, receiver); + var_value.Bind(result); + Goto(&done); + } + + BIND(&if_function_template_info); + { + TNode cached_property_name = LoadObjectField( + getter, FunctionTemplateInfo::kCachedPropertyNameOffset); + GotoIfNot(IsTheHole(cached_property_name), if_bailout); + + TNode creation_context = + GetCreationContext(CAST(receiver), if_bailout); + var_value.Bind(CallBuiltin( + Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver, + creation_context, getter, IntPtrConstant(0), receiver)); + Goto(&done); + } + } else { + Goto(&done); } - Goto(&done); } // AccessorInfo case. 
@@ -9617,10 +9173,11 @@ TNode CodeStubAssembler::CallGetterIfAccessor( GotoIfNot(IsLengthString( LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), if_bailout); - Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); + TNode receiver_value = + LoadJSPrimitiveWrapperValue(CAST(receiver)); GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout); - GotoIfNot(IsString(receiver_value), if_bailout); - var_value.Bind(LoadStringLengthAsSmi(receiver_value)); + GotoIfNot(IsString(CAST(receiver_value)), if_bailout); + var_value.Bind(LoadStringLengthAsSmi(CAST(receiver_value))); Goto(&done); } } @@ -9808,18 +9365,14 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isfaststringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); - Node* string = LoadJSPrimitiveWrapperValue(object); - CSA_ASSERT(this, IsString(string)); + TNode string = CAST(LoadJSPrimitiveWrapperValue(CAST(object))); TNode length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); Goto(&if_isobjectorsmi); } BIND(&if_isslowstringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); - Node* string = LoadJSPrimitiveWrapperValue(object); - CSA_ASSERT(this, IsString(string)); + TNode string = CAST(LoadJSPrimitiveWrapperValue(CAST(object))); TNode length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); Goto(&if_isdictionary); @@ -9892,8 +9445,8 @@ void CodeStubAssembler::TryPrototypeChainLookup( GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); } - VARIABLE(var_index, MachineType::PointerRepresentation()); - VARIABLE(var_unique, MachineRepresentation::kTagged); + TVARIABLE(IntPtrT, var_index); + TVARIABLE(Name, var_unique); Label if_keyisindex(this), if_iskeyunique(this); TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique, @@ -9905,9 +9458,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( TVARIABLE(Map, var_holder_map, map); TVARIABLE(Int32T, var_holder_instance_type, instance_type); - VariableList merged_variables( - {&var_holder, &var_holder_map, &var_holder_instance_type}, zone()); - Label loop(this, merged_variables); + Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type}); Goto(&loop); BIND(&loop); { @@ -9950,9 +9501,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( TVARIABLE(Map, var_holder_map, map); TVARIABLE(Int32T, var_holder_instance_type, instance_type); - VariableList merged_variables( - {&var_holder, &var_holder_map, &var_holder_instance_type}, zone()); - Label loop(this, merged_variables); + Label loop(this, {&var_holder, &var_holder_map, &var_holder_instance_type}); Goto(&loop); BIND(&loop); { @@ -9978,22 +9527,22 @@ void CodeStubAssembler::TryPrototypeChainLookup( } } -Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, - SloppyTNode prototype) { - CSA_ASSERT(this, TaggedIsNotSmi(object)); - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode CodeStubAssembler::HasInPrototypeChain(TNode context, + TNode object, + TNode prototype) { + TVARIABLE(Oddball, var_result); Label return_false(this), return_true(this), return_runtime(this, Label::kDeferred), return_result(this); // Loop through the prototype chain looking for the {prototype}. 
- VARIABLE(var_object_map, MachineRepresentation::kTagged, LoadMap(object)); + TVARIABLE(Map, var_object_map, LoadMap(object)); Label loop(this, &var_object_map); Goto(&loop); BIND(&loop); { // Check if we can determine the prototype directly from the {object_map}. Label if_objectisdirect(this), if_objectisspecial(this, Label::kDeferred); - Node* object_map = var_object_map.value(); + TNode object_map = var_object_map.value(); TNode object_instance_type = LoadMapInstanceType(object_map); Branch(IsSpecialReceiverInstanceType(object_instance_type), &if_objectisspecial, &if_objectisdirect); @@ -10018,22 +9567,22 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, // Continue with the prototype. CSA_ASSERT(this, TaggedIsNotSmi(object_prototype)); - var_object_map.Bind(LoadMap(object_prototype)); + var_object_map = LoadMap(object_prototype); Goto(&loop); } BIND(&return_true); - var_result.Bind(TrueConstant()); + var_result = TrueConstant(); Goto(&return_result); BIND(&return_false); - var_result.Bind(FalseConstant()); + var_result = FalseConstant(); Goto(&return_result); BIND(&return_runtime); { // Fallback to the runtime implementation. - var_result.Bind( + var_result = CAST( CallRuntime(Runtime::kHasInPrototypeChain, context, object, prototype)); } Goto(&return_result); @@ -10042,63 +9591,67 @@ Node* CodeStubAssembler::HasInPrototypeChain(Node* context, Node* object, return var_result.value(); } -Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable, - Node* object) { - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode CodeStubAssembler::OrdinaryHasInstance( + TNode context, TNode callable_maybe_smi, + TNode object_maybe_smi) { + TVARIABLE(Oddball, var_result); Label return_runtime(this, Label::kDeferred), return_result(this); GotoIfForceSlowPath(&return_runtime); // Goto runtime if {object} is a Smi. - GotoIf(TaggedIsSmi(object), &return_runtime); + GotoIf(TaggedIsSmi(object_maybe_smi), &return_runtime); // Goto runtime if {callable} is a Smi. - GotoIf(TaggedIsSmi(callable), &return_runtime); + GotoIf(TaggedIsSmi(callable_maybe_smi), &return_runtime); - // Load map of {callable}. - TNode callable_map = LoadMap(callable); - - // Goto runtime if {callable} is not a JSFunction. - TNode callable_instance_type = LoadMapInstanceType(callable_map); - GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE), - &return_runtime); - - GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map, - &return_runtime); - - // Get the "prototype" (or initial map) of the {callable}. - TNode callable_prototype = LoadObjectField( - CAST(callable), JSFunction::kPrototypeOrInitialMapOffset); { - Label no_initial_map(this), walk_prototype_chain(this); - TVARIABLE(HeapObject, var_callable_prototype, callable_prototype); + // Load map of {callable}. + TNode object = CAST(object_maybe_smi); + TNode callable = CAST(callable_maybe_smi); + TNode callable_map = LoadMap(callable); - // Resolve the "prototype" if the {callable} has an initial map. - GotoIfNot(IsMap(callable_prototype), &no_initial_map); - var_callable_prototype = - LoadObjectField(callable_prototype, Map::kPrototypeOffset); - Goto(&walk_prototype_chain); + // Goto runtime if {callable} is not a JSFunction. 
+ TNode callable_instance_type = LoadMapInstanceType(callable_map); + GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE), + &return_runtime); - BIND(&no_initial_map); - // {callable_prototype} is the hole if the "prototype" property hasn't been - // requested so far. - Branch(TaggedEqual(callable_prototype, TheHoleConstant()), &return_runtime, - &walk_prototype_chain); + GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), callable_map, + &return_runtime); - BIND(&walk_prototype_chain); - callable_prototype = var_callable_prototype.value(); - } + // Get the "prototype" (or initial map) of the {callable}. + TNode callable_prototype = LoadObjectField( + callable, JSFunction::kPrototypeOrInitialMapOffset); + { + Label no_initial_map(this), walk_prototype_chain(this); + TVARIABLE(HeapObject, var_callable_prototype, callable_prototype); + + // Resolve the "prototype" if the {callable} has an initial map. + GotoIfNot(IsMap(callable_prototype), &no_initial_map); + var_callable_prototype = LoadObjectField( + callable_prototype, Map::kPrototypeOffset); + Goto(&walk_prototype_chain); + + BIND(&no_initial_map); + // {callable_prototype} is the hole if the "prototype" property hasn't + // been requested so far. + Branch(TaggedEqual(callable_prototype, TheHoleConstant()), + &return_runtime, &walk_prototype_chain); + + BIND(&walk_prototype_chain); + callable_prototype = var_callable_prototype.value(); + } - // Loop through the prototype chain looking for the {callable} prototype. - CSA_ASSERT(this, IsJSReceiver(callable_prototype)); - var_result.Bind(HasInPrototypeChain(context, object, callable_prototype)); - Goto(&return_result); + // Loop through the prototype chain looking for the {callable} prototype. + var_result = HasInPrototypeChain(context, object, callable_prototype); + Goto(&return_result); + } BIND(&return_runtime); { // Fallback to the runtime implementation. - var_result.Bind( - CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object)); + var_result = CAST(CallRuntime(Runtime::kOrdinaryHasInstance, context, + callable_maybe_smi, object_maybe_smi)); } Goto(&return_result); @@ -10111,34 +9664,72 @@ TNode CodeStubAssembler::ElementOffsetFromIndex(Node* index_node, ParameterMode mode, int base_size) { CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, mode)); + if (mode == SMI_PARAMETERS) { + return ElementOffsetFromIndex(ReinterpretCast(index_node), kind, + base_size); + } else { + DCHECK(mode == INTPTR_PARAMETERS); + return ElementOffsetFromIndex(ReinterpretCast(index_node), kind, + base_size); + } +} + +template +TNode CodeStubAssembler::ElementOffsetFromIndex( + TNode index_node, ElementsKind kind, int base_size) { + // TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT. 
+ static_assert(std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only Smi, UintPtrT or IntPtrT index nodes are allowed"); int element_size_shift = ElementsKindToShiftSize(kind); int element_size = 1 << element_size_shift; int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; intptr_t index = 0; + TNode intptr_index_node; bool constant_index = false; - if (mode == SMI_PARAMETERS) { + if (std::is_same::value) { + TNode smi_index_node = ReinterpretCast(index_node); element_size_shift -= kSmiShiftBits; Smi smi_index; - constant_index = ToSmiConstant(index_node, &smi_index); - if (constant_index) index = smi_index.value(); - index_node = BitcastTaggedSignedToWord(index_node); + constant_index = ToSmiConstant(smi_index_node, &smi_index); + if (constant_index) { + index = smi_index.value(); + } else { + if (COMPRESS_POINTERS_BOOL) { + smi_index_node = NormalizeSmiIndex(smi_index_node); + } + } + intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node); } else { - DCHECK(mode == INTPTR_PARAMETERS); - constant_index = ToIntPtrConstant(index_node, &index); + intptr_index_node = ReinterpretCast(index_node); + constant_index = ToIntPtrConstant(intptr_index_node, &index); } if (constant_index) { return IntPtrConstant(base_size + element_size * index); } - TNode shifted_index = + TNode shifted_index = (element_size_shift == 0) - ? UncheckedCast(index_node) + ? intptr_index_node : ((element_size_shift > 0) - ? WordShl(index_node, IntPtrConstant(element_size_shift)) - : WordSar(index_node, IntPtrConstant(-element_size_shift))); + ? WordShl(intptr_index_node, + IntPtrConstant(element_size_shift)) + : WordSar(intptr_index_node, + IntPtrConstant(-element_size_shift))); return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index)); } +// Instantiate ElementOffsetFromIndex for Smi and IntPtrT. +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::ElementOffsetFromIndex(TNode index_node, + ElementsKind kind, + int base_size); +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::ElementOffsetFromIndex(TNode index_node, + ElementsKind kind, + int base_size); + TNode CodeStubAssembler::IsOffsetInBounds(SloppyTNode offset, SloppyTNode length, int header_size, @@ -10146,8 +9737,7 @@ TNode CodeStubAssembler::IsOffsetInBounds(SloppyTNode offset, // Make sure we point to the last field. int element_size = 1 << ElementsKindToShiftSize(kind); int correction = header_size - kHeapObjectTag - element_size; - TNode last_offset = - ElementOffsetFromIndex(length, kind, INTPTR_PARAMETERS, correction); + TNode last_offset = ElementOffsetFromIndex(length, kind, correction); return IntPtrLessThanOrEqual(offset, last_offset); } @@ -10203,8 +9793,9 @@ TNode CodeStubAssembler::LoadFeedbackVectorForStub() { return CAST(LoadFeedbackVector(function)); } -void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, - Node* slot_id) { +void CodeStubAssembler::UpdateFeedback(TNode feedback, + TNode maybe_vector, + TNode slot_id) { Label end(this); // If feedback_vector is not valid, then nothing to do. 
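The templated ElementOffsetFromIndex above replaces the runtime ParameterMode switch with a compile-time restriction on the index type, plus explicit instantiations so the body can stay in the .cc file. The following is an illustrative sketch of that shape only, not part of the patch; SmiIndex, IntPtrIndex, and the 31-bit Smi tag layout are assumptions made for the sketch.

#include <cstdint>
#include <type_traits>

struct SmiIndex { intptr_t raw; };     // tagged value: logical index << 1 (assumed 31-bit Smis)
struct IntPtrIndex { intptr_t raw; };  // untagged machine-word index

template <typename TIndex>
intptr_t ElementOffsetFromIndex(TIndex index, int element_size_shift,
                                intptr_t base_size) {
  static_assert(std::is_same<TIndex, SmiIndex>::value ||
                    std::is_same<TIndex, IntPtrIndex>::value,
                "Only SmiIndex or IntPtrIndex index types are allowed");
  int shift = element_size_shift;
  if (std::is_same<TIndex, SmiIndex>::value) {
    shift -= 1;  // the Smi tag bit already scales the raw word by 2
  }
  // Indices are assumed non-negative here, so shifting either way is safe.
  intptr_t scaled =
      (shift >= 0) ? (index.raw << shift) : (index.raw >> -shift);
  return base_size + scaled;
}

// Explicit instantiations keep the template body out of the header, mirroring
// the "Instantiate ElementOffsetFromIndex for Smi and IntPtrT" lines above.
template intptr_t ElementOffsetFromIndex<SmiIndex>(SmiIndex, int, intptr_t);
template intptr_t ElementOffsetFromIndex<IntPtrIndex>(IntPtrIndex, int, intptr_t);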
GotoIf(IsUndefined(maybe_vector), &end); @@ -10216,7 +9807,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, TNode feedback_element = LoadFeedbackVectorSlot(feedback_vector, slot_id); TNode previous_feedback = CAST(feedback_element); - TNode combined_feedback = SmiOr(previous_feedback, CAST(feedback)); + TNode combined_feedback = SmiOr(previous_feedback, feedback); GotoIf(SmiEqual(previous_feedback, combined_feedback), &end); { @@ -10230,7 +9821,7 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* maybe_vector, } void CodeStubAssembler::ReportFeedbackUpdate( - SloppyTNode feedback_vector, SloppyTNode slot_id, + TNode feedback_vector, SloppyTNode slot_id, const char* reason) { // Reset profiler ticks. StoreObjectFieldNoWriteBarrier( @@ -10241,7 +9832,7 @@ void CodeStubAssembler::ReportFeedbackUpdate( // Trace the update. CallRuntime(Runtime::kInterpreterTraceUpdateFeedback, NoContextConstant(), LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset), - SmiTag(slot_id), StringConstant(reason)); + SmiTag(Signed(slot_id)), StringConstant(reason)); #endif // V8_TRACE_FEEDBACK_UPDATES } @@ -10285,14 +9876,16 @@ TNode CodeStubAssembler::LoadReceiverMap(SloppyTNode receiver) { [=] { return LoadMap(UncheckedCast(receiver)); }); } -TNode CodeStubAssembler::TryToIntptr(Node* key, Label* miss) { +TNode CodeStubAssembler::TryToIntptr(SloppyTNode key, + Label* miss) { TVARIABLE(IntPtrT, var_intptr_key); Label done(this, &var_intptr_key), key_is_smi(this); GotoIf(TaggedIsSmi(key), &key_is_smi); + // Try to convert a heap number to a Smi. - GotoIfNot(IsHeapNumber(key), miss); + GotoIfNot(IsHeapNumber(CAST(key)), miss); { - TNode value = LoadHeapNumberValue(key); + TNode value = LoadHeapNumberValue(CAST(key)); TNode int_value = RoundFloat64ToInt32(value); GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss); var_intptr_key = ChangeInt32ToIntPtr(int_value); @@ -10301,7 +9894,7 @@ TNode CodeStubAssembler::TryToIntptr(Node* key, Label* miss) { BIND(&key_is_smi); { - var_intptr_key = SmiUntag(key); + var_intptr_key = SmiUntag(CAST(key)); Goto(&done); } @@ -10354,7 +9947,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments( } Label if_mapped(this), if_unmapped(this), end(this, &var_result); TNode intptr_two = IntPtrConstant(2); - TNode adjusted_length = IntPtrSub(elements_length, intptr_two); + TNode adjusted_length = IntPtrSub(elements_length, intptr_two); GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped); @@ -10510,33 +10103,35 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind, } } -Node* CodeStubAssembler::Int32ToUint8Clamped(Node* int32_value) { +TNode CodeStubAssembler::Int32ToUint8Clamped( + TNode int32_value) { Label done(this); TNode int32_zero = Int32Constant(0); TNode int32_255 = Int32Constant(255); - VARIABLE(var_value, MachineRepresentation::kWord32, int32_value); + TVARIABLE(Word32T, var_value, int32_value); GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done); - var_value.Bind(int32_zero); + var_value = int32_zero; GotoIf(Int32LessThan(int32_value, int32_zero), &done); - var_value.Bind(int32_255); + var_value = int32_255; Goto(&done); BIND(&done); - return var_value.value(); + return UncheckedCast(var_value.value()); } -Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) { +TNode CodeStubAssembler::Float64ToUint8Clamped( + TNode float64_value) { Label done(this); - VARIABLE(var_value, MachineRepresentation::kWord32, Int32Constant(0)); + TVARIABLE(Word32T, 
var_value, Int32Constant(0)); GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done); - var_value.Bind(Int32Constant(255)); + var_value = Int32Constant(255); GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done); { TNode rounded_value = Float64RoundToEven(float64_value); - var_value.Bind(TruncateFloat64ToWord32(rounded_value)); + var_value = TruncateFloat64ToWord32(rounded_value); Goto(&done); } BIND(&done); - return var_value.value(); + return UncheckedCast(var_value.value()); } Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( @@ -10716,8 +10311,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout); } - TNode backing_store = LoadJSTypedArrayBackingStore(CAST(object)); - StoreElement(backing_store, elements_kind, intptr_key, converted_value, + TNode data_ptr = LoadJSTypedArrayDataPtr(CAST(object)); + StoreElement(data_ptr, elements_kind, intptr_key, converted_value, parameter_mode); Goto(&done); @@ -10807,7 +10402,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, if (IsSmiElementsKind(elements_kind)) { GotoIfNot(TaggedIsSmi(value), bailout); } else if (IsDoubleElementsKind(elements_kind)) { - value = TryTaggedToFloat64(value, bailout); + value = TryTaggedToFloat64(CAST(value), bailout); } if (IsGrowStoreMode(store_mode) && @@ -11047,7 +10642,7 @@ TNode CodeStubAssembler::PageFromAddress(TNode address) { } TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( - SloppyTNode feedback_vector, TNode slot) { + TNode feedback_vector, TNode slot) { TNode size = IntPtrConstant(AllocationSite::kSizeWithWeakNext); TNode site = Allocate(size, CodeStubAssembler::kPretenured); StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap); @@ -11090,19 +10685,16 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site); StoreFullTaggedNoWriteBarrier(site_list, site); - StoreFeedbackVectorSlot(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0, - SMI_PARAMETERS); + StoreFeedbackVectorSlot(feedback_vector, slot, site); return CAST(site); } TNode CodeStubAssembler::StoreWeakReferenceInFeedbackVector( - SloppyTNode feedback_vector, Node* slot, - SloppyTNode value, int additional_offset, - ParameterMode parameter_mode) { + TNode feedback_vector, TNode slot, + TNode value, int additional_offset) { TNode weak_value = MakeWeak(value); StoreFeedbackVectorSlot(feedback_vector, slot, weak_value, - UPDATE_WRITE_BARRIER, additional_offset, - parameter_mode); + UPDATE_WRITE_BARRIER, additional_offset); return weak_value; } @@ -11135,14 +10727,14 @@ TNode CodeStubAssembler::LoadElementsKind( return elements_kind; } -Node* CodeStubAssembler::BuildFastLoop( - const CodeStubAssembler::VariableList& vars, Node* start_index, - Node* end_index, const FastLoopBody& body, int increment, - ParameterMode parameter_mode, IndexAdvanceMode advance_mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode)); - MachineRepresentation index_rep = ParameterRepresentation(parameter_mode); - VARIABLE(var, index_rep, start_index); +template +TNode CodeStubAssembler::BuildFastLoop(const VariableList& vars, + TNode start_index, + TNode end_index, + const FastLoopBody& body, + int increment, + IndexAdvanceMode advance_mode) { + TVARIABLE(TIndex, var, start_index); 
VariableList vars_copy(vars.begin(), vars.end(), zone()); vars_copy.push_back(&var); Label loop(this, vars_copy); @@ -11154,8 +10746,7 @@ Node* CodeStubAssembler::BuildFastLoop( // to force the loop header check at the end of the loop and branch forward to // it from the pre-header). The extra branch is slower in the case that the // loop actually iterates. - TNode first_check = - IntPtrOrSmiEqual(var.value(), end_index, parameter_mode); + TNode first_check = IntPtrOrSmiEqual(var.value(), end_index); int32_t first_check_val; if (ToInt32Constant(first_check, &first_check_val)) { if (first_check_val) return var.value(); @@ -11167,19 +10758,28 @@ Node* CodeStubAssembler::BuildFastLoop( BIND(&loop); { if (advance_mode == IndexAdvanceMode::kPre) { - Increment(&var, increment, parameter_mode); + Increment(&var, increment); } body(var.value()); if (advance_mode == IndexAdvanceMode::kPost) { - Increment(&var, increment, parameter_mode); + Increment(&var, increment); } - Branch(IntPtrOrSmiNotEqual(var.value(), end_index, parameter_mode), &loop, - &after_loop); + Branch(IntPtrOrSmiNotEqual(var.value(), end_index), &loop, &after_loop); } BIND(&after_loop); return var.value(); } +// Instantiate BuildFastLoop for Smi and IntPtrT. +template TNode CodeStubAssembler::BuildFastLoop( + const VariableList& vars, TNode start_index, TNode end_index, + const FastLoopBody& body, int increment, + IndexAdvanceMode advance_mode); +template TNode CodeStubAssembler::BuildFastLoop( + const VariableList& vars, TNode start_index, + TNode end_index, const FastLoopBody& body, int increment, + IndexAdvanceMode advance_mode); + void CodeStubAssembler::BuildFastFixedArrayForEach( const CodeStubAssembler::VariableList& vars, Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, @@ -11201,17 +10801,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( if (direction == ForEachDirection::kForward) { for (int i = first_val; i < last_val; ++i) { TNode index = IntPtrConstant(i); - TNode offset = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS, - FixedArray::kHeaderSize - kHeapObjectTag); + TNode offset = ElementOffsetFromIndex( + index, kind, FixedArray::kHeaderSize - kHeapObjectTag); body(fixed_array, offset); } } else { for (int i = last_val - 1; i >= first_val; --i) { TNode index = IntPtrConstant(i); - TNode offset = - ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS, - FixedArray::kHeaderSize - kHeapObjectTag); + TNode offset = ElementOffsetFromIndex( + index, kind, FixedArray::kHeaderSize - kHeapObjectTag); body(fixed_array, offset); } } @@ -11228,11 +10826,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( if (direction == ForEachDirection::kReverse) std::swap(start, limit); int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize; - BuildFastLoop( + BuildFastLoop( vars, start, limit, - [fixed_array, &body](Node* offset) { body(fixed_array, offset); }, + [&](TNode offset) { body(fixed_array, offset); }, direction == ForEachDirection::kReverse ? -increment : increment, - INTPTR_PARAMETERS, direction == ForEachDirection::kReverse ? 
IndexAdvanceMode::kPre : IndexAdvanceMode::kPost); } @@ -11243,22 +10840,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace( doesnt_fit); } -void CodeStubAssembler::InitializeFieldsWithRoot(Node* object, - Node* start_offset, - Node* end_offset, +void CodeStubAssembler::InitializeFieldsWithRoot(TNode object, + TNode start_offset, + TNode end_offset, RootIndex root_index) { CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag)); end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag)); TNode root_value = LoadRoot(root_index); - BuildFastLoop( + BuildFastLoop( end_offset, start_offset, - [this, object, root_value](Node* current) { + [=](TNode current) { StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current, root_value); }, - -kTaggedSize, INTPTR_PARAMETERS, - CodeStubAssembler::IndexAdvanceMode::kPre); + -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre); } void CodeStubAssembler::BranchIfNumberRelationalComparison( @@ -11384,11 +10980,9 @@ Operation Reverse(Operation op) { } } // anonymous namespace -Node* CodeStubAssembler::RelationalComparison(Operation op, - SloppyTNode left, - SloppyTNode right, - SloppyTNode context, - Variable* var_type_feedback) { +TNode CodeStubAssembler::RelationalComparison( + Operation op, TNode left, TNode right, + TNode context, TVariable* var_type_feedback) { Label return_true(this), return_false(this), do_float_comparison(this), end(this); TVARIABLE(Oddball, var_result); // Actually only "true" or "false". @@ -11403,7 +10997,7 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, if (var_type_feedback != nullptr) { // Initialize the type feedback to None. The current feedback is combined // with the previous feedback. - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kNone); loop_variable_list.push_back(var_type_feedback); } Label loop(this, loop_variable_list); @@ -11914,17 +11508,17 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode value, } // ES6 section 7.2.12 Abstract Equality Comparison -Node* CodeStubAssembler::Equal(SloppyTNode left, - SloppyTNode right, - SloppyTNode context, - Variable* var_type_feedback) { +TNode CodeStubAssembler::Equal(SloppyTNode left, + SloppyTNode right, + SloppyTNode context, + TVariable* var_type_feedback) { // This is a slightly optimized version of Object::Equals. Whenever you // change something functionality wise in here, remember to update the // Object::Equals method as well. Label if_equal(this), if_notequal(this), do_float_comparison(this), do_right_stringtonumber(this, Label::kDeferred), end(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); TVARIABLE(Float64T, var_left_float); TVARIABLE(Float64T, var_right_float); @@ -11984,7 +11578,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber); // {left} is Smi and {right} is not HeapNumber or Smi. 
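The BuildFastLoop hunks above make the loop index a template parameter (with explicit instantiations for Smi and IntPtrT), so callers pick the index type at the call site instead of threading a ParameterMode value through every helper. Below is a minimal stand-alone sketch of that calling pattern under simplified types; it is not the V8 implementation and ignores the pre/post advance modes.

#include <cstdint>
#include <functional>

template <typename TIndex>
TIndex BuildFastLoop(TIndex start_index, TIndex end_index,
                     const std::function<void(TIndex)>& body,
                     TIndex increment) {
  // Walks from start_index towards end_index; the caller chooses the type.
  for (TIndex i = start_index; i != end_index; i += increment) {
    body(i);
  }
  return end_index;
}

intptr_t SumFirstN(const int* data, intptr_t n) {
  intptr_t sum = 0;
  // The index type is stated once, here, rather than passed around as a flag.
  BuildFastLoop<intptr_t>(0, n, [&](intptr_t i) { sum += data[i]; }, 1);
  return sum;
}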
if (var_type_feedback != nullptr) { - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } GotoIf(IsBooleanMap(right_map), &if_right_boolean); TNode right_type = LoadMapInstanceType(right_map); @@ -12009,8 +11603,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_bigint); { - result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber, - NoContextConstant(), right, left)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, + NoContextConstant(), right, left)); Goto(&end); } @@ -12046,7 +11640,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_string); { GotoIfNot(IsStringInstanceType(right_type), &use_symmetry); - result.Bind(CallBuiltin(Builtins::kStringEqual, context, left, right)); + result = + CAST(CallBuiltin(Builtins::kStringEqual, context, left, right)); CombineFeedback(var_type_feedback, SmiOr(CollectFeedbackForString(left_type), CollectFeedbackForString(right_type))); @@ -12067,8 +11662,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, { Label if_right_boolean(this); if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber); GotoIf(IsBooleanMap(right_map), &if_right_boolean); @@ -12098,38 +11692,35 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_heapnumber); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } - result.Bind(CallRuntime(Runtime::kBigIntEqualToNumber, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_bigint); { CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); - result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_string); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } - result.Bind(CallRuntime(Runtime::kBigIntEqualToString, - NoContextConstant(), left, right)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToString, + NoContextConstant(), left, right)); Goto(&end); } BIND(&if_right_boolean); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); Goto(&loop); @@ -12154,8 +11745,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, if (var_type_feedback != nullptr) { // If {right} is undetectable, it must be either also // Null or Undefined, or a Receiver (aka document.all). - var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); } Goto(&if_equal); } @@ -12164,12 +11755,11 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, { if (var_type_feedback != nullptr) { // Track whether {right} is Null, Undefined or Receiver. 
- var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal); GotoIfNot(IsBooleanMap(right_map), &if_notequal); - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Goto(&if_notequal); } @@ -12178,8 +11768,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_boolean); { if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } // If {right} is a Boolean too, it must be a different Boolean. @@ -12200,7 +11789,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, if (var_type_feedback != nullptr) { Label if_right_symbol(this); GotoIf(IsSymbolInstanceType(right_type), &if_right_symbol); - var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); Goto(&if_notequal); BIND(&if_right_symbol); @@ -12218,8 +11807,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, // {left} is a Primitive and {right} is a JSReceiver, so swapping // the order is not observable. if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Goto(&use_symmetry); } @@ -12254,8 +11842,8 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, // When we get here, {right} must be either Null or Undefined. CSA_ASSERT(this, IsNullOrUndefined(right)); if (var_type_feedback != nullptr) { - var_type_feedback->Bind(SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined)); + *var_type_feedback = SmiConstant( + CompareOperationFeedback::kReceiverOrNullOrUndefined); } Branch(IsUndetectableMap(left_map), &if_equal, &if_notequal); } @@ -12265,8 +11853,7 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, // {right} is a Primitive, and neither Null or Undefined; // convert {left} to Primitive too. 
if (var_type_feedback != nullptr) { - var_type_feedback->Bind( - SmiConstant(CompareOperationFeedback::kAny)); + *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); } Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); var_left = CallStub(callable, context, left); @@ -12298,13 +11885,13 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_equal); { - result.Bind(TrueConstant()); + result = TrueConstant(); Goto(&end); } BIND(&if_notequal); { - result.Bind(FalseConstant()); + result = FalseConstant(); Goto(&end); } @@ -12312,9 +11899,9 @@ Node* CodeStubAssembler::Equal(SloppyTNode left, return result.value(); } -TNode CodeStubAssembler::StrictEqual(SloppyTNode lhs, - SloppyTNode rhs, - Variable* var_type_feedback) { +TNode CodeStubAssembler::StrictEqual( + SloppyTNode lhs, SloppyTNode rhs, + TVariable* var_type_feedback) { // Pseudo-code for the algorithm below: // // if (lhs == rhs) { @@ -12482,7 +12069,7 @@ TNode CodeStubAssembler::StrictEqual(SloppyTNode lhs, CollectFeedbackForString(lhs_instance_type); TNode rhs_feedback = CollectFeedbackForString(rhs_instance_type); - var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback)); + *var_type_feedback = SmiOr(lhs_feedback, rhs_feedback); } result = CAST(CallBuiltin(Builtins::kStringEqual, NoContextConstant(), lhs, rhs)); @@ -12556,7 +12143,7 @@ TNode CodeStubAssembler::StrictEqual(SloppyTNode lhs, BIND(&if_lhsisoddball); { - STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE); + STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE); GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types); GotoIf(Int32LessThan(rhs_instance_type, Int32Constant(ODDBALL_TYPE)), @@ -12855,8 +12442,8 @@ TNode CodeStubAssembler::HasProperty(SloppyTNode context, return result.value(); } -Node* CodeStubAssembler::Typeof(Node* value) { - VARIABLE(result_var, MachineRepresentation::kTagged); +TNode CodeStubAssembler::Typeof(SloppyTNode value) { + TVARIABLE(String, result_var); Label return_number(this, Label::kDeferred), if_oddball(this), return_function(this), return_undefined(this), return_object(this), @@ -12864,7 +12451,8 @@ Node* CodeStubAssembler::Typeof(Node* value) { GotoIf(TaggedIsSmi(value), &return_number); - TNode map = LoadMap(value); + TNode value_heap_object = CAST(value); + TNode map = LoadMap(value_heap_object); GotoIf(IsHeapNumberMap(map), &return_number); @@ -12890,49 +12478,50 @@ Node* CodeStubAssembler::Typeof(Node* value) { GotoIf(IsBigIntInstanceType(instance_type), &return_bigint); CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE)); - result_var.Bind(HeapConstant(isolate()->factory()->symbol_string())); + result_var = HeapConstant(isolate()->factory()->symbol_string()); Goto(&return_result); BIND(&return_number); { - result_var.Bind(HeapConstant(isolate()->factory()->number_string())); + result_var = HeapConstant(isolate()->factory()->number_string()); Goto(&return_result); } BIND(&if_oddball); { - TNode type = LoadObjectField(value, Oddball::kTypeOfOffset); - result_var.Bind(type); + TNode type = + CAST(LoadObjectField(value_heap_object, Oddball::kTypeOfOffset)); + result_var = type; Goto(&return_result); } BIND(&return_function); { - result_var.Bind(HeapConstant(isolate()->factory()->function_string())); + result_var = HeapConstant(isolate()->factory()->function_string()); Goto(&return_result); } BIND(&return_undefined); { - result_var.Bind(HeapConstant(isolate()->factory()->undefined_string())); + result_var = HeapConstant(isolate()->factory()->undefined_string()); 
Goto(&return_result); } BIND(&return_object); { - result_var.Bind(HeapConstant(isolate()->factory()->object_string())); + result_var = HeapConstant(isolate()->factory()->object_string()); Goto(&return_result); } BIND(&return_string); { - result_var.Bind(HeapConstant(isolate()->factory()->string_string())); + result_var = HeapConstant(isolate()->factory()->string_string()); Goto(&return_result); } BIND(&return_bigint); { - result_var.Bind(HeapConstant(isolate()->factory()->bigint_string())); + result_var = HeapConstant(isolate()->factory()->bigint_string()); Goto(&return_result); } @@ -12941,7 +12530,7 @@ Node* CodeStubAssembler::Typeof(Node* value) { } TNode CodeStubAssembler::GetSuperConstructor( - SloppyTNode context, SloppyTNode active_function) { + TNode context, TNode active_function) { Label is_not_constructor(this, Label::kDeferred), out(this); TVARIABLE(Object, result); @@ -13004,9 +12593,10 @@ TNode CodeStubAssembler::SpeciesConstructor( return var_result.value(); } -Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, - Node* context) { - VARIABLE(var_result, MachineRepresentation::kTagged); +TNode CodeStubAssembler::InstanceOf(TNode object, + TNode callable, + TNode context) { + TVARIABLE(Oddball, var_result); Label if_notcallable(this, Label::kDeferred), if_notreceiver(this, Label::kDeferred), if_otherhandler(this), if_nohandler(this, Label::kDeferred), return_true(this), @@ -13014,7 +12604,7 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, // Ensure that the {callable} is actually a JSReceiver. GotoIf(TaggedIsSmi(callable), &if_notreceiver); - GotoIfNot(IsJSReceiver(callable), &if_notreceiver); + GotoIfNot(IsJSReceiver(CAST(callable)), &if_notreceiver); // Load the @@hasInstance property from {callable}. TNode inst_of_handler = @@ -13032,8 +12622,8 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, // Call to Function.prototype[@@hasInstance] directly. Callable builtin(BUILTIN_CODE(isolate(), FunctionPrototypeHasInstance), CallTrampolineDescriptor{}); - Node* result = CallJS(builtin, context, inst_of_handler, callable, object); - var_result.Bind(result); + var_result = + CAST(CallJS(builtin, context, inst_of_handler, callable, object)); Goto(&return_result); } @@ -13055,12 +12645,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, BIND(&if_nohandler); { // Ensure that the {callable} is actually Callable. - GotoIfNot(IsCallable(callable), &if_notcallable); + GotoIfNot(IsCallable(CAST(callable)), &if_notcallable); // Use the OrdinaryHasInstance algorithm. 
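The feedback updates in the Equal and RelationalComparison hunks above always move towards more generic feedback: UpdateFeedback combines the new value with the previous one via SmiOr, and the slow paths overwrite the variable with kAny. The sketch below illustrates only that lattice shape; the bit values are made-up stand-ins for CompareOperationFeedback, not V8's real constants.

#include <cstdint>

// Assumed stand-ins; only the "combine by bitwise OR, kAny swallows
// everything" behaviour matches the code above.
constexpr uint32_t kNone = 0;
constexpr uint32_t kSignedSmall = 1u << 0;
constexpr uint32_t kNumber = kSignedSmall | (1u << 1);
constexpr uint32_t kString = 1u << 2;
constexpr uint32_t kReceiverOrNullOrUndefined = 1u << 3;
constexpr uint32_t kAny = 0xFFu;

uint32_t CombineFeedback(uint32_t previous, uint32_t incoming) {
  // Mirrors SmiOr(previous_feedback, feedback): recorded feedback only ever
  // becomes more general, so combining is idempotent and order-independent.
  return previous | incoming;
}

// e.g. CombineFeedback(kSignedSmall, kString) keeps both bits set, and a
// single later CombineFeedback(..., kAny) makes the slot fully generic.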
- TNode result = - CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object); - var_result.Bind(result); + var_result = CAST( + CallBuiltin(Builtins::kOrdinaryHasInstance, context, callable, object)); Goto(&return_result); } @@ -13071,11 +12660,11 @@ Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable, { ThrowTypeError(context, MessageTemplate::kNonObjectInInstanceOfCheck); } BIND(&return_true); - var_result.Bind(TrueConstant()); + var_result = TrueConstant(); Goto(&return_result); BIND(&return_false); - var_result.Bind(FalseConstant()); + var_result = FalseConstant(); Goto(&return_result); BIND(&return_result); @@ -13294,9 +12883,8 @@ TNode CodeStubAssembler::AllocateJSIteratorResult( return CAST(result); } -Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, - Node* key, - Node* value) { +TNode CodeStubAssembler::AllocateJSIteratorResultForEntry( + TNode context, TNode key, SloppyTNode value) { TNode native_context = LoadNativeContext(context); TNode length = SmiConstant(2); int const elements_size = FixedArray::SizeFor(2); @@ -13326,7 +12914,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, array); StoreObjectFieldRoot(result, JSIteratorResult::kDoneOffset, RootIndex::kFalseValue); - return result; + return CAST(result); } TNode CodeStubAssembler::ArraySpeciesCreate(TNode context, @@ -13393,21 +12981,19 @@ TNode CodeStubAssembler::LoadJSTypedArrayLength( return LoadObjectField(typed_array, JSTypedArray::kLengthOffset); } -CodeStubArguments::CodeStubArguments( - CodeStubAssembler* assembler, Node* argc, Node* fp, - CodeStubAssembler::ParameterMode param_mode, ReceiverMode receiver_mode) +CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, + TNode argc, TNode fp, + ReceiverMode receiver_mode) : assembler_(assembler), - argc_mode_(param_mode), receiver_mode_(receiver_mode), argc_(argc), base_(), fp_(fp != nullptr ? 
fp : assembler_->LoadFramePointer()) { TNode offset = assembler_->ElementOffsetFromIndex( - argc_, SYSTEM_POINTER_ELEMENTS, param_mode, + argc_, SYSTEM_POINTER_ELEMENTS, (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kSystemPointerSize); - base_ = - assembler_->UncheckedCast(assembler_->IntPtrAdd(fp_, offset)); + base_ = assembler_->RawPtrAdd(fp_, offset); } TNode CodeStubArguments::GetReceiver() const { @@ -13422,24 +13008,18 @@ void CodeStubArguments::SetReceiver(TNode object) const { base_, assembler_->IntPtrConstant(kSystemPointerSize), object); } -TNode CodeStubArguments::AtIndexPtr( - Node* index, CodeStubAssembler::ParameterMode mode) const { - using Node = compiler::Node; - Node* negated_index = assembler_->IntPtrOrSmiSub( - assembler_->IntPtrOrSmiConstant(0, mode), index, mode); +TNode CodeStubArguments::AtIndexPtr(TNode index) const { + TNode negated_index = + assembler_->IntPtrOrSmiSub(assembler_->IntPtrConstant(0), index); TNode offset = assembler_->ElementOffsetFromIndex( - negated_index, SYSTEM_POINTER_ELEMENTS, mode, 0); - return assembler_->IntPtrAdd(assembler_->UncheckedCast(base_), - offset); + negated_index, SYSTEM_POINTER_ELEMENTS, 0); + return assembler_->RawPtrAdd(base_, offset); } -TNode CodeStubArguments::AtIndex( - Node* index, CodeStubAssembler::ParameterMode mode) const { - DCHECK_EQ(argc_mode_, mode); - CSA_ASSERT(assembler_, - assembler_->UintPtrOrSmiLessThan(index, GetLength(mode), mode)); +TNode CodeStubArguments::AtIndex(TNode index) const { + CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength())); return assembler_->UncheckedCast( - assembler_->LoadFullTagged(AtIndexPtr(index, mode))); + assembler_->LoadFullTagged(AtIndexPtr(index))); } TNode CodeStubArguments::AtIndex(int index) const { @@ -13452,9 +13032,8 @@ TNode CodeStubArguments::GetOptionalArgumentValue( CodeStubAssembler::Label argument_missing(assembler_), argument_done(assembler_, &result); - assembler_->GotoIf(assembler_->UintPtrOrSmiGreaterThanOrEqual( - assembler_->IntPtrOrSmiConstant(index, argc_mode_), - argc_, argc_mode_), + assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual( + assembler_->IntPtrConstant(index), argc_), &argument_missing); result = AtIndex(index); assembler_->Goto(&argument_done); @@ -13473,10 +13052,8 @@ TNode CodeStubArguments::GetOptionalArgumentValue( CodeStubAssembler::Label argument_missing(assembler_), argument_done(assembler_, &result); - assembler_->GotoIf( - assembler_->UintPtrOrSmiGreaterThanOrEqual( - assembler_->IntPtrToParameter(index, argc_mode_), argc_, argc_mode_), - &argument_missing); + assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_), + &argument_missing); result = AtIndex(index); assembler_->Goto(&argument_done); @@ -13490,43 +13067,38 @@ TNode CodeStubArguments::GetOptionalArgumentValue( void CodeStubArguments::ForEach( const CodeStubAssembler::VariableList& vars, - const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last, - CodeStubAssembler::ParameterMode mode) { + const CodeStubArguments::ForEachBodyFunction& body, TNode first, + TNode last) const { assembler_->Comment("CodeStubArguments::ForEach"); if (first == nullptr) { - first = assembler_->IntPtrOrSmiConstant(0, mode); + first = assembler_->IntPtrConstant(0); } if (last == nullptr) { - DCHECK_EQ(mode, argc_mode_); last = argc_; } - TNode start = assembler_->IntPtrSub( - assembler_->UncheckedCast(base_), - assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS, mode)); - TNode end = assembler_->IntPtrSub( - 
assembler_->UncheckedCast(base_), - assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode)); - assembler_->BuildFastLoop( + TNode start = assembler_->RawPtrSub( + base_, + assembler_->ElementOffsetFromIndex(first, SYSTEM_POINTER_ELEMENTS)); + TNode end = assembler_->RawPtrSub( + base_, assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS)); + assembler_->BuildFastLoop( vars, start, end, - [this, &body](Node* current) { - Node* arg = assembler_->Load(MachineType::AnyTagged(), current); + [&](TNode current) { + TNode arg = assembler_->Load(current); body(arg); }, - -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS, - CodeStubAssembler::IndexAdvanceMode::kPost); + -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost); } void CodeStubArguments::PopAndReturn(Node* value) { - Node* pop_count; + TNode pop_count; if (receiver_mode_ == ReceiverMode::kHasReceiver) { - pop_count = assembler_->IntPtrOrSmiAdd( - argc_, assembler_->IntPtrOrSmiConstant(1, argc_mode_), argc_mode_); + pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)); } else { pop_count = argc_; } - assembler_->PopAndReturn(assembler_->ParameterToIntPtr(pop_count, argc_mode_), - value); + assembler_->PopAndReturn(pop_count, value); } TNode CodeStubAssembler::IsFastElementsKind( @@ -13642,21 +13214,15 @@ Node* CodeStubAssembler:: } TNode CodeStubAssembler::LoadBuiltin(TNode builtin_id) { - CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0))); - CSA_ASSERT(this, - SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count))); + CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::builtin_count))); - int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; - int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits; - TNode table_index = - index_shift >= 0 - ? 
WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift) - : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift); - - return CAST( - Load(MachineType::TaggedPointer(), + TNode offset = + ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS); + + return CAST(BitcastWordToTagged( + Load(MachineType::Pointer(), ExternalConstant(ExternalReference::builtins_address(isolate())), - table_index)); + offset))); } TNode CodeStubAssembler::GetSharedFunctionInfoCode( @@ -13765,11 +13331,9 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( return sfi_code.value(); } -Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, - Node* shared_info, - Node* context) { - CSA_SLOW_ASSERT(this, IsMap(map)); - +TNode CodeStubAssembler::AllocateFunctionWithMapAndContext( + TNode map, TNode shared_info, + TNode context) { TNode const code = GetSharedFunctionInfoCode(shared_info); // TODO(ishell): All the callers of this function pass map loaded from @@ -13790,7 +13354,7 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, shared_info); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context); StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, code); - return fun; + return CAST(fun); } void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, @@ -13839,8 +13403,9 @@ void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, } } -Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty, - Label* if_runtime) { +TNode CodeStubAssembler::CheckEnumCache(TNode receiver, + Label* if_empty, + Label* if_runtime) { Label if_fast(this), if_cache(this), if_no_cache(this, Label::kDeferred); TNode receiver_map = LoadMap(receiver); @@ -13855,7 +13420,7 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty, { // Avoid runtime-call for empty dictionary receivers. GotoIfNot(IsDictionaryMap(receiver_map), if_runtime); - TNode properties = CAST(LoadSlowProperties(receiver)); + TNode properties = CAST(LoadSlowProperties(CAST(receiver))); TNode length = GetNumberOfElements(properties); GotoIfNot(TaggedEqual(length, SmiConstant(0)), if_runtime); // Check that there are no elements on the {receiver} and its prototype @@ -13881,8 +13446,7 @@ TNode CodeStubAssembler::GetArgumentValue(TorqueStructArguments args, TorqueStructArguments CodeStubAssembler::GetFrameArguments( TNode frame, TNode argc) { - return CodeStubArguments(this, argc, frame, INTPTR_PARAMETERS) - .GetTorqueArguments(); + return CodeStubArguments(this, argc, frame).GetTorqueArguments(); } void CodeStubAssembler::Print(const char* s) { @@ -13976,9 +13540,8 @@ TNode CodeStubAssembler::ArrayCreate(TNode context, // TODO(delphick): Consider using // AllocateUninitializedJSArrayWithElements to avoid initializing an // array and then writing over it. 
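The CodeStubArguments changes above compute base_ once with RawPtrAdd and then have AtIndexPtr negate the index before scaling it, because argument i sits one machine word lower than argument i-1 relative to that base. The snippet below is a rough stand-alone sketch of that addressing trick under an assumed layout; StubArguments and its fields are illustrative names, not V8's.

#include <cassert>
#include <cstdint>

struct StubArguments {
  uintptr_t* base;  // address of argument 0 (assumed layout for this sketch)
  intptr_t argc;

  uintptr_t* AtIndexPtr(intptr_t index) const {
    // Negate the index, then scale by the slot size, as AtIndexPtr does with
    // ElementOffsetFromIndex(-index, SYSTEM_POINTER_ELEMENTS).
    intptr_t offset = -index * static_cast<intptr_t>(sizeof(uintptr_t));
    return reinterpret_cast<uintptr_t*>(
        reinterpret_cast<intptr_t>(base) + offset);
  }

  uintptr_t AtIndex(intptr_t index) const {
    assert(index >= 0 && index < argc);  // cf. the CSA_ASSERT bounds check
    return *AtIndexPtr(index);
  }
};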
- array = - AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, SmiConstant(0), - nullptr, ParameterMode::SMI_PARAMETERS); + array = AllocateJSArray(PACKED_SMI_ELEMENTS, array_map, length, + SmiConstant(0), {}, ParameterMode::SMI_PARAMETERS); Goto(&done); BIND(&done); diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 9884d04e66e1da..eee3e7a376a9d3 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -97,6 +97,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(iterator_symbol, iterator_symbol, IteratorSymbol) \ V(length_string, length_string, LengthString) \ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ + V(match_symbol, match_symbol, MatchSymbol) \ V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \ V(MetaMap, meta_map, MetaMap) \ V(MinusZeroValue, minus_zero_value, MinusZero) \ @@ -114,7 +115,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(object_to_string, object_to_string, ObjectToString) \ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \ V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \ - V(premonomorphic_symbol, premonomorphic_symbol, PremonomorphicSymbol) \ V(PreparseDataMap, preparse_data_map, PreparseDataMap) \ V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \ V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \ @@ -157,11 +157,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) #ifdef DEBUG -#define CSA_CHECK(csa, x) \ - (csa)->Check( \ - [&]() -> compiler::Node* { \ - return implicit_cast>(x); \ - }, \ +#define CSA_CHECK(csa, x) \ + (csa)->Check( \ + [&]() -> compiler::Node* { \ + return implicit_cast>(x); \ + }, \ #x, __FILE__, __LINE__) #else #define CSA_CHECK(csa, x) (csa)->FastCheck(x) @@ -255,10 +255,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler public TorqueGeneratedExportedMacrosAssembler { public: using Node = compiler::Node; - template - using TNode = compiler::TNode; - template - using SloppyTNode = compiler::SloppyTNode; template using LazyNode = std::function()>; @@ -303,11 +299,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return ParameterRepresentation(OptimalParameterMode()); } + TNode ParameterToIntPtr(TNode value) { return SmiUntag(value); } + TNode ParameterToIntPtr(TNode value) { return value; } + // TODO(v8:9708): remove once all uses are ported. TNode ParameterToIntPtr(Node* value, ParameterMode mode) { if (mode == SMI_PARAMETERS) value = SmiUntag(value); return UncheckedCast(value); } + template + TNode IntPtrToParameter(TNode value); + Node* IntPtrToParameter(SloppyTNode value, ParameterMode mode) { if (mode == SMI_PARAMETERS) return SmiTag(value); return value; @@ -364,6 +366,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler #error Unknown architecture. #endif + // Pointer compression specific. Returns true if the upper 32 bits of a Smi + // contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi + // can be directly used as an index in element offset computation. + TNode IsValidSmiIndex(TNode smi); + + // Pointer compression specific. Ensures that the upper 32 bits of a Smi + // contain the sign of a lower 32 bits so that the Smi can be directly used + // as an index in element offset computation. 
+ TNode NormalizeSmiIndex(TNode smi_index); + TNode TaggedToSmi(TNode value, Label* fail) { GotoIf(TaggedIsNotSmi(value), fail); return UncheckedCast(value); @@ -443,18 +455,52 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* MatchesParameterMode(Node* value, ParameterMode mode); -#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ - Node* OpName(Node* a, Node* b, ParameterMode mode) { \ - if (mode == SMI_PARAMETERS) { \ - return SmiOpName(CAST(a), CAST(b)); \ - } else { \ - DCHECK_EQ(INTPTR_PARAMETERS, mode); \ - return IntPtrOpName(a, b); \ - } \ - } +#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ + /* TODO(v8:9708): remove once all uses are ported. */ \ + Node* OpName(Node* a, Node* b, ParameterMode mode) { \ + if (mode == SMI_PARAMETERS) { \ + return SmiOpName(CAST(a), CAST(b)); \ + } else { \ + DCHECK_EQ(INTPTR_PARAMETERS, mode); \ + return IntPtrOpName(UncheckedCast(a), \ + UncheckedCast(b)); \ + } \ + } \ + TNode OpName(TNode a, TNode b) { return SmiOpName(a, b); } \ + TNode OpName(TNode a, TNode b) { \ + return IntPtrOpName(a, b); \ + } \ + TNode OpName(TNode a, TNode b) { \ + return ReinterpretCast(IntPtrOpName( \ + ReinterpretCast(a), ReinterpretCast(b))); \ + } + // TODO(v8:9708): Define BInt operations once all uses are ported. PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin) PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd) PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub) +#undef PARAMETER_BINOP + +#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \ + /* TODO(v8:9708): remove once all uses are ported. */ \ + TNode OpName(Node* a, Node* b, ParameterMode mode) { \ + if (mode == SMI_PARAMETERS) { \ + return SmiOpName(CAST(a), CAST(b)); \ + } else { \ + DCHECK_EQ(INTPTR_PARAMETERS, mode); \ + return IntPtrOpName(UncheckedCast(a), \ + UncheckedCast(b)); \ + } \ + } \ + TNode OpName(TNode a, TNode b) { return SmiOpName(a, b); } \ + TNode OpName(TNode a, TNode b) { \ + return IntPtrOpName(a, b); \ + } \ + TNode OpName(TNode a, TNode b) { \ + return IntPtrOpName(a, b); \ + } + // TODO(v8:9708): Define BInt operations once all uses are ported. + PARAMETER_BINOP(IntPtrOrSmiEqual, WordEqual, SmiEqual) + PARAMETER_BINOP(IntPtrOrSmiNotEqual, WordNotEqual, SmiNotEqual) PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan) PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual, SmiLessThanOrEqual) @@ -473,31 +519,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler intptr_t ConstexprWordNot(intptr_t a) { return ~a; } uintptr_t ConstexprWordNot(uintptr_t a) { return ~a; } - TNode TaggedEqual(TNode> a, - TNode> b) { - // In pointer-compressed architectures, the instruction selector will narrow - // this comparison to a 32-bit one. + TNode TaggedEqual(TNode a, TNode b) { +#ifdef V8_COMPRESS_POINTERS + return Word32Equal(ChangeTaggedToCompressed(a), + ChangeTaggedToCompressed(b)); +#else return WordEqual(ReinterpretCast(a), ReinterpretCast(b)); +#endif } - TNode TaggedNotEqual(TNode> a, - TNode> b) { - // In pointer-compressed architectures, the instruction selector will narrow - // this comparison to a 32-bit one. 
- return WordNotEqual(ReinterpretCast(a), ReinterpretCast(b)); + TNode TaggedNotEqual(TNode a, TNode b) { + return Word32BinaryNot(TaggedEqual(a, b)); } TNode NoContextConstant(); #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - compiler::TNode().rootAccessorName())>::type>::type> \ name##Constant(); HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) @@ -511,11 +556,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode BIntConstant(int value); + template + TNode IntPtrOrSmiConstant(int value); + // TODO(v8:9708): remove once all uses are ported. Node* IntPtrOrSmiConstant(int value, ParameterMode mode); - TNode IntPtrOrSmiEqual(Node* left, Node* right, ParameterMode mode); - TNode IntPtrOrSmiNotEqual(Node* left, Node* right, ParameterMode mode); + bool IsIntPtrOrSmiConstantZero(TNode test); + bool IsIntPtrOrSmiConstantZero(TNode test); + // TODO(v8:9708): remove once all uses are ported. bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode); + bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, ParameterMode mode); @@ -557,25 +607,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode SmiToFloat64(SloppyTNode value); TNode SmiFromIntPtr(SloppyTNode value) { return SmiTag(value); } TNode SmiFromInt32(SloppyTNode value); + TNode SmiFromUint32(TNode value); TNode SmiToIntPtr(SloppyTNode value) { return SmiUntag(value); } TNode SmiToInt32(SloppyTNode value); // Smi operations. -#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (SmiValuesAre32Bits()) { \ - return BitcastWordToTaggedSigned(IntPtrOpName( \ - BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ - Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \ - } \ +#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (SmiValuesAre32Bits()) { \ + return BitcastWordToTaggedSigned( \ + IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ + BitcastTaggedToWordForTagAndSmiBits(b))); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \ + } \ } SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) @@ -595,38 +647,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode SmiShl(TNode a, int shift) { return BitcastWordToTaggedSigned( - WordShl(BitcastTaggedSignedToWord(a), shift)); + WordShl(BitcastTaggedToWordForTagAndSmiBits(a), shift)); } TNode SmiShr(TNode a, int shift) { if (kTaggedSize == kInt64Size) { return BitcastWordToTaggedSigned( - WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + 
WordAnd(WordShr(BitcastTaggedToWordForTagAndSmiBits(a), shift), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // leaking into the sign bit of the smi. return BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Shr( - TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + shift)), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } } TNode SmiSar(TNode a, int shift) { if (kTaggedSize == kInt64Size) { return BitcastWordToTaggedSigned( - WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + WordAnd(WordSar(BitcastTaggedToWordForTagAndSmiBits(a), shift), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } else { // For pointer compressed Smis, we want to make sure that we truncate to // int32 before shifting, to avoid the values of the top 32-bits from // changing the sign bit of the smi. return BitcastWordToTaggedSigned(WordAnd( ChangeInt32ToIntPtr(Word32Sar( - TruncateWordToInt32(BitcastTaggedSignedToWord(a)), shift)), - BitcastTaggedSignedToWord(SmiConstant(-1)))); + TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), + shift)), + BitcastTaggedToWordForTagAndSmiBits(SmiConstant(-1)))); } } @@ -648,21 +702,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } } -#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (kTaggedSize == kInt64Size) { \ - return IntPtrOpName(BitcastTaggedSignedToWord(a), \ - BitcastTaggedSignedToWord(b)); \ - } else { \ - DCHECK_EQ(kTaggedSize, kInt32Size); \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \ - } \ +#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (kTaggedSize == kInt64Size) { \ + return IntPtrOpName(BitcastTaggedToWordForTagAndSmiBits(a), \ + BitcastTaggedToWordForTagAndSmiBits(b)); \ + } else { \ + DCHECK_EQ(kTaggedSize, kInt32Size); \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return Int32OpName( \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \ + } \ } SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) @@ -856,9 +911,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TruncateIntPtrToInt32(SloppyTNode value); // Check a value for smi-ness - TNode TaggedIsSmi(SloppyTNode a); TNode TaggedIsSmi(TNode a); - TNode TaggedIsNotSmi(SloppyTNode a); + TNode TaggedIsSmi(SloppyTNode a) { + return TaggedIsSmi(UncheckedCast(a)); + } + TNode TaggedIsNotSmi(TNode a); + TNode TaggedIsNotSmi(SloppyTNode a) { + return TaggedIsNotSmi(UncheckedCast(a)); + } // Check that the value is a non-negative smi. TNode TaggedIsPositiveSmi(SloppyTNode a); // Check that a word has a word-aligned address. 
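The pointer-compression additions above (IsValidSmiIndex, NormalizeSmiIndex, TaggedEqual comparing the compressed halves, and the truncate-before-shift Smi helpers) all follow from one fact: with compressed pointers only the low 32 bits of a tagged word are guaranteed meaningful, so a Smi must be re-sign-extended before its raw word can be used as an index. The following is a stand-alone sketch of that normalization, assuming two's complement and the usual 31-bit Smi layout (value << 1, tag bit 0); it is not the CSA code itself.

#include <cassert>
#include <cstdint>

int64_t NormalizeSmiIndex(int64_t tagged) {
  // Keep only the low 32 bits (the compressed value) and sign-extend them.
  return static_cast<int64_t>(static_cast<int32_t>(tagged & 0xFFFFFFFF));
}

bool IsValidSmiIndex(int64_t tagged) {
  // Valid means the full word already equals the sign-extended low half.
  return tagged == NormalizeSmiIndex(tagged);
}

int main() {
  int64_t smi = int64_t{42} << 1;                  // Smi 42, properly extended
  int64_t dirty = smi | (int64_t{0xABCD} << 32);   // upper half corrupted
  assert(IsValidSmiIndex(smi));
  assert(!IsValidSmiIndex(dirty));
  assert(NormalizeSmiIndex(dirty) == smi);         // comparing low halves agrees
  return 0;
}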
@@ -918,9 +978,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise. void GotoIfForceSlowPath(Label* if_true); - // Branches to {if_true} when Debug::ExecutionMode is DebugInfo::kSideEffect. - void GotoIfDebugExecutionModeChecksSideEffects(Label* if_true); - // Load value from current parent frame by given offset in bytes. Node* LoadFromParentFrame(int offset, MachineType type = MachineType::AnyTagged()); @@ -1060,9 +1117,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsStringWrapperElementsKind(TNode map); void GotoIfMapHasSlowProperties(TNode map, Label* if_slow); - // Load the properties backing store of a JSObject. - TNode LoadSlowProperties(SloppyTNode object); - TNode LoadFastProperties(SloppyTNode object); + // Load the properties backing store of a JSReceiver. + TNode LoadSlowProperties(SloppyTNode object); + TNode LoadFastProperties(SloppyTNode object); // Load the elements backing store of a JSObject. TNode LoadElements(SloppyTNode object) { return LoadJSObjectElements(object); @@ -1148,10 +1205,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadStringLengthAsWord(SloppyTNode string); // Load length field of a String object as uint32_t value. TNode LoadStringLengthAsWord32(SloppyTNode string); - // Loads a pointer to the sequential String char array. - Node* PointerToSeqStringData(Node* seq_string); // Load value field of a JSPrimitiveWrapper object. - Node* LoadJSPrimitiveWrapperValue(Node* object); + TNode LoadJSPrimitiveWrapperValue(TNode object); // Figures out whether the value of maybe_object is: // - a SMI (jump to "if_smi", "extracted" will be the SMI value) @@ -1175,7 +1230,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsWeakOrCleared(TNode value); TNode IsCleared(TNode value); - TNode IsNotCleared(TNode value); + TNode IsNotCleared(TNode value) { + return Word32BinaryNot(IsCleared(value)); + } // Removes the weak bit + asserts it was set. TNode GetHeapObjectAssumeWeak(TNode value); @@ -1183,12 +1240,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode GetHeapObjectAssumeWeak(TNode value, Label* if_cleared); - TNode IsWeakReferenceTo(TNode object, - TNode value); - TNode IsNotWeakReferenceTo(TNode object, - TNode value); - TNode IsStrongReferenceTo(TNode object, - TNode value); + // Checks if |maybe_object| is a weak reference to given |heap_object|. + // Works for both any tagged |maybe_object| values. + TNode IsWeakReferenceTo(TNode maybe_object, + TNode heap_object); + // Returns true if the |object| is a HeapObject and |maybe_object| is a weak + // reference to |object|. + // The |maybe_object| must not be a Smi. + TNode IsWeakReferenceToObject(TNode maybe_object, + TNode object); TNode MakeWeak(TNode value); @@ -1341,9 +1401,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode elements_kind, Label* if_accessor, Label* if_hole); // Load a feedback slot from a FeedbackVector. 
+ template TNode LoadFeedbackVectorSlot( - Node* object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode feedback_vector, TNode slot, + int additional_offset = 0); TNode LoadFeedbackVectorLength(TNode); TNode LoadDoubleWithHoleCheck(TNode array, @@ -1383,13 +1444,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode BigIntFromInt32Pair(TNode low, TNode high); TNode BigIntFromUint32Pair(TNode low, TNode high); - void StoreJSTypedArrayElementFromTagged(TNode context, - TNode typed_array, - TNode index_node, - TNode value, - ElementsKind elements_kind); - // Context manipulation + TNode LoadContextHasExtensionField(SloppyTNode context); TNode LoadContextElement(SloppyTNode context, int slot_index); TNode LoadContextElement(SloppyTNode context, @@ -1608,10 +1664,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } void StoreFeedbackVectorSlot( - Node* object, Node* index, Node* value, + TNode feedback_vector, TNode slot, + TNode value, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + int additional_offset = 0); void EnsureArrayLengthWritable(TNode map, Label* bailout); @@ -1633,8 +1689,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value, Label* bailout); - void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address, - Node* value); + void StoreFieldsNoWriteBarrier(TNode start_address, + TNode end_address, + TNode value); Node* AllocateCellWithValue(Node* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); @@ -1642,7 +1699,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return AllocateCellWithValue(SmiConstant(value), SKIP_WRITE_BARRIER); } - Node* LoadCellValue(Node* cell); + TNode LoadCellValue(Node* cell); void StoreCellValue(Node* cell, Node* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); @@ -1698,11 +1755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode parent, TNode offset); - // Allocate an appropriate one- or two-byte ConsString with the first and - // second parts specified by |left| and |right|. - TNode AllocateConsString(TNode length, TNode left, - TNode right); - TNode AllocateNameDictionary(int at_least_space_for); TNode AllocateNameDictionary( TNode at_least_space_for, AllocationFlags = kNone); @@ -1714,26 +1766,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler template Node* AllocateOrderedHashTable(); - // Builds code that finds OrderedHashTable entry for a key with hash code - // {hash} with using the comparison code generated by {key_compare}. The code - // jumps to {entry_found} if the key is found, or to {not_found} if the key - // was not found. In the {entry_found} branch, the variable - // entry_start_position will be bound to the index of the entry (relative to - // OrderedHashTable::kHashTableStartIndex). - // - // The {CollectionType} template parameter stands for the particular instance - // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. 
- template - void FindOrderedHashTableEntry( - Node* table, Node* hash, - const std::function, Label*, Label*)>& key_compare, - Variable* entry_start_position, Label* entry_found, Label* not_found); - template TNode AllocateSmallOrderedHashTable(TNode capacity); Node* AllocateStruct(Node* map, AllocationFlags flags = kNone); - void InitializeStructBody(Node* object, Node* map, Node* size, + void InitializeStructBody(TNode object, TNode size, int start_offset = Struct::kHeaderSize); TNode AllocateJSObjectFromMap( @@ -1742,14 +1779,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectFromMap( - Node* object, Node* map, Node* instance_size, Node* properties = nullptr, + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size, Node* properties = nullptr, Node* elements = nullptr, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); - void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map, - Node* instance_size); + void InitializeJSObjectBodyWithSlackTracking( + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size); void InitializeJSObjectBodyNoSlackTracking( - Node* object, Node* map, Node* instance_size, + SloppyTNode object, SloppyTNode map, + SloppyTNode instance_size, int start_offset = JSObject::kHeaderSize); TNode IsValidFastJSArrayCapacity(Node* capacity, @@ -1762,7 +1802,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler std::pair, TNode> AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode array_map, TNode length, - Node* allocation_site, Node* capacity, + TNode allocation_site, Node* capacity, ParameterMode capacity_mode = INTPTR_PARAMETERS, AllocationFlags allocation_flags = kNone, int array_header_size = JSArray::kSize); @@ -1771,20 +1811,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // The ParameterMode argument is only used for the capacity parameter. TNode AllocateJSArray( ElementsKind kind, TNode array_map, Node* capacity, - TNode length, Node* allocation_site = nullptr, + TNode length, TNode allocation_site = {}, ParameterMode capacity_mode = INTPTR_PARAMETERS, AllocationFlags allocation_flags = kNone); TNode AllocateJSArray(ElementsKind kind, TNode array_map, TNode capacity, TNode length) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, + return AllocateJSArray(kind, array_map, capacity, length, {}, SMI_PARAMETERS); } TNode AllocateJSArray(ElementsKind kind, TNode array_map, TNode capacity, TNode length, AllocationFlags allocation_flags = kNone) { - return AllocateJSArray(kind, array_map, capacity, length, nullptr, + return AllocateJSArray(kind, array_map, capacity, length, {}, INTPTR_PARAMETERS, allocation_flags); } @@ -1792,7 +1832,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode AllocateJSArray(TNode array_map, TNode elements, TNode length, - Node* allocation_site = nullptr, + TNode allocation_site = {}, int array_header_size = JSArray::kSize); enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; @@ -1806,15 +1846,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // If |convert_holes| is set kDontConvert, holes are also copied to the // resulting array, who will have the same elements kind as |array|. The // function generates significantly less code in this case. 
- Node* CloneFastJSArray( - Node* context, Node* array, ParameterMode mode = INTPTR_PARAMETERS, - Node* allocation_site = nullptr, + TNode CloneFastJSArray( + TNode context, TNode array, + ParameterMode mode = INTPTR_PARAMETERS, + TNode allocation_site = {}, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); - Node* ExtractFastJSArray(Node* context, Node* array, Node* begin, Node* count, + Node* ExtractFastJSArray(TNode context, TNode array, + Node* begin, Node* count, ParameterMode mode = INTPTR_PARAMETERS, Node* capacity = nullptr, - Node* allocation_site = nullptr); + TNode allocation_site = {}); TNode AllocateFixedArray( ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, @@ -1828,6 +1870,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler fixed_array_map); } + TNode GetCreationContext(TNode receiver, + Label* if_bailout); + TNode GetConstructor(TNode map); + TNode GetStructMap(InstanceType instance_type); TNode AllocateUninitializedFixedArray(intptr_t capacity) { @@ -1879,10 +1925,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode object, IterationKind mode); + // TODO(v8:9722): Return type should be JSIteratorResult TNode AllocateJSIteratorResult(SloppyTNode context, SloppyTNode value, SloppyTNode done); - Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); + + // TODO(v8:9722): Return type should be JSIteratorResult + TNode AllocateJSIteratorResultForEntry(TNode context, + TNode key, + SloppyTNode value); TNode ArraySpeciesCreate(TNode context, TNode originalArray, @@ -1904,6 +1955,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum class DestroySource { kNo, kYes }; + // Collect the callable |maybe_target| feedback for either a CALL_IC or + // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|. + void CollectCallableFeedback(TNode maybe_target, + TNode context, + TNode feedback_vector, + TNode slot_id); + + // Collect CALL_IC feedback for |maybe_target| function in the + // |feedback_vector| at |slot_id|, and the call counts in + // the |feedback_vector| at |slot_id+1|. + void CollectCallFeedback(TNode maybe_target, TNode context, + TNode maybe_feedback_vector, + TNode slot_id); + + // Increment the call count for a CALL_IC or construct call. + // The call count is located at feedback_vector[slot_id + 1]. + void IncrementCallCount(TNode feedback_vector, + TNode slot_id); + // Specify DestroySource::kYes if {from_array} is being supplanted by // {to_array}. This offers a slight performance benefit by simply copying the // array word by word. The source may be destroyed at the end of this macro. @@ -2152,27 +2222,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // kAllFixedArrays, the generated code is more compact and efficient if the // caller can specify whether only FixedArrays or FixedDoubleArrays will be // passed as the |source| parameter. - Node* CloneFixedArray(Node* source, - ExtractFixedArrayFlags flags = - ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { + TNode CloneFixedArray( + TNode source, + ExtractFixedArrayFlags flags = + ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { ParameterMode mode = OptimalParameterMode(); return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr, nullptr, flags, mode); } - // Copies |character_count| elements from |from_string| to |to_string| - // starting at the |from_index|'th character. 
|from_string| and |to_string| - // can either be one-byte strings or two-byte strings, although if - // |from_string| is two-byte, then |to_string| must be two-byte. - // |from_index|, |to_index| and |character_count| must be intptr_ts s.t. 0 <= - // |from_index| <= |from_index| + |character_count| <= from_string.length and - // 0 <= |to_index| <= |to_index| + |character_count| <= to_string.length. - void CopyStringCharacters(Node* from_string, Node* to_string, - TNode from_index, TNode to_index, - TNode character_count, - String::Encoding from_encoding, - String::Encoding to_encoding); - // Loads an element from |array| of |from_kind| elements by given |offset| // (NOTE: not index!), does a hole check if |if_hole| is provided and // converts the value so that it becomes ready for storing to array of @@ -2194,21 +2252,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. - Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Label* bailout); + TNode TryGrowElementsCapacity(Node* object, Node* elements, + ElementsKind kind, Node* key, + Label* bailout); // Tries to grow the |capacity|-length |elements| array of given |object| // to store the |key| or bails out if the growing gap is too big. Returns // new elements. - Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind, - Node* key, Node* capacity, ParameterMode mode, - Label* bailout); + TNode TryGrowElementsCapacity(Node* object, Node* elements, + ElementsKind kind, Node* key, + Node* capacity, + ParameterMode mode, + Label* bailout); // Grows elements capacity of given object. Returns new elements. - Node* GrowElementsCapacity(Node* object, Node* elements, - ElementsKind from_kind, ElementsKind to_kind, - Node* capacity, Node* new_capacity, - ParameterMode mode, Label* bailout); + TNode GrowElementsCapacity(Node* object, Node* elements, + ElementsKind from_kind, + ElementsKind to_kind, + Node* capacity, Node* new_capacity, + ParameterMode mode, + Label* bailout); // Given a need to grow by |growth|, allocate an appropriate new capacity // if necessary, and return a new elements FixedArray object. 
Label |bailout| @@ -2223,25 +2286,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* base_allocation_size, Node* allocation_site); - Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber); - Node* TruncateTaggedToFloat64(Node* context, Node* value); - Node* TruncateTaggedToWord32(Node* context, Node* value); - void TaggedToWord32OrBigInt(Node* context, Node* value, Label* if_number, - Variable* var_word32, Label* if_bigint, - Variable* var_bigint); - void TaggedToWord32OrBigIntWithFeedback( - Node* context, Node* value, Label* if_number, Variable* var_word32, - Label* if_bigint, Variable* var_bigint, Variable* var_feedback); + TNode TryTaggedToFloat64(TNode value, + Label* if_valueisnotnumber); + TNode TruncateTaggedToFloat64(SloppyTNode context, + SloppyTNode value); + TNode TruncateTaggedToWord32(SloppyTNode context, + SloppyTNode value); + void TaggedToWord32OrBigInt(TNode context, TNode value, + Label* if_number, TVariable* var_word32, + Label* if_bigint, + TVariable* var_maybe_bigint); + void TaggedToWord32OrBigIntWithFeedback(TNode context, + TNode value, Label* if_number, + TVariable* var_word32, + Label* if_bigint, + TVariable* var_maybe_bigint, + TVariable* var_feedback); // Truncate the floating point value of a HeapNumber to an Int32. TNode TruncateHeapNumberValueToWord32(TNode object); // Conversions. - void TryHeapNumberToSmi(TNode number, - TVariable& output, // NOLINT(runtime/references) + void TryHeapNumberToSmi(TNode number, TVariable* output, Label* if_smi); - void TryFloat64ToSmi(TNode number, - TVariable& output, // NOLINT(runtime/references) + void TryFloat64ToSmi(TNode number, TVariable* output, Label* if_smi); TNode ChangeFloat64ToTagged(SloppyTNode value); TNode ChangeInt32ToTagged(SloppyTNode value); @@ -2377,7 +2445,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsAllocationSiteInstanceType(SloppyTNode instance_type); TNode IsJSFunctionMap(SloppyTNode map); TNode IsJSFunction(SloppyTNode object); - TNode IsJSGeneratorObject(SloppyTNode object); + TNode IsJSGeneratorObject(TNode object); TNode IsJSGlobalProxyInstanceType(SloppyTNode instance_type); TNode IsJSGlobalProxyMap(SloppyTNode map); TNode IsJSGlobalProxy(SloppyTNode object); @@ -2388,6 +2456,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSPromise(SloppyTNode object); TNode IsJSProxy(SloppyTNode object); TNode IsJSStringIterator(SloppyTNode object); + TNode IsJSRegExpStringIterator(SloppyTNode object); TNode IsJSReceiverInstanceType(SloppyTNode instance_type); TNode IsJSReceiverMap(SloppyTNode map); TNode IsJSReceiver(SloppyTNode object); @@ -2395,6 +2464,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSTypedArrayInstanceType(SloppyTNode instance_type); TNode IsJSTypedArrayMap(SloppyTNode map); TNode IsJSTypedArray(SloppyTNode object); + TNode IsJSGeneratorMap(TNode map); TNode IsJSPrimitiveWrapperInstanceType( SloppyTNode instance_type); TNode IsJSPrimitiveWrapperMap(SloppyTNode map); @@ -2537,47 +2607,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Return the single character string with only {code}. TNode StringFromSingleCharCode(TNode code); - // Return a new string object which holds a substring containing the range - // [from,to[ of string. - TNode SubString(TNode string, TNode from, - TNode to); - - // Return a new string object produced by concatenating |first| with |second|. - TNode StringAdd(Node* context, TNode first, - TNode second); - - // Check if |string| is an indirect (thin or flat cons) string type that can - // be dereferenced by DerefIndirectString. 
- void BranchIfCanDerefIndirectString(TNode string, - TNode instance_type, - Label* can_deref, Label* cannot_deref); - // Unpack an indirect (thin or flat cons) string type. - void DerefIndirectString(TVariable* var_string, - TNode instance_type); - // Check if |var_string| has an indirect (thin or flat cons) string type, - // and unpack it if so. - void MaybeDerefIndirectString(TVariable* var_string, - TNode instance_type, Label* did_deref, - Label* cannot_deref); - // Check if |var_left| or |var_right| has an indirect (thin or flat cons) - // string type, and unpack it/them if so. Fall through if nothing was done. - void MaybeDerefIndirectStrings(TVariable* var_left, - TNode left_instance_type, - TVariable* var_right, - TNode right_instance_type, - Label* did_something); - TNode DerefIndirectString(TNode string, - TNode instance_type, - Label* cannot_deref); - - TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); - // Type conversion helpers. enum class BigIntHandling { kConvertToNumber, kThrow }; // Convert a String to a Number. TNode StringToNumber(TNode input); // Convert a Number to a String. TNode NumberToString(TNode input); + TNode NumberToString(TNode input, Label* bailout); + // Convert a Non-Number object to a Number. TNode NonNumberToNumber( SloppyTNode context, SloppyTNode input, @@ -2715,6 +2752,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return Word32Equal(Word32And(word32, const_mask), const_mask); } + // Returns true if the bit field |BitField| in |word32| is equal to a given. + // constant |value|. Avoids a shift compared to using DecodeWord32. + template + TNode IsEqualInWord32(TNode word32, + typename BitField::FieldType value) { + TNode masked_word32 = + Word32And(word32, Int32Constant(BitField::kMask)); + return Word32Equal(masked_word32, Int32Constant(BitField::encode(value))); + } + // Returns true if any of the |T|'s bits in given |word| are set. template TNode IsSetWord(SloppyTNode word) { @@ -2730,9 +2777,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Smi-encoding of the mask is performed implicitly! TNode IsSetSmi(SloppyTNode smi, int untagged_mask) { intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); - return WordNotEqual( - WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), - IntPtrConstant(0)); + return WordNotEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(smi), + IntPtrConstant(mask_word)), + IntPtrConstant(0)); } // Returns true if all of the |T|'s bits in given |word32| are clear. @@ -2762,11 +2809,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void IncrementCounter(StatsCounter* counter, int delta); void DecrementCounter(StatsCounter* counter, int delta); - void Increment(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS); - void Decrement(Variable* variable, int value = 1, - ParameterMode mode = INTPTR_PARAMETERS) { - Increment(variable, -value, mode); + template + void Increment(TVariable* variable, int value = 1); + + template + void Decrement(TVariable* variable, int value = 1) { + Increment(variable, -value); } // Generates "if (false) goto label" code. Useful for marking a label as @@ -2780,8 +2828,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Note: If |key| does not yet have a hash, |if_notinternalized| will be taken // even if |key| is an array index. |if_keyisunique| will never // be taken for array indices. 
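An aside on the IsEqualInWord32 helper added earlier in this hunk, before continuing with TryToName below: its comment notes that comparing against the encoded constant needs only a mask, whereas decoding first would also need a shift. A standalone sketch with an invented bit field:

```cpp
#include <cassert>
#include <cstdint>

// Minimal BitField template; field position and width below are made up.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using ElementsKindField = BitField<uint32_t, 3, 5>;  // bits 3..7, illustrative

template <typename Field>
bool IsEqualIn(uint32_t word32, uint32_t value) {
  // One AND and one compare; decode-then-compare would also need a shift.
  return (word32 & Field::kMask) == Field::encode(value);
}

int main() {
  uint32_t word = ElementsKindField::encode(11) | 0x7;  // other fields set too
  assert(ElementsKindField::decode(word) == 11);
  assert(IsEqualIn<ElementsKindField>(word, 11));
  assert(!IsEqualIn<ElementsKindField>(word, 12));
  return 0;
}
```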
- void TryToName(Node* key, Label* if_keyisindex, Variable* var_index, - Label* if_keyisunique, Variable* var_unique, Label* if_bailout, + void TryToName(SloppyTNode key, Label* if_keyisindex, + TVariable* var_index, Label* if_keyisunique, + TVariable* var_unique, Label* if_bailout, Label* if_notinternalized = nullptr); // Performs a hash computation and string table lookup for the given string, @@ -2793,8 +2842,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // - |if_not_internalized| if the string is not in the string table (but // does not add it). // - |if_bailout| for unsupported cases (e.g. uncachable array index). - void TryInternalizeString(Node* string, Label* if_index, Variable* var_index, - Label* if_internalized, Variable* var_internalized, + void TryInternalizeString(SloppyTNode string, Label* if_index, + TVariable* var_index, + Label* if_internalized, + TVariable* var_internalized, Label* if_not_internalized, Label* if_bailout); // Calculates array index for given dictionary entry and entry field. @@ -2938,10 +2989,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode BasicLoadNumberDictionaryElement( TNode dictionary, TNode intptr_index, Label* not_data, Label* if_hole); - void BasicStoreNumberDictionaryElement(TNode dictionary, - TNode intptr_index, - TNode value, Label* not_data, - Label* if_hole, Label* read_only); template void FindInsertionEntry(TNode dictionary, TNode key, @@ -3053,7 +3100,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // // Note: this code does not check if the global dictionary points to deleted // entry! This has to be done by the caller. - void TryLookupProperty(SloppyTNode object, SloppyTNode map, + void TryLookupProperty(SloppyTNode object, SloppyTNode map, SloppyTNode instance_type, SloppyTNode unique_name, Label* if_found_fast, Label* if_found_dict, Label* if_found_global, @@ -3113,10 +3160,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Returns true if {object} has {prototype} somewhere in it's prototype // chain, otherwise false is returned. Might cause arbitrary side effects // due to [[GetPrototypeOf]] invocations. - Node* HasInPrototypeChain(Node* context, Node* object, - SloppyTNode prototype); + TNode HasInPrototypeChain(TNode context, + TNode object, + TNode prototype); // ES6 section 7.3.19 OrdinaryHasInstance (C, O) - Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object); + TNode OrdinaryHasInstance(TNode context, + TNode callable, + TNode object); // Load type feedback vector from the stub caller's frame. TNode LoadFeedbackVectorForStub(); @@ -3137,12 +3187,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SloppyTNode closure); // Update the type feedback vector. - void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id); + void UpdateFeedback(TNode feedback, + TNode maybe_feedback_vector, + TNode slot_id); // Report that there was a feedback update, performing any tasks that should // be done after a feedback update. - void ReportFeedbackUpdate(SloppyTNode feedback_vector, - SloppyTNode slot_id, const char* reason); + void ReportFeedbackUpdate(TNode feedback_vector, + SloppyTNode slot_id, const char* reason); // Combine the new feedback with the existing_feedback. Do nothing if // existing_feedback is nullptr. 
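HasInPrototypeChain, converted to typed parameters above, walks the prototype chain of |object| looking for |prototype|; the real version can trigger arbitrary side effects through [[GetPrototypeOf]], which this toy, side-effect-free walk over plain structs ignores:

```cpp
#include <cassert>

// Toy objects with a single prototype link; null marks the end of the chain.
struct ToyObject {
  const ToyObject* prototype = nullptr;
};

// Returns true if |prototype| occurs anywhere in |object|'s prototype chain.
// The object itself is not considered part of its own chain.
bool HasInPrototypeChain(const ToyObject* object, const ToyObject* prototype) {
  for (const ToyObject* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == prototype) return true;
  }
  return false;
}

int main() {
  ToyObject object_proto;                 // obj -> derived_proto -> object_proto
  ToyObject derived_proto{&object_proto};
  ToyObject obj{&derived_proto};
  assert(HasInPrototypeChain(&obj, &object_proto));
  assert(!HasInPrototypeChain(&object_proto, &obj));
  return 0;
}
```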
@@ -3185,8 +3237,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadScriptContext(TNode context, TNode context_index); - Node* Int32ToUint8Clamped(Node* int32_value); - Node* Float64ToUint8Clamped(Node* float64_value); + TNode Int32ToUint8Clamped(TNode int32_value); + TNode Float64ToUint8Clamped(TNode float64_value); Node* PrepareValueForWriteToTypedArray(TNode input, ElementsKind elements_kind, @@ -3229,13 +3281,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Store a weak in-place reference into the FeedbackVector. TNode StoreWeakReferenceInFeedbackVector( - SloppyTNode feedback_vector, Node* slot, - SloppyTNode value, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode feedback_vector, TNode slot, + TNode value, int additional_offset = 0); // Create a new AllocationSite and install it into a feedback vector. TNode CreateAllocationSiteInFeedbackVector( - SloppyTNode feedback_vector, TNode slot); + TNode feedback_vector, TNode slot); // TODO(ishell, cbruni): Change to HasBoilerplate. TNode NotHasBoilerplate(TNode maybe_literal_site); @@ -3245,19 +3296,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum class IndexAdvanceMode { kPre, kPost }; - using FastLoopBody = std::function; + template + using FastLoopBody = std::function index)>; - Node* BuildFastLoop(const VariableList& var_list, Node* start_index, - Node* end_index, const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); + template + TNode BuildFastLoop( + const VariableList& var_list, TNode start_index, + TNode end_index, const FastLoopBody& body, int increment, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre); - Node* BuildFastLoop(Node* start_index, Node* end_index, - const FastLoopBody& body, int increment, - ParameterMode parameter_mode, - IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { + template + TNode BuildFastLoop( + TNode start_index, TNode end_index, + const FastLoopBody& body, int increment, + IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) { return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body, - increment, parameter_mode, advance_mode); + increment, advance_mode); } enum class ForEachDirection { kForward, kReverse }; @@ -3304,13 +3358,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* doesnt_fit, int base_size, ParameterMode mode); - void InitializeFieldsWithRoot(Node* object, Node* start_offset, - Node* end_offset, RootIndex root); + void InitializeFieldsWithRoot(TNode object, + TNode start_offset, + TNode end_offset, RootIndex root); - Node* RelationalComparison(Operation op, SloppyTNode left, - SloppyTNode right, - SloppyTNode context, - Variable* var_type_feedback = nullptr); + TNode RelationalComparison( + Operation op, TNode left, TNode right, + TNode context, TVariable* var_type_feedback = nullptr); void BranchIfNumberRelationalComparison(Operation op, SloppyTNode left, @@ -3360,12 +3414,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void GotoIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_false); - Node* Equal(SloppyTNode lhs, SloppyTNode rhs, - SloppyTNode context, - Variable* var_type_feedback = nullptr); + TNode Equal(SloppyTNode lhs, SloppyTNode rhs, + SloppyTNode context, + TVariable* var_type_feedback = nullptr); TNode StrictEqual(SloppyTNode lhs, SloppyTNode rhs, - Variable* var_type_feedback = nullptr); + TVariable* var_type_feedback = nullptr); // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are 
treated as equal and minus zero @@ -3395,16 +3449,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler HasPropertyLookupMode::kHasProperty); } - Node* Typeof(Node* value); + TNode Typeof(SloppyTNode value); - TNode GetSuperConstructor(SloppyTNode context, - SloppyTNode active_function); + TNode GetSuperConstructor(TNode context, + TNode active_function); TNode SpeciesConstructor( SloppyTNode context, SloppyTNode object, SloppyTNode default_constructor); - Node* InstanceOf(Node* object, Node* callable, Node* context); + TNode InstanceOf(TNode object, TNode callable, + TNode context); // Debug helpers Node* IsDebugActive(); @@ -3431,8 +3486,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // JSTypedArray helpers TNode LoadJSTypedArrayLength(TNode typed_array); - TNode LoadJSTypedArrayBackingStore(TNode typed_array); + TNode LoadJSTypedArrayDataPtr(TNode typed_array); + template + TNode ElementOffsetFromIndex(TNode index, ElementsKind kind, + int base_size = 0); + // TODO(v8:9708): remove once all uses are ported. TNode ElementOffsetFromIndex(Node* index, ElementsKind kind, ParameterMode mode, int base_size = 0); @@ -3451,8 +3510,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SloppyTNode shared_info, Label* if_compile_lazy = nullptr); - Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info, - Node* context); + TNode AllocateFunctionWithMapAndContext( + TNode map, TNode shared_info, + TNode context); // Promise helpers Node* IsPromiseHookEnabled(); @@ -3463,7 +3523,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // for..in helpers void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, Label* if_slow); - Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime); + TNode CheckEnumCache(TNode receiver, Label* if_empty, + Label* if_runtime); TNode GetArgumentValue(TorqueStructArguments args, TNode index); @@ -3620,11 +3681,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* receiver, Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter); - TNode TryToIntptr(Node* key, Label* miss); - - void BranchIfPrototypesHaveNoElements(Node* receiver_map, - Label* definitely_no_elements, - Label* possibly_elements); + TNode TryToIntptr(SloppyTNode key, Label* miss); void InitializeFunctionContext(Node* native_context, Node* context, int slots); @@ -3655,13 +3712,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Allocate and return a JSArray of given total size in bytes with header // fields initialized. - TNode AllocateUninitializedJSArray(TNode array_map, - TNode length, - Node* allocation_site, - TNode size_in_bytes); + TNode AllocateUninitializedJSArray( + TNode array_map, TNode length, + TNode allocation_site, TNode size_in_bytes); TNode IsValidSmi(TNode smi); - Node* SmiShiftBitsConstant(); + + TNode SmiShiftBitsConstant() { + return IntPtrConstant(kSmiShiftSize + kSmiTagSize); + } + TNode SmiShiftBitsConstant32() { + return Int32Constant(kSmiShiftSize + kSmiTagSize); + } // Emits keyed sloppy arguments load if the |value| is nullptr or store // otherwise. Returns either the loaded value or |value|. 
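ElementOffsetFromIndex, which gains a templated overload above, turns an element index into a byte offset: header size plus the index scaled by the element size for the kind. A small sketch with made-up kinds and header size:

```cpp
#include <cassert>
#include <cstdint>

// Kinds, sizes and the header constant below are illustrative only.
enum class ElementsKind { kInt32, kDouble, kTagged64 };

constexpr int ElementSizeLog2(ElementsKind kind) {
  switch (kind) {
    case ElementsKind::kInt32:
      return 2;  // 4-byte elements
    case ElementsKind::kDouble:
    case ElementsKind::kTagged64:
      return 3;  // 8-byte elements
  }
  return 0;
}

// Byte offset into the backing store for element |index| of the given kind.
constexpr intptr_t ElementOffsetFromIndex(intptr_t index, ElementsKind kind,
                                          int base_size = 0) {
  return base_size + (index << ElementSizeLog2(kind));
}

int main() {
  constexpr int kHeaderSize = 16;  // made-up header size
  assert(ElementOffsetFromIndex(3, ElementsKind::kDouble, kHeaderSize) == 40);
  assert(ElementOffsetFromIndex(5, ElementsKind::kInt32) == 20);
  return 0;
}
```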
@@ -3689,10 +3751,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void GenerateEqual_Same(SloppyTNode value, Label* if_equal, Label* if_notequal, Variable* var_type_feedback = nullptr); - TNode AllocAndCopyStringCharacters(Node* from, - Node* from_instance_type, - TNode from_index, - TNode character_count); static const int kElementLoopUnrollThreshold = 8; @@ -3705,11 +3763,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Variable* var_numeric, Variable* var_feedback); template - void TaggedToWord32OrBigIntImpl(Node* context, Node* value, Label* if_number, - Variable* var_word32, + void TaggedToWord32OrBigIntImpl(TNode context, TNode value, + Label* if_number, + TVariable* var_word32, Label* if_bigint = nullptr, - Variable* var_bigint = nullptr, - Variable* var_feedback = nullptr); + TVariable* var_maybe_bigint = nullptr, + TVariable* var_feedback = nullptr); private: // Low-level accessors for Descriptor arrays. @@ -3727,36 +3786,48 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } }; +// template class V8_EXPORT_PRIVATE CodeStubArguments { public: using Node = compiler::Node; - template - using TNode = compiler::TNode; - template - using SloppyTNode = compiler::SloppyTNode; enum ReceiverMode { kHasReceiver, kNoReceiver }; - // |argc| is an intptr value which specifies the number of arguments passed - // to the builtin excluding the receiver. The arguments will include a - // receiver iff |receiver_mode| is kHasReceiver. - CodeStubArguments(CodeStubAssembler* assembler, Node* argc, + // |argc| specifies the number of arguments passed to the builtin excluding + // the receiver. The arguments will include a receiver iff |receiver_mode| + // is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, TNode argc, ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, argc, nullptr, - CodeStubAssembler::INTPTR_PARAMETERS, receiver_mode) { - } + : CodeStubArguments(assembler, argc, TNode(), receiver_mode) {} + + CodeStubArguments(CodeStubAssembler* assembler, TNode argc, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc), + TNode(), receiver_mode) {} - // |argc| is either a smi or intptr depending on |param_mode|. The arguments - // include a receiver iff |receiver_mode| is kHasReceiver. - CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp, - CodeStubAssembler::ParameterMode param_mode, + // TODO(v8:9708): Consider removing this variant + CodeStubArguments(CodeStubAssembler* assembler, TNode argc, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), + TNode(), receiver_mode) {} + + // |argc| specifies the number of arguments passed to the builtin excluding + // the receiver. The arguments will include a receiver iff |receiver_mode| + // is kHasReceiver. + CodeStubArguments(CodeStubAssembler* assembler, TNode argc, + TNode fp, ReceiverMode receiver_mode = ReceiverMode::kHasReceiver); + CodeStubArguments(CodeStubAssembler* assembler, TNode argc, + TNode fp, + ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) + : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp, + receiver_mode) {} + // Used by Torque to construct arguments based on a Torque-defined // struct of values. 
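Before the Torque-struct constructor that follows: the CodeStubArguments rework above keeps the same indexing contract, where |argc| excludes the receiver and positional arguments are addressed relative to a base slot. A toy view with an assumed slot layout (the real builtin frame layout differs):

```cpp
#include <cassert>
#include <cstddef>

// Stand-in for the indexing contract only: GetLength, AtIndex and
// GetOptionalArgumentValue. The receiver is assumed to sit just below the
// first positional argument, which is an invented layout.
struct ArgumentsView {
  const int* base;  // points at argument 0
  ptrdiff_t argc;   // number of arguments, receiver not included

  int Receiver() const { return base[-1]; }
  ptrdiff_t GetLength() const { return argc; }
  int AtIndex(ptrdiff_t index) const { return base[index]; }
  int GetOptionalArgumentValue(ptrdiff_t index, int default_value) const {
    return index < argc ? AtIndex(index) : default_value;
  }
};

int main() {
  int slots[] = {/*receiver*/ 99, /*args*/ 10, 20, 30};
  ArgumentsView args{&slots[1], 3};
  assert(args.GetLength() == 3);
  assert(args.Receiver() == 99);
  assert(args.AtIndex(2) == 30);
  assert(args.GetOptionalArgumentValue(5, -1) == -1);
  return 0;
}
```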
CodeStubArguments(CodeStubAssembler* assembler, TorqueStructArguments torque_arguments) : assembler_(assembler), - argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS), receiver_mode_(ReceiverMode::kHasReceiver), argc_(torque_arguments.length), base_(torque_arguments.base), @@ -3769,14 +3840,17 @@ class V8_EXPORT_PRIVATE CodeStubArguments { void SetReceiver(TNode object) const; // Computes address of the index'th argument. - TNode AtIndexPtr(Node* index, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) const; + TNode AtIndexPtr(TNode index) const; + TNode AtIndexPtr(TNode index) const { + return AtIndexPtr(assembler_->ParameterToIntPtr(index)); + } // |index| is zero-based and does not include the receiver - TNode AtIndex(Node* index, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) const; + TNode AtIndex(TNode index) const; + // TODO(v8:9708): Consider removing this variant + TNode AtIndex(TNode index) const { + return AtIndex(assembler_->ParameterToIntPtr(index)); + } TNode AtIndex(int index) const; @@ -3786,15 +3860,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments { TNode GetOptionalArgumentValue(int index, TNode default_value); - Node* GetLength(CodeStubAssembler::ParameterMode mode) const { - DCHECK_EQ(mode, argc_mode_); - return argc_; - } + TNode GetLength() const { return argc_; } TorqueStructArguments GetTorqueArguments() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return TorqueStructArguments{assembler_->UncheckedCast(fp_), base_, - assembler_->UncheckedCast(argc_)}; + return TorqueStructArguments{fp_, base_, argc_}; } TNode GetOptionalArgumentValue(TNode index) { @@ -3802,28 +3871,32 @@ class V8_EXPORT_PRIVATE CodeStubArguments { } TNode GetOptionalArgumentValue(TNode index, TNode default_value); - TNode GetLength() const { - DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS); - return assembler_->UncheckedCast(argc_); - } - using ForEachBodyFunction = std::function; + using ForEachBodyFunction = std::function arg)>; // Iteration doesn't include the receiver. |first| and |last| are zero-based. - void ForEach(const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS) { + template + void ForEach(const ForEachBodyFunction& body, TNode first = {}, + TNode last = {}) const { CodeStubAssembler::VariableList list(0, assembler_->zone()); ForEach(list, body, first, last); } // Iteration doesn't include the receiver. |first| and |last| are zero-based. 
void ForEach(const CodeStubAssembler::VariableList& vars, - const ForEachBodyFunction& body, Node* first = nullptr, - Node* last = nullptr, - CodeStubAssembler::ParameterMode mode = - CodeStubAssembler::INTPTR_PARAMETERS); + const ForEachBodyFunction& body, TNode first = {}, + TNode last = {}) const; + + void ForEach(const CodeStubAssembler::VariableList& vars, + const ForEachBodyFunction& body, TNode first, + TNode last = {}) const { + TNode first_intptr = assembler_->ParameterToIntPtr(first); + TNode last_intptr; + if (last != nullptr) { + last_intptr = assembler_->ParameterToIntPtr(last); + } + return ForEach(vars, body, first_intptr, last_intptr); + } void PopAndReturn(Node* value); @@ -3831,11 +3904,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments { Node* GetArguments(); CodeStubAssembler* assembler_; - CodeStubAssembler::ParameterMode argc_mode_; ReceiverMode receiver_mode_; - Node* argc_; + TNode argc_; TNode base_; - Node* fp_; + TNode fp_; }; class ToDirectStringAssembler : public CodeStubAssembler { diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc index 6e9613005e71a5..ef3d83a06eb88a 100644 --- a/deps/v8/src/codegen/compilation-cache.cc +++ b/deps/v8/src/codegen/compilation-cache.cc @@ -28,7 +28,7 @@ CompilationCache::CompilationCache(Isolate* isolate) eval_global_(isolate), eval_contextual_(isolate), reg_exp_(isolate, kRegExpGenerations), - enabled_(true) { + enabled_script_and_eval_(true) { CompilationSubCache* subcaches[kSubCacheCount] = { &script_, &eval_global_, &eval_contextual_, ®_exp_}; for (int i = 0; i < kSubCacheCount; ++i) { @@ -254,7 +254,7 @@ void CompilationCacheRegExp::Put(Handle source, JSRegExp::Flags flags, } void CompilationCache::Remove(Handle function_info) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; eval_global_.Remove(function_info); eval_contextual_.Remove(function_info); @@ -265,7 +265,7 @@ MaybeHandle CompilationCache::LookupScript( Handle source, MaybeHandle name, int line_offset, int column_offset, ScriptOriginOptions resource_options, Handle native_context, LanguageMode language_mode) { - if (!IsEnabled()) return MaybeHandle(); + if (!IsEnabledScriptAndEval()) return MaybeHandle(); return script_.Lookup(source, name, line_offset, column_offset, resource_options, native_context, language_mode); @@ -277,7 +277,7 @@ InfoCellPair CompilationCache::LookupEval(Handle source, LanguageMode language_mode, int position) { InfoCellPair result; - if (!IsEnabled()) return result; + if (!IsEnabledScriptAndEval()) return result; const char* cache_type; @@ -303,8 +303,6 @@ InfoCellPair CompilationCache::LookupEval(Handle source, MaybeHandle CompilationCache::LookupRegExp(Handle source, JSRegExp::Flags flags) { - if (!IsEnabled()) return MaybeHandle(); - return reg_exp_.Lookup(source, flags); } @@ -312,7 +310,7 @@ void CompilationCache::PutScript(Handle source, Handle native_context, LanguageMode language_mode, Handle function_info) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; LOG(isolate(), CompilationCacheEvent("put", "script", *function_info)); script_.Put(source, native_context, language_mode, function_info); @@ -324,7 +322,7 @@ void CompilationCache::PutEval(Handle source, Handle function_info, Handle feedback_cell, int position) { - if (!IsEnabled()) return; + if (!IsEnabledScriptAndEval()) return; const char* cache_type; HandleScope scope(isolate()); @@ -344,8 +342,6 @@ void CompilationCache::PutEval(Handle source, void CompilationCache::PutRegExp(Handle source, 
JSRegExp::Flags flags, Handle data) { - if (!IsEnabled()) return; - reg_exp_.Put(source, flags, data); } @@ -367,10 +363,12 @@ void CompilationCache::MarkCompactPrologue() { } } -void CompilationCache::Enable() { enabled_ = true; } +void CompilationCache::EnableScriptAndEval() { + enabled_script_and_eval_ = true; +} -void CompilationCache::Disable() { - enabled_ = false; +void CompilationCache::DisableScriptAndEval() { + enabled_script_and_eval_ = false; Clear(); } diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h index 35595b19858510..04bea44a82b36e 100644 --- a/deps/v8/src/codegen/compilation-cache.h +++ b/deps/v8/src/codegen/compilation-cache.h @@ -202,9 +202,14 @@ class V8_EXPORT_PRIVATE CompilationCache { void MarkCompactPrologue(); // Enable/disable compilation cache. Used by debugger to disable compilation - // cache during debugging to make sure new scripts are always compiled. - void Enable(); - void Disable(); + // cache during debugging so that eval and new scripts are always compiled. + // TODO(bmeurer, chromium:992277): The RegExp cache cannot be enabled and/or + // disabled, since it doesn't affect debugging. However ideally the other + // caches should also be always on, even in the presence of the debugger, + // but at this point there are too many unclear invariants, and so I decided + // to just fix the pressing performance problem for RegExp individually first. + void EnableScriptAndEval(); + void DisableScriptAndEval(); private: explicit CompilationCache(Isolate* isolate); @@ -215,7 +220,9 @@ class V8_EXPORT_PRIVATE CompilationCache { // The number of sub caches covering the different types to cache. static const int kSubCacheCount = 4; - bool IsEnabled() const { return FLAG_compilation_cache && enabled_; } + bool IsEnabledScriptAndEval() const { + return FLAG_compilation_cache && enabled_script_and_eval_; + } Isolate* isolate() const { return isolate_; } @@ -227,8 +234,8 @@ class V8_EXPORT_PRIVATE CompilationCache { CompilationCacheRegExp reg_exp_; CompilationSubCache* subcaches_[kSubCacheCount]; - // Current enable state of the compilation cache. - bool enabled_; + // Current enable state of the compilation cache for scripts and eval. + bool enabled_script_and_eval_; friend class Isolate; diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index fbd181f5c8eeb1..d73be13a30a7d6 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -666,21 +666,25 @@ V8_WARN_UNUSED_RESULT MaybeHandle GetCodeFromOptimizedCodeCache( function->GetIsolate(), RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap); Handle shared(function->shared(), function->GetIsolate()); + Isolate* isolate = function->GetIsolate(); DisallowHeapAllocation no_gc; - if (osr_offset.IsNone()) { - if (function->has_feedback_vector()) { - FeedbackVector feedback_vector = function->feedback_vector(); - feedback_vector.EvictOptimizedCodeMarkedForDeoptimization( - function->shared(), "GetCodeFromOptimizedCodeCache"); - Code code = feedback_vector.optimized_code(); - - if (!code.is_null()) { - // Caching of optimized code enabled and optimized code found. 
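The rewritten GetCodeFromOptimizedCodeCache above splits the lookup: requests with no OSR offset consult the function's feedback vector, while OSR requests go to the native context's OSROptimizedCodeCache. A toy model of that dispatch, with invented types standing in for Code, the feedback vector, and the OSR cache:

```cpp
#include <cassert>
#include <map>
#include <optional>
#include <utility>

// All types here are simplified stand-ins, not V8's.
using Code = int;
constexpr int kNoOsrOffset = -1;

struct FeedbackVectorSketch {
  std::optional<Code> optimized_code;
};

struct OsrCodeCacheSketch {
  // Keyed by (function id, OSR bytecode offset).
  std::map<std::pair<int, int>, Code> entries;
  std::optional<Code> Get(int function_id, int osr_offset) const {
    auto it = entries.find({function_id, osr_offset});
    if (it == entries.end()) return std::nullopt;
    return it->second;
  }
};

std::optional<Code> GetCodeFromCaches(const FeedbackVectorSketch& feedback,
                                      const OsrCodeCacheSketch& osr_cache,
                                      int function_id, int osr_offset) {
  if (osr_offset == kNoOsrOffset) return feedback.optimized_code;  // non-OSR
  return osr_cache.Get(function_id, osr_offset);                   // OSR
}

int main() {
  FeedbackVectorSketch feedback{Code{100}};
  OsrCodeCacheSketch osr_cache;
  osr_cache.entries[{7, 24}] = Code{200};
  assert(*GetCodeFromCaches(feedback, osr_cache, 7, kNoOsrOffset) == 100);
  assert(*GetCodeFromCaches(feedback, osr_cache, 7, 24) == 200);
  assert(!GetCodeFromCaches(feedback, osr_cache, 7, 48));
  return 0;
}
```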
- DCHECK(!code.marked_for_deoptimization()); - DCHECK(function->shared().is_compiled()); - return Handle(code, feedback_vector.GetIsolate()); - } - } + Code code; + if (osr_offset.IsNone() && function->has_feedback_vector()) { + FeedbackVector feedback_vector = function->feedback_vector(); + feedback_vector.EvictOptimizedCodeMarkedForDeoptimization( + function->shared(), "GetCodeFromOptimizedCodeCache"); + code = feedback_vector.optimized_code(); + } else if (!osr_offset.IsNone()) { + code = function->context() + .native_context() + .GetOSROptimizedCodeCache() + .GetOptimizedCode(shared, osr_offset, isolate); + } + if (!code.is_null()) { + // Caching of optimized code enabled and optimized code found. + DCHECK(!code.marked_for_deoptimization()); + DCHECK(function->shared().is_compiled()); + return Handle(code, isolate); } return MaybeHandle(); } @@ -711,12 +715,15 @@ void InsertCodeIntoOptimizedCodeCache( // Cache optimized context-specific code. Handle function = compilation_info->closure(); Handle shared(function->shared(), function->GetIsolate()); - Handle native_context(function->context().native_context(), - function->GetIsolate()); + Handle native_context(function->context().native_context(), + function->GetIsolate()); if (compilation_info->osr_offset().IsNone()) { Handle vector = handle(function->feedback_vector(), function->GetIsolate()); FeedbackVector::SetOptimizedCode(vector, code); + } else { + OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code, + compilation_info->osr_offset()); } } @@ -1904,6 +1911,12 @@ struct ScriptCompileTimerScope { case CacheBehaviour::kConsumeCodeCache: return isolate_->counters()->compile_script_with_consume_cache(); + // Note that this only counts the finalization part of streaming, the + // actual streaming compile is counted by BackgroundCompileTask into + // "compile_script_on_background". + case CacheBehaviour::kNoCacheBecauseStreamingSource: + return isolate_->counters()->compile_script_streaming_finalization(); + case CacheBehaviour::kNoCacheBecauseInlineScript: return isolate_->counters() ->compile_script_no_cache_because_inline_script(); @@ -1923,9 +1936,6 @@ struct ScriptCompileTimerScope { // TODO(leszeks): Consider counting separately once modules are more // common. case CacheBehaviour::kNoCacheBecauseModule: - // TODO(leszeks): Count separately or remove entirely once we have - // background compilation. - case CacheBehaviour::kNoCacheBecauseStreamingSource: case CacheBehaviour::kNoCacheBecauseV8Extension: case CacheBehaviour::kNoCacheBecauseExtensionModule: case CacheBehaviour::kNoCacheBecausePacScript: diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc index 6816c5b7ad580b..42b2fa6e9a0233 100644 --- a/deps/v8/src/codegen/constant-pool.cc +++ b/deps/v8/src/codegen/constant-pool.cc @@ -49,22 +49,22 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess( } ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( - ConstantPoolEntry& entry, ConstantPoolEntry::Type type) { + ConstantPoolEntry* entry, ConstantPoolEntry::Type type) { DCHECK(!emitted_label_.is_bound()); PerTypeEntryInfo& info = info_[type]; const int entry_size = ConstantPoolEntry::size(type); bool merged = false; - if (entry.sharing_ok()) { + if (entry->sharing_ok()) { // Try to merge entries std::vector::iterator it = info.shared_entries.begin(); int end = static_cast(info.shared_entries.size()); for (int i = 0; i < end; i++, it++) { if ((entry_size == kSystemPointerSize) - ? 
entry.value() == it->value() - : entry.value64() == it->value64()) { + ? entry->value() == it->value() + : entry->value64() == it->value64()) { // Merge with found entry. - entry.set_merged_index(i); + entry->set_merged_index(i); merged = true; break; } @@ -72,16 +72,16 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( } // By definition, merged entries have regular access. - DCHECK(!merged || entry.merged_index() < info.regular_count); + DCHECK(!merged || entry->merged_index() < info.regular_count); ConstantPoolEntry::Access access = (merged ? ConstantPoolEntry::REGULAR : NextAccess(type)); // Enforce an upper bound on search time by limiting the search to // unique sharable entries which fit in the regular section. - if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { - info.shared_entries.push_back(entry); + if (entry->sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { + info.shared_entries.push_back(*entry); } else { - info.entries.push_back(entry); + info.entries.push_back(*entry); } // We're done if we found a match or have already triggered the diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h index d07452336b4e40..d2ab5641aea07f 100644 --- a/deps/v8/src/codegen/constant-pool.h +++ b/deps/v8/src/codegen/constant-pool.h @@ -102,13 +102,13 @@ class ConstantPoolBuilder { ConstantPoolEntry::Access AddEntry(int position, intptr_t value, bool sharing_ok) { ConstantPoolEntry entry(position, value, sharing_ok); - return AddEntry(entry, ConstantPoolEntry::INTPTR); + return AddEntry(&entry, ConstantPoolEntry::INTPTR); } // Add double constant to the embedded constant pool ConstantPoolEntry::Access AddEntry(int position, Double value) { ConstantPoolEntry entry(position, value); - return AddEntry(entry, ConstantPoolEntry::DOUBLE); + return AddEntry(&entry, ConstantPoolEntry::DOUBLE); } // Add double constant to the embedded constant pool @@ -138,9 +138,8 @@ class ConstantPoolBuilder { inline Label* EmittedPosition() { return &emitted_label_; } private: - ConstantPoolEntry::Access AddEntry( - ConstantPoolEntry& entry, // NOLINT(runtime/references) - ConstantPoolEntry::Type type); + ConstantPoolEntry::Access AddEntry(ConstantPoolEntry* entry, + ConstantPoolEntry::Type type); void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type); void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access, ConstantPoolEntry::Type type); diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h index dae9992c57f6c1..6b3d3934d0c111 100644 --- a/deps/v8/src/codegen/cpu-features.h +++ b/deps/v8/src/codegen/cpu-features.h @@ -13,7 +13,7 @@ namespace internal { // CPU feature flags. enum CpuFeature { - // x86 +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 SSE4_2, SSE4_1, SSSE3, @@ -26,39 +26,46 @@ enum CpuFeature { LZCNT, POPCNT, ATOM, - // ARM + +#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 // - Standard configurations. The baseline is ARMv6+VFPv2. ARMv7, // ARMv7-A + VFPv3-D32 + NEON ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV ARMv8, // ARMv8-A (+ all of the above) - // MIPS, MIPS64 + + // ARM feature aliases (based on the standard configurations above). 
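Stepping back to the constant-pool hunk above: AddEntry now takes the entry by pointer (the reference parameter tripped the runtime/references lint check), and sharable entries are merged when an existing shared entry holds the same value. A simplified version of that dedup step:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for ConstantPoolEntry and the builder's shared-entry
// list; only the merge-by-value behaviour is modeled.
struct PoolEntry {
  intptr_t value;
  bool sharing_ok;
  int merged_index = -1;  // index of the entry this one was merged with
};

struct ConstantPoolSketch {
  std::vector<PoolEntry> shared_entries;

  // Takes the entry by pointer, matching the signature change in the hunk.
  void AddEntry(PoolEntry* entry) {
    if (!entry->sharing_ok) return;
    for (size_t i = 0; i < shared_entries.size(); ++i) {
      if (shared_entries[i].value == entry->value) {
        entry->merged_index = static_cast<int>(i);  // merge, emit nothing new
        return;
      }
    }
    shared_entries.push_back(*entry);
  }
};

int main() {
  ConstantPoolSketch pool;
  PoolEntry a{0x1234, true}, b{0x1234, true}, c{0x5678, true};
  pool.AddEntry(&a);
  pool.AddEntry(&b);  // same value as |a|: merged, not appended
  pool.AddEntry(&c);
  assert(b.merged_index == 0);
  assert(pool.shared_entries.size() == 2);
  return 0;
}
```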
+ VFPv3 = ARMv7, + NEON = ARMv7, + VFP32DREGS = ARMv7, + SUDIV = ARMv7_SUDIV, + +#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 FPU, FP64FPU, MIPSr1, MIPSr2, MIPSr6, MIPS_SIMD, // MSA instructions - // PPC + +#elif V8_TARGET_ARCH_PPC + FPU, FPR_GPR_MOV, LWSYNC, ISELECT, VSX, MODULO, - // S390 + +#elif V8_TARGET_ARCH_S390X + FPU, DISTINCT_OPS, GENERAL_INSTR_EXT, FLOATING_POINT_EXT, VECTOR_FACILITY, VECTOR_ENHANCE_FACILITY_1, MISC_INSTR_EXT2, +#endif - NUMBER_OF_CPU_FEATURES, - - // ARM feature aliases (based on the standard configurations above). - VFPv3 = ARMv7, - NEON = ARMv7, - VFP32DREGS = ARMv7, - SUDIV = ARMv7_SUDIV + NUMBER_OF_CPU_FEATURES }; // CpuFeatures keeps track of which features are supported by the target CPU. diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc index 44503e532d1ed0..e1f873cb38d330 100644 --- a/deps/v8/src/codegen/external-reference.cc +++ b/deps/v8/src/codegen/external-reference.cc @@ -217,10 +217,8 @@ struct IsValidExternalReferenceType { FUNCTION_REFERENCE(incremental_marking_record_write_function, IncrementalMarking::RecordWriteFromCode) -ExternalReference ExternalReference::store_buffer_overflow_function() { - return ExternalReference( - Redirect(Heap::store_buffer_overflow_function_address())); -} +FUNCTION_REFERENCE(insert_remembered_set_function, + Heap::InsertIntoRememberedSetFromCode) FUNCTION_REFERENCE(delete_handle_scope_extensions, HandleScope::DeleteExtensions) @@ -342,10 +340,6 @@ ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) { return ExternalReference(address); } -ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) { - return ExternalReference(isolate->heap()->store_buffer_top_address()); -} - ExternalReference ExternalReference::heap_is_marking_flag_address( Isolate* isolate) { return ExternalReference(isolate->heap()->IsMarkingFlagAddress()); @@ -529,19 +523,19 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address( FUNCTION_REFERENCE_WITH_TYPE(ieee754_acos_function, base::ieee754::acos, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_acosh_function, base::ieee754::acosh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_asin_function, base::ieee754::asin, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_asinh_function, base::ieee754::asinh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan_function, base::ieee754::atan, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atanh_function, base::ieee754::atanh, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_atan2_function, base::ieee754::atan2, BUILTIN_FP_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cbrt_function, base::ieee754::cbrt, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cos_function, base::ieee754::cos, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh, @@ -549,7 +543,7 @@ FUNCTION_REFERENCE_WITH_TYPE(ieee754_cosh_function, base::ieee754::cosh, FUNCTION_REFERENCE_WITH_TYPE(ieee754_exp_function, base::ieee754::exp, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_expm1_function, base::ieee754::expm1, - BUILTIN_FP_FP_CALL) + BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log_function, base::ieee754::log, BUILTIN_FP_CALL) FUNCTION_REFERENCE_WITH_TYPE(ieee754_log1p_function, base::ieee754::log1p, diff --git a/deps/v8/src/codegen/external-reference.h 
b/deps/v8/src/codegen/external-reference.h index 45c26bdfb091d5..7cc0241fc4a5f8 100644 --- a/deps/v8/src/codegen/external-reference.h +++ b/deps/v8/src/codegen/external-reference.h @@ -38,7 +38,6 @@ class StatsCounter; V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \ V(address_of_jslimit, "StackGuard::address_of_jslimit()") \ V(address_of_real_jslimit, "StackGuard::address_of_real_jslimit()") \ - V(store_buffer_top, "store_buffer_top") \ V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \ V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \ V(new_space_allocation_limit_address, \ @@ -143,6 +142,7 @@ class StatsCounter; V(ieee754_tanh_function, "base::ieee754::tanh") \ V(incremental_marking_record_write_function, \ "IncrementalMarking::RecordWrite") \ + V(insert_remembered_set_function, "Heap::InsertIntoRememberedSetFromCode") \ V(invalidate_prototype_chains_function, \ "JSObject::InvalidatePrototypeChains()") \ V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \ @@ -170,7 +170,6 @@ class StatsCounter; V(search_string_raw_two_one, "search_string_raw_two_one") \ V(search_string_raw_two_two, "search_string_raw_two_two") \ V(smi_lexicographic_compare_function, "smi_lexicographic_compare_function") \ - V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \ V(try_internalize_string_function, "try_internalize_string_function") \ V(wasm_call_trap_callback_for_testing, \ "wasm::call_trap_callback_for_testing") \ diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h index e274b41fa33b77..174a4838683df8 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h @@ -39,6 +39,7 @@ #include "src/codegen/ia32/assembler-ia32.h" +#include "src/base/memory.h" #include "src/codegen/assembler.h" #include "src/debug/debug.h" #include "src/objects/objects-inl.h" @@ -58,12 +59,12 @@ void RelocInfo::apply(intptr_t delta) { RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY))); if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) || IsOffHeapTarget(rmode_)) { - int32_t* p = reinterpret_cast(pc_); - *p -= delta; // Relocate entry. + base::WriteUnalignedValue(pc_, + base::ReadUnalignedValue(pc_) - delta); } else if (IsInternalReference(rmode_)) { - // absolute code pointer inside code object moves with the code object. - int32_t* p = reinterpret_cast(pc_); - *p += delta; // Relocate entry. + // Absolute code pointer inside code object moves with the code object. 
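The RelocInfo::apply hunk above swaps the raw int32_t* dereference for base::ReadUnalignedValue / base::WriteUnalignedValue, since the 32-bit displacement embedded in instruction bytes need not be aligned. A portable stand-in for such helpers, built on memcpy (these are not V8's actual functions):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// memcpy-based unaligned access; compilers typically lower this to a plain
// load/store on architectures that allow unaligned accesses.
template <typename T>
T ReadUnalignedValue(const uint8_t* p) {
  T value;
  std::memcpy(&value, p, sizeof(T));
  return value;
}

template <typename T>
void WriteUnalignedValue(uint8_t* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}

int main() {
  // A 32-bit displacement embedded at an odd offset inside instruction bytes.
  uint8_t code[8] = {0xE8, 0, 0, 0, 0, 0x90, 0x90, 0x90};
  WriteUnalignedValue<int32_t>(code + 1, 0x11223344);
  int32_t disp = ReadUnalignedValue<int32_t>(code + 1);
  WriteUnalignedValue<int32_t>(code + 1, disp - 0x100);  // "relocate" by delta
  assert(ReadUnalignedValue<int32_t>(code + 1) == 0x11223244);
  return 0;
}
```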
+ base::WriteUnalignedValue(pc_, + base::ReadUnalignedValue(pc_) + delta); } } @@ -103,7 +104,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc_, sizeof(Address)); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc index aefcab7299c7c8..405e4b7c553fe1 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc @@ -272,8 +272,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); @@ -2163,70 +2163,6 @@ void Assembler::divsd(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::xorpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x57); - emit_sse_operand(dst, src); -} - -void Assembler::andps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x54); - emit_sse_operand(dst, src); -} - -void Assembler::andnps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x55); - emit_sse_operand(dst, src); -} - -void Assembler::orps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x56); - emit_sse_operand(dst, src); -} - -void Assembler::xorps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x57); - emit_sse_operand(dst, src); -} - -void Assembler::addps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x58); - emit_sse_operand(dst, src); -} - -void Assembler::subps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x5C); - emit_sse_operand(dst, src); -} - -void Assembler::mulps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x59); - emit_sse_operand(dst, src); -} - -void Assembler::divps(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x0F); - EMIT(0x5E); - emit_sse_operand(dst, src); -} - void Assembler::rcpps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); @@ -2234,29 +2170,31 @@ void Assembler::rcpps(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::rsqrtps(XMMRegister dst, Operand src) { +void Assembler::sqrtps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x52); + EMIT(0x51); emit_sse_operand(dst, src); } -void Assembler::minps(XMMRegister dst, Operand src) { +void Assembler::rsqrtps(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x5D); + EMIT(0x52); emit_sse_operand(dst, src); } -void Assembler::maxps(XMMRegister dst, Operand src) { +void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) { EnsureSpace ensure_space(this); EMIT(0x0F); - EMIT(0x5F); + EMIT(0xC2); emit_sse_operand(dst, src); + EMIT(cmp); } -void 
Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) { +void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) { EnsureSpace ensure_space(this); + EMIT(0x66); EMIT(0x0F); EMIT(0xC2); emit_sse_operand(dst, src); @@ -2280,22 +2218,6 @@ void Assembler::haddps(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } -void Assembler::andpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x54); - emit_sse_operand(dst, src); -} - -void Assembler::orpd(XMMRegister dst, Operand src) { - EnsureSpace ensure_space(this); - EMIT(0x66); - EMIT(0x0F); - EMIT(0x56); - emit_sse_operand(dst, src); -} - void Assembler::ucomisd(XMMRegister dst, Operand src) { EnsureSpace ensure_space(this); EMIT(0x66); @@ -2398,6 +2320,16 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) { EMIT(imm8); } +void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) { + DCHECK(is_uint8(imm8)); + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x0F); + EMIT(0xC6); + emit_sse_operand(dst, src); + EMIT(imm8); +} + void Assembler::movdqa(Operand dst, XMMRegister src) { EnsureSpace ensure_space(this); EMIT(0x66); @@ -2776,6 +2708,23 @@ void Assembler::minss(XMMRegister dst, Operand src) { emit_sse_operand(dst, src); } +// Packed single-precision floating-point SSE instructions. +void Assembler::ps(byte opcode, XMMRegister dst, Operand src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(opcode); + emit_sse_operand(dst, src); +} + +// Packed double-precision floating-point SSE instructions. +void Assembler::pd(byte opcode, XMMRegister dst, Operand src) { + EnsureSpace ensure_space(this); + EMIT(0x66); + EMIT(0x0F); + EMIT(opcode); + emit_sse_operand(dst, src); +} + // AVX instructions void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) { @@ -2811,12 +2760,25 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) { vinstr(op, dst, src1, src2, k66, k0F, kWIG); } +void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, + byte imm8) { + DCHECK(is_uint8(imm8)); + vpd(0xC6, dst, src1, src2); + EMIT(imm8); +} + void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp) { vps(0xC2, dst, src1, src2); EMIT(cmp); } +void Assembler::vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, + uint8_t cmp) { + vpd(0xC2, dst, src1, src2); + EMIT(cmp); +} + void Assembler::vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8) { DCHECK(is_uint8(imm8)); @@ -2848,6 +2810,12 @@ void Assembler::vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) { EMIT(imm8); } +void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8) { + XMMRegister iop = XMMRegister::from_code(2); + vinstr(0x73, iop, dst, Operand(src), k66, k0F, kWIG); + EMIT(imm8); +} + void Assembler::vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) { XMMRegister iop = XMMRegister::from_code(4); vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG); @@ -3158,11 +3126,10 @@ void Assembler::emit_operand(int code, Operand adr) { DCHECK_GT(length, 0); // Emit updated ModRM byte containing the given register. - pc_[0] = (adr.buf_[0] & ~0x38) | (code << 3); + EMIT((adr.buf_[0] & ~0x38) | (code << 3)); // Emit the rest of the encoded operand. - for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i]; - pc_ += length; + for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]); // Emit relocation information if necessary. 
if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) { diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h index 52256212763e44..8161ff83223688 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/assembler-ia32.h @@ -38,6 +38,7 @@ #define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_ #include +#include #include "src/codegen/assembler.h" #include "src/codegen/ia32/constants-ia32.h" @@ -292,7 +293,7 @@ class V8_EXPORT_PRIVATE Operand { // Only valid if len_ > 4. RelocInfo::Mode rmode_ = RelocInfo::NONE; - // TODO(clemensh): Get rid of this friendship, or make Operand immutable. + // TODO(clemensb): Get rid of this friendship, or make Operand immutable. friend class Assembler; }; ASSERT_TRIVIALLY_COPYABLE(Operand); @@ -371,7 +372,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // own buffer. Otherwise it takes ownership of the provided buffer. explicit Assembler(const AssemblerOptions&, std::unique_ptr = {}); - virtual ~Assembler() {} // GetCode emits any pending (non-emitted) code and fills the descriptor desc. static constexpr int kNoHandlerTable = 0; @@ -512,6 +512,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movzx_w(Register dst, Operand src); void movq(XMMRegister dst, Operand src); + // Conditional moves void cmov(Condition cc, Register dst, Register src) { cmov(cc, dst, Operand(src)); @@ -849,56 +850,54 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movups(XMMRegister dst, Operand src); void movups(Operand dst, XMMRegister src); void shufps(XMMRegister dst, XMMRegister src, byte imm8); + void shufpd(XMMRegister dst, XMMRegister src, byte imm8); void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); } void maxss(XMMRegister dst, Operand src); void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); } void minss(XMMRegister dst, Operand src); - void andps(XMMRegister dst, Operand src); - void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); } - void andnps(XMMRegister dst, Operand src); - void andnps(XMMRegister dst, XMMRegister src) { andnps(dst, Operand(src)); } - void xorps(XMMRegister dst, Operand src); - void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); } - void orps(XMMRegister dst, Operand src); - void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); } - - void addps(XMMRegister dst, Operand src); - void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); } - void subps(XMMRegister dst, Operand src); - void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); } - void mulps(XMMRegister dst, Operand src); - void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); } - void divps(XMMRegister dst, Operand src); - void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); } void rcpps(XMMRegister dst, Operand src); void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); } + void sqrtps(XMMRegister dst, Operand src); + void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); } void rsqrtps(XMMRegister dst, Operand src); void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); } void haddps(XMMRegister dst, Operand src); void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); } - - void minps(XMMRegister dst, Operand src); - void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); } - void maxps(XMMRegister 
dst, Operand src); - void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); } + void sqrtpd(XMMRegister dst, Operand src) { + sse2_instr(dst, src, 0x66, 0x0F, 0x51); + } + void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); } void cmpps(XMMRegister dst, Operand src, uint8_t cmp); void cmpps(XMMRegister dst, XMMRegister src, uint8_t cmp) { cmpps(dst, Operand(src), cmp); } -#define SSE_CMP_P(instr, imm8) \ - void instr##ps(XMMRegister dst, XMMRegister src) { \ - cmpps(dst, Operand(src), imm8); \ - } \ - void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } + void cmppd(XMMRegister dst, Operand src, uint8_t cmp); + void cmppd(XMMRegister dst, XMMRegister src, uint8_t cmp) { + cmppd(dst, Operand(src), cmp); + } + +// Packed floating-point comparison operations. +#define PACKED_CMP_LIST(V) \ + V(cmpeq, 0x0) \ + V(cmplt, 0x1) \ + V(cmple, 0x2) \ + V(cmpunord, 0x3) \ + V(cmpneq, 0x4) - SSE_CMP_P(cmpeq, 0x0) - SSE_CMP_P(cmplt, 0x1) - SSE_CMP_P(cmple, 0x2) - SSE_CMP_P(cmpneq, 0x4) +#define SSE_CMP_P(instr, imm8) \ + void instr##ps(XMMRegister dst, XMMRegister src) { \ + cmpps(dst, Operand(src), imm8); \ + } \ + void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); } \ + void instr##pd(XMMRegister dst, XMMRegister src) { \ + cmppd(dst, Operand(src), imm8); \ + } \ + void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); } + PACKED_CMP_LIST(SSE_CMP_P) #undef SSE_CMP_P // SSE2 instructions @@ -941,22 +940,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void mulsd(XMMRegister dst, Operand src); void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); } void divsd(XMMRegister dst, Operand src); - void xorpd(XMMRegister dst, XMMRegister src) { xorpd(dst, Operand(src)); } - void xorpd(XMMRegister dst, Operand src); void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); } void sqrtsd(XMMRegister dst, Operand src); - void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); } - void andpd(XMMRegister dst, Operand src); - void orpd(XMMRegister dst, XMMRegister src) { orpd(dst, Operand(src)); } - void orpd(XMMRegister dst, Operand src); - void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); } void ucomisd(XMMRegister dst, Operand src); void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode); void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); + void movapd(XMMRegister dst, XMMRegister src) { movapd(dst, Operand(src)); } + void movapd(XMMRegister dst, Operand src) { + sse2_instr(dst, src, 0x66, 0x0F, 0x28); + } + void movmskpd(Register dst, XMMRegister src); void movmskps(Register dst, XMMRegister src); @@ -1298,6 +1295,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vrcpps(XMMRegister dst, Operand src) { vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG); } + void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); } + void vsqrtps(XMMRegister dst, Operand src) { + vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG); + } void vrsqrtps(XMMRegister dst, XMMRegister src) { vrsqrtps(dst, Operand(src)); } @@ -1310,14 +1311,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) { vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG); } + void vsqrtpd(XMMRegister dst, XMMRegister src) { vsqrtpd(dst, Operand(src)); } + void vsqrtpd(XMMRegister dst, Operand src) { + vinstr(0x51, dst, xmm0, src, k66, k0F, kWIG); + } void 
vmovaps(XMMRegister dst, XMMRegister src) { vmovaps(dst, Operand(src)); } void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); } + void vmovapd(XMMRegister dst, XMMRegister src) { vmovapd(dst, Operand(src)); } + void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); } void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); } void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); } void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) { vshufps(dst, src1, Operand(src2), imm8); } void vshufps(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8); + void vshufpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) { + vshufpd(dst, src1, Operand(src2), imm8); + } + void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8); void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8); @@ -1325,6 +1336,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8); + void vpsrlq(XMMRegister dst, XMMRegister src, uint8_t imm8); void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) { vpshufhw(dst, Operand(src), shuffle); @@ -1489,6 +1501,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } void rorx(Register dst, Operand src, byte imm8); + // Implementation of packed single-precision floating-point SSE instructions. + void ps(byte op, XMMRegister dst, Operand src); + // Implementation of packed double-precision floating-point SSE instructions. + void pd(byte op, XMMRegister dst, Operand src); + #define PACKED_OP_LIST(V) \ V(and, 0x54) \ V(andn, 0x55) \ @@ -1501,6 +1518,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { V(div, 0x5e) \ V(max, 0x5f) +#define SSE_PACKED_OP_DECLARE(name, opcode) \ + void name##ps(XMMRegister dst, XMMRegister src) { \ + ps(opcode, dst, Operand(src)); \ + } \ + void name##ps(XMMRegister dst, Operand src) { ps(opcode, dst, src); } \ + void name##pd(XMMRegister dst, XMMRegister src) { \ + pd(opcode, dst, Operand(src)); \ + } \ + void name##pd(XMMRegister dst, Operand src) { pd(opcode, dst, src); } + + PACKED_OP_LIST(SSE_PACKED_OP_DECLARE) +#undef SSE_PACKED_OP_DECLARE + #define AVX_PACKED_OP_DECLARE(name, opcode) \ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ vps(opcode, dst, src1, Operand(src2)); \ @@ -1516,24 +1546,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } PACKED_OP_LIST(AVX_PACKED_OP_DECLARE) +#undef AVX_PACKED_OP_DECLARE +#undef PACKED_OP_LIST + void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2); void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2); void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp); -#define AVX_CMP_P(instr, imm8) \ - void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ - vcmpps(dst, src1, Operand(src2), imm8); \ - } \ - void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \ - vcmpps(dst, src1, src2, imm8); \ - } - - AVX_CMP_P(vcmpeq, 0x0) - AVX_CMP_P(vcmplt, 0x1) - AVX_CMP_P(vcmple, 0x2) - AVX_CMP_P(vcmpneq, 0x4) - + void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t cmp); + +#define AVX_CMP_P(instr, imm8) \ + void v##instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ + 
vcmpps(dst, src1, Operand(src2), imm8); \ + } \ + void v##instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \ + vcmpps(dst, src1, src2, imm8); \ + } \ + void v##instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ + vcmppd(dst, src1, Operand(src2), imm8); \ + } \ + void v##instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \ + vcmppd(dst, src1, src2, imm8); \ + } + + PACKED_CMP_LIST(AVX_CMP_P) #undef AVX_CMP_P +#undef PACKED_CMP_LIST // Other SSE and AVX instructions #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \ diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index 070f3159776a76..dd11bc496eda80 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -1168,57 +1168,44 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - push(eax); - cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0)); - pop(eax); - j(equal, &skip_hook); - - { - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg()); - Push(actual.reg()); - SmiUntag(actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Operand receiver_op = - actual.is_reg() - ? Operand(ebp, actual.reg(), times_system_pointer_size, - kSystemPointerSize * 2) - : Operand(ebp, actual.immediate() * times_system_pointer_size + - kSystemPointerSize * 2); - Push(receiver_op); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + SmiUntag(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Operand receiver_op = + actual.is_reg() + ? Operand(ebp, actual.reg(), times_system_pointer_size, + kSystemPointerSize * 2) + : Operand(ebp, actual.immediate() * times_system_pointer_size + + kSystemPointerSize * 2); + Push(receiver_op); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, @@ -1233,7 +1220,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax); // On function call, call into the debugger if necessary. 
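// Editor's note (illustrative, not part of the patch): the rewrite below
// replaces the inlined CheckDebugHook() with an explicit fast path. The hot
// path only loads debug_hook_on_function_call and takes a forward branch to
// a deferred block when it is non-zero; the deferred block, bound after the
// tail of InvokeFunctionCode, calls CallDebugOnFunctionCall() and jumps
// back, so the common no-debugger case stays straight-line. Control flow,
// under that reading:
//
//   if (*debug_hook_on_function_call != 0) goto debug_hook;   // rare
//   continue_after_hook:
//     ... invoke the function ...; goto done;
//   debug_hook:
//     CallDebugOnFunctionCall(fun, new_target, expected, actual);
//     goto continue_after_hook;
//   done: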
- CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + push(eax); + cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0)); + pop(eax); + j(not_equal, &debug_hook, Label::kNear); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -1256,8 +1252,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(ecx); } - bind(&done); } + jmp(&done, Label::kNear); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + jmp(&continue_after_hook, Label::kNear); + + bind(&done); } void MacroAssembler::InvokeFunction(Register fun, Register new_target, @@ -1479,6 +1482,15 @@ void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) { } } +void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrlq(dst, dst, shift); + } else { + psrlq(dst, shift); + } +} + void TurboAssembler::Psignb(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index c65871cfad34a2..9e7774c55d5da6 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Pshufd(XMMRegister dst, Operand src, uint8_t shuffle); void Psraw(XMMRegister dst, uint8_t shift); void Psrlw(XMMRegister dst, uint8_t shift); + void Psrlq(XMMRegister dst, uint8_t shift); // SSE/SSE2 instructions with AVX version. #define AVX_OP2_WITH_TYPE(macro_name, name, dst_type, src_type) \ @@ -258,6 +259,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister) AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister) AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand) + AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&) + AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister) + AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&) #undef AVX_OP2_WITH_TYPE @@ -278,6 +282,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP3_XO(Packsswb, packsswb) AVX_OP3_XO(Packuswb, packuswb) + AVX_OP3_XO(Paddusb, paddusb) AVX_OP3_XO(Pcmpeqb, pcmpeqb) AVX_OP3_XO(Pcmpeqw, pcmpeqw) AVX_OP3_XO(Pcmpeqd, pcmpeqd) @@ -294,10 +299,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP3_XO(Xorpd, xorpd) AVX_OP3_XO(Sqrtss, sqrtss) AVX_OP3_XO(Sqrtsd, sqrtsd) + AVX_OP3_XO(Orpd, orpd) + AVX_OP3_XO(Andnpd, andnpd) #undef AVX_OP3_XO #undef AVX_OP3_WITH_TYPE +// Only use this macro when dst and src1 is the same in SSE case. 
+#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \ + void macro_name(dst_type dst, dst_type src1, src_type src2) { \ + if (CpuFeatures::IsSupported(AVX)) { \ + CpuFeatureScope scope(this, AVX); \ + v##name(dst, src1, src2); \ + } else { \ + DCHECK_EQ(dst, src1); \ + name(dst, src2); \ + } \ + } +#define AVX_PACKED_OP3(macro_name, name) \ + AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \ + AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand) + + AVX_PACKED_OP3(Addpd, addpd) + AVX_PACKED_OP3(Subpd, subpd) + AVX_PACKED_OP3(Mulpd, mulpd) + AVX_PACKED_OP3(Divpd, divpd) + AVX_PACKED_OP3(Cmpeqpd, cmpeqpd) + AVX_PACKED_OP3(Cmpneqpd, cmpneqpd) + AVX_PACKED_OP3(Cmpltpd, cmpltpd) + AVX_PACKED_OP3(Cmplepd, cmplepd) + AVX_PACKED_OP3(Minpd, minpd) + AVX_PACKED_OP3(Maxpd, maxpd) + AVX_PACKED_OP3(Cmpunordpd, cmpunordpd) +#undef AVX_PACKED_OP3 +#undef AVX_PACKED_OP3_WITH_TYPE + // Non-SSE2 instructions. #define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \ sse_scope) \ @@ -529,11 +565,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. + // On function call, call into the debugger. // This may clobber ecx. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc index f537ebc899428c..1525f814cd97d6 100644 --- a/deps/v8/src/codegen/interface-descriptors.cc +++ b/deps/v8/src/codegen/interface-descriptors.cc @@ -278,6 +278,11 @@ void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(0, nullptr); } +void GetIteratorStackParameterDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + data->InitializePlatformSpecific(0, nullptr); +} + void LoadWithVectorDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(), diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index 544d62fd9f01d7..e305d666a3e70e 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -9,12 +9,17 @@ #include "src/codegen/machine-type.h" #include "src/codegen/register-arch.h" +#include "src/codegen/tnode.h" #include "src/common/globals.h" #include "src/execution/isolate.h" namespace v8 { namespace internal { +#define TORQUE_BUILTIN_LIST_TFC(V) \ + BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \ + IGNORE_BUILTIN, IGNORE_BUILTIN) + #define INTERFACE_DESCRIPTOR_LIST(V) \ V(Abort) \ V(Allocate) \ @@ -52,6 +57,7 @@ namespace internal { V(FastNewFunctionContext) \ V(FastNewObject) \ V(FrameDropperTrampoline) \ + V(GetIteratorStackParameter) \ V(GetProperty) \ V(GrowArrayElements) \ V(InterpreterCEntry1) \ @@ -89,7 +95,8 @@ namespace internal { V(WasmTableGet) \ V(WasmTableSet) \ V(WasmThrow) \ - BUILTIN_LIST_TFS(V) + BUILTIN_LIST_TFS(V) \ + TORQUE_BUILTIN_LIST_TFC(V) class 
V8_EXPORT_PRIVATE CallInterfaceDescriptorData { public: @@ -486,6 +493,46 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor { DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor) }; +// This class is subclassed by Torque-generated call interface descriptors. +template +class TorqueInterfaceDescriptor : public CallInterfaceDescriptor { + public: + static constexpr int kDescriptorFlags = CallInterfaceDescriptorData::kNoFlags; + static constexpr int kParameterCount = parameter_count; + enum ParameterIndices { kContext = kParameterCount }; + template + static ParameterIndices ParameterIndex() { + STATIC_ASSERT(0 <= i && i < kParameterCount); + return static_cast(i); + } + static constexpr int kReturnCount = 1; + + using CallInterfaceDescriptor::CallInterfaceDescriptor; + + protected: + static const int kRegisterParams = + kParameterCount > kMaxTFSBuiltinRegisterParams + ? kMaxTFSBuiltinRegisterParams + : kParameterCount; + static const int kStackParams = kParameterCount - kRegisterParams; + virtual MachineType ReturnType() = 0; + virtual std::array ParameterTypes() = 0; + void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override { + DefaultInitializePlatformSpecific(data, kRegisterParams); + } + void InitializePlatformIndependent( + CallInterfaceDescriptorData* data) override { + std::vector machine_types = {ReturnType()}; + auto parameter_types = ParameterTypes(); + machine_types.insert(machine_types.end(), parameter_types.begin(), + parameter_types.end()); + DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size()); + data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, + kParameterCount, machine_types.data(), + static_cast(machine_types.size())); + } +}; + // Dummy descriptor used to mark builtins that don't yet have their proper // descriptor associated. using DummyDescriptor = VoidDescriptor; @@ -706,7 +753,7 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kScopeInfo, kSlots) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kScopeInfo - MachineType::Int32()) // kSlots + MachineType::Uint32()) // kSlots DECLARE_DESCRIPTOR(FastNewFunctionContextDescriptor, CallInterfaceDescriptor) static const Register ScopeInfoRegister(); @@ -771,6 +818,16 @@ class AsyncFunctionStackParameterDescriptor final CallInterfaceDescriptor) }; +class GetIteratorStackParameterDescriptor final + : public CallInterfaceDescriptor { + public: + DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult) + DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(), + MachineType::AnyTagged(), MachineType::AnyTagged()) + DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor, + CallInterfaceDescriptor) +}; + class GetPropertyDescriptor final : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kObject, kKey) @@ -1298,6 +1355,11 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor { BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR) #undef DEFINE_TFS_BUILTIN_DESCRIPTOR +// This file contains interface descriptor class definitions for builtins +// defined in Torque. It is included here because the class definitions need to +// precede the definition of name##Descriptor::key() below. 
+#include "torque-generated/interface-descriptors-tq.inc" + #undef DECLARE_DEFAULT_DESCRIPTOR #undef DECLARE_DESCRIPTOR_WITH_BASE #undef DECLARE_DESCRIPTOR diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h index 15e3df65c5adc9..a0bef4e07d65a4 100644 --- a/deps/v8/src/codegen/machine-type.h +++ b/deps/v8/src/codegen/machine-type.h @@ -9,6 +9,7 @@ #include "src/base/bits.h" #include "src/common/globals.h" +#include "src/flags/flags.h" namespace v8 { namespace internal { @@ -114,6 +115,10 @@ class MachineType { constexpr bool IsCompressedPointer() const { return representation() == MachineRepresentation::kCompressedPointer; } + constexpr static MachineRepresentation TaggedRepresentation() { + return (kTaggedSize == 4) ? MachineRepresentation::kWord32 + : MachineRepresentation::kWord64; + } constexpr static MachineRepresentation PointerRepresentation() { return (kSystemPointerSize == 4) ? MachineRepresentation::kWord32 : MachineRepresentation::kWord64; @@ -239,71 +244,79 @@ class MachineType { // pointer flag is enabled. Otherwise, they returned the corresponding tagged // one. constexpr static MachineRepresentation RepCompressedTagged() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressed; -#else - return MachineRepresentation::kTagged; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressed; + } else { + return MachineRepresentation::kTagged; + } } constexpr static MachineRepresentation RepCompressedTaggedSigned() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressedSigned; -#else - return MachineRepresentation::kTaggedSigned; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressedSigned; + } else { + return MachineRepresentation::kTaggedSigned; + } } constexpr static MachineRepresentation RepCompressedTaggedPointer() { -#ifdef V8_COMPRESS_POINTERS - return MachineRepresentation::kCompressedPointer; -#else - return MachineRepresentation::kTaggedPointer; -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineRepresentation::kCompressedPointer; + } else { + return MachineRepresentation::kTaggedPointer; + } + } + + constexpr static MachineType TypeRawTagged() { + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::Int32(); + } else { + return MachineType::Pointer(); + } } constexpr static MachineType TypeCompressedTagged() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::AnyCompressed(); -#else - return MachineType::AnyTagged(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::AnyCompressed(); + } else { + return MachineType::AnyTagged(); + } } constexpr static MachineType TypeCompressedTaggedSigned() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::CompressedSigned(); -#else - return MachineType::TaggedSigned(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::CompressedSigned(); + } else { + return MachineType::TaggedSigned(); + } } constexpr static MachineType TypeCompressedTaggedPointer() { -#ifdef V8_COMPRESS_POINTERS - return MachineType::CompressedPointer(); -#else - return MachineType::TaggedPointer(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return MachineType::CompressedPointer(); + } else { + return MachineType::TaggedPointer(); + } } 
constexpr bool IsCompressedTagged() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressed(); -#else - return IsTagged(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressed(); + } else { + return IsTagged(); + } } constexpr bool IsCompressedTaggedSigned() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressedSigned(); -#else - return IsTaggedSigned(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressedSigned(); + } else { + return IsTaggedSigned(); + } } constexpr bool IsCompressedTaggedPointer() const { -#ifdef V8_COMPRESS_POINTERS - return IsCompressedPointer(); -#else - return IsTaggedPointer(); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsCompressedPointer(); + } else { + return IsTaggedPointer(); + } } static MachineType TypeForRepresentation(const MachineRepresentation& rep, @@ -405,11 +418,11 @@ inline bool IsAnyCompressed(MachineRepresentation rep) { } inline bool IsAnyCompressedTagged(MachineRepresentation rep) { -#ifdef V8_COMPRESS_POINTERS - return IsAnyCompressed(rep); -#else - return IsAnyTagged(rep); -#endif + if (COMPRESS_POINTERS_BOOL && FLAG_turbo_decompression_elimination) { + return IsAnyCompressed(rep); + } else { + return IsAnyTagged(rep); + } } // Gets the log2 of the element size in bytes of the machine type. @@ -431,7 +444,6 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) { case MachineRepresentation::kTaggedSigned: case MachineRepresentation::kTaggedPointer: case MachineRepresentation::kTagged: - return kSystemPointerSizeLog2; case MachineRepresentation::kCompressedSigned: case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressed: diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h index d8181ad8f5b958..53e6f93411b700 100644 --- a/deps/v8/src/codegen/mips/assembler-mips-inl.h +++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h @@ -133,7 +133,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc, if (Assembler::IsJicOrJialc(instr2)) { // Encoded internal references are lui/jic load of 32-bit absolute address. 
uint32_t lui_offset_u, jic_offset_u; - Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); @@ -183,7 +183,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc index 423da2fb65f778..768b16b86c4433 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.cc +++ b/deps/v8/src/codegen/mips/assembler-mips.cc @@ -231,8 +231,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: const StringConstantBase* str = request.string(); @@ -742,27 +742,27 @@ uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) { // before that addition, difference between upper part of the target address and // upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted // in jic register with lui instruction. -void Assembler::UnpackTargetAddress(uint32_t address, int16_t& lui_offset, - int16_t& jic_offset) { - lui_offset = (address & kHiMask) >> kLuiShift; - jic_offset = address & kLoMask; +void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset) { + *lui_offset = (address & kHiMask) >> kLuiShift; + *jic_offset = address & kLoMask; - if (jic_offset < 0) { - lui_offset -= kImm16Mask; + if (*jic_offset < 0) { + *lui_offset -= kImm16Mask; } } void Assembler::UnpackTargetAddressUnsigned(uint32_t address, - uint32_t& lui_offset, - uint32_t& jic_offset) { + uint32_t* lui_offset, + uint32_t* jic_offset) { int16_t lui_offset16 = (address & kHiMask) >> kLuiShift; int16_t jic_offset16 = address & kLoMask; if (jic_offset16 < 0) { lui_offset16 -= kImm16Mask; } - lui_offset = static_cast(lui_offset16) & kImm16Mask; - jic_offset = static_cast(jic_offset16) & kImm16Mask; + *lui_offset = static_cast(lui_offset16) & kImm16Mask; + *jic_offset = static_cast(jic_offset16) & kImm16Mask; } void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui, @@ -977,7 +977,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos, if (IsJicOrJialc(instr2)) { uint32_t lui_offset_u, jic_offset_u; - UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u); instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u); } else { @@ -1928,7 +1928,7 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) { // ------------Memory-instructions------------- -void Assembler::AdjustBaseAndOffset(MemOperand& src, +void Assembler::AdjustBaseAndOffset(MemOperand* src, OffsetAccessType access_type, int 
second_access_add_to_offset) { // This method is used to adjust the base register and offset pair @@ -1941,26 +1941,26 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // pointer register). // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. - bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0; + bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; bool two_accesses = static_cast(access_type) || !doubleword_aligned; DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. // is_int16 must be passed a signed value, hence the static cast below. - if (is_int16(src.offset()) && + if (is_int16(src->offset()) && (!two_accesses || is_int16(static_cast( - src.offset() + second_access_add_to_offset)))) { + src->offset() + second_access_add_to_offset)))) { // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified // value) fits into int16_t. return; } UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - DCHECK(src.rm() != scratch); // Must not overwrite the register 'base' - // while loading 'offset'. + DCHECK(src->rm() != scratch); // Must not overwrite the register 'base' + // while loading 'offset'. #ifdef DEBUG // Remember the "(mis)alignment" of 'offset', it will be checked at the end. - uint32_t misalignment = src.offset() & (kDoubleSize - 1); + uint32_t misalignment = src->offset() & (kDoubleSize - 1); #endif // Do not load the whole 32-bit 'offset' if it can be represented as @@ -1972,13 +1972,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 0x7FF8; // Max int16_t that's a multiple of 8. constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) { - addiu(at, src.rm(), kMinOffsetForSimpleAdjustment); - src.offset_ -= kMinOffsetForSimpleAdjustment; - } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() && - src.offset() < 0) { - addiu(at, src.rm(), -kMinOffsetForSimpleAdjustment); - src.offset_ += kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + addiu(at, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + addiu(at, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; } else if (IsMipsArchVariant(kMips32r6)) { // On r6 take advantage of the aui instruction, e.g.: // aui at, base, offset_high @@ -1989,12 +1989,12 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // addiu at, at, 8 // lw reg_lo, (offset_low-8)(at) // lw reg_hi, (offset_low-4)(at) - int16_t offset_high = static_cast(src.offset() >> 16); - int16_t offset_low = static_cast(src.offset()); + int16_t offset_high = static_cast(src->offset() >> 16); + int16_t offset_low = static_cast(src->offset()); offset_high += (offset_low < 0) ? 1 : 0; // Account for offset sign extension in load/store. 
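// Editor's note (illustrative, not part of the patch): worked example of the
// sign-extension fix-up just above. For offset 0x12348000:
//   offset_high = 0x1234, offset_low = 0x8000 read as int16_t = -0x8000.
//   aui(at, base, 0x1234); lw(reg, -0x8000(at)) would address
//   base + 0x12340000 - 0x8000 = base + 0x12338000   // off by 0x10000.
// Bumping offset_high to 0x1235 compensates:
//   base + 0x12350000 - 0x8000 = base + 0x12348000   // correct.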
- aui(scratch, src.rm(), static_cast(offset_high)); + aui(scratch, src->rm(), static_cast(offset_high)); if (two_accesses && !is_int16(static_cast( offset_low + second_access_add_to_offset))) { // Avoid overflow in the 16-bit offset of the load/store instruction when @@ -2002,7 +2002,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, addiu(scratch, scratch, kDoubleSize); offset_low -= kDoubleSize; } - src.offset_ = offset_low; + src->offset_ = offset_low; } else { // Do not load the whole 32-bit 'offset' if it can be represented as // a sum of three 16-bit signed offsets. This can save an instruction. @@ -2013,62 +2013,62 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 2 * kMinOffsetForSimpleAdjustment; constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) { - addiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2); + if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { + addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); - src.offset_ -= kMinOffsetForMediumAdjustment; - } else if (-kMaxOffsetForMediumAdjustment <= src.offset() && - src.offset() < 0) { - addiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2); + src->offset_ -= kMinOffsetForMediumAdjustment; + } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && + src->offset() < 0) { + addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); - src.offset_ += kMinOffsetForMediumAdjustment; + src->offset_ += kMinOffsetForMediumAdjustment; } else { // Now that all shorter options have been exhausted, load the full 32-bit // offset. - int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize); + int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. 
- addu(scratch, scratch, src.rm()); - src.offset_ -= loaded_offset; + addu(scratch, scratch, src->rm()); + src->offset_ -= loaded_offset; } } - src.rm_ = scratch; + src->rm_ = scratch; - DCHECK(is_int16(src.offset())); + DCHECK(is_int16(src->offset())); if (two_accesses) { DCHECK(is_int16( - static_cast(src.offset() + second_access_add_to_offset))); + static_cast(src->offset() + second_access_add_to_offset))); } - DCHECK(misalignment == (src.offset() & (kDoubleSize - 1))); + DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); } void Assembler::lb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LB, source.rm(), rd, source.offset()); } void Assembler::lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LBU, source.rm(), rd, source.offset()); } void Assembler::lh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LH, source.rm(), rd, source.offset()); } void Assembler::lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LHU, source.rm(), rd, source.offset()); } void Assembler::lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(LW, source.rm(), rd, source.offset()); } @@ -2088,19 +2088,19 @@ void Assembler::lwr(Register rd, const MemOperand& rs) { void Assembler::sb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SB, source.rm(), rd, source.offset()); } void Assembler::sh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SH, source.rm(), rd, source.offset()); } void Assembler::sw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); GenInstrImmediate(SW, source.rm(), rd, source.offset()); } @@ -2385,13 +2385,13 @@ void Assembler::seb(Register rd, Register rt) { // Load, store, move. 
void Assembler::lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset()); } void Assembler::swc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset()); } @@ -2969,7 +2969,7 @@ MSA_BRANCH_LIST(MSA_BRANCH) #define MSA_LD_ST(name, opcode) \ void Assembler::name(MSARegister wd, const MemOperand& rs) { \ MemOperand source = rs; \ - AdjustBaseAndOffset(source); \ + AdjustBaseAndOffset(&source); \ if (is_int10(source.offset())) { \ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ } else { \ @@ -3473,7 +3473,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, if (IsJicOrJialc(instr2)) { uint32_t lui_offset_u, jic_offset_u; - Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u); + Assembler::UnpackTargetAddressUnsigned(imm, + &lui_offset_u, &jic_offset_u); instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); } else { @@ -3717,7 +3718,7 @@ void Assembler::set_target_value_at(Address pc, uint32_t target, if (IsJicOrJialc(instr2)) { // Must use 2 instructions to insure patchable code => use lui and jic uint32_t lui_offset, jic_offset; - Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset); + Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); instr1 &= ~kImm16Mask; instr2 &= ~kImm16Mask; diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h index 0359be2c94aef8..d8cb8ec3f2a9e8 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.h +++ b/deps/v8/src/codegen/mips/assembler-mips.h @@ -36,6 +36,7 @@ #define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ #include +#include #include @@ -1478,13 +1479,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static bool IsAddImmediate(Instr instr); static Instr SetAddImmediateOffset(Instr instr, int16_t offset); static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic); - static void UnpackTargetAddress( - uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references) - int16_t& jic_offset); // NOLINT(runtime/references) - static void UnpackTargetAddressUnsigned( - uint32_t address, - uint32_t& lui_offset, // NOLINT(runtime/references) - uint32_t& jic_offset); // NOLINT(runtime/references) + static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset, + int16_t* jic_offset); + static void UnpackTargetAddressUnsigned(uint32_t address, + uint32_t* lui_offset, + uint32_t* jic_offset); static bool IsAndImmediate(Instr instr); static bool IsEmittedConstant(Instr instr); @@ -1515,7 +1514,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, // NOLINT(runtime/references) + MemOperand* src, OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc index 2e4698a9e71c78..760d33d7c9179b 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc @@ -1063,7 +1063,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); if (rd != source.rm()) { lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); @@ -1089,7 +1089,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); } @@ -1105,7 +1105,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1140,7 +1140,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1177,7 +1177,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { IsMipsArchVariant(kLoongson)); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); if (scratch != rd) { mov(scratch, rd); @@ -1256,7 +1256,7 @@ void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); MemOperand tmp = src; - AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); if (IsFp32Mode()) { // fp32 mode. 
FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); @@ -1284,7 +1284,7 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); MemOperand tmp = src; - AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES); + AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); if (IsFp32Mode()) { // fp32 mode. FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); @@ -1305,13 +1305,13 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { void TurboAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lw(rd, source); } void TurboAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand dest = rs; - AdjustBaseAndOffset(dest); + AdjustBaseAndOffset(&dest); sw(rd, dest); } @@ -2926,18 +2926,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; - offset = GetOffset(offset, L, bits); + *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt) { +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; - scratch = GetRtAsRegisterHelper(rt, scratch); - offset = GetOffset(offset, L, bits); + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); return true; } @@ -2955,23 +2955,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); break; case eq: if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 beq is used here to make the code patchable. Otherwise bc // should be used which has no condition field so is not patchable. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beq(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beqc(rs, scratch, offset); } @@ -2980,16 +2980,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 bne is used here to make the code patchable. Otherwise we // should not generate any instruction. 
- if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bne(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bnec(rs, scratch, offset); } @@ -3001,14 +3001,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(scratch, rs, offset); @@ -3017,17 +3017,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(rs, scratch, offset); @@ -3038,14 +3038,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(rs, scratch, offset); @@ -3054,17 +3054,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(scratch, rs, offset); @@ -3077,14 +3077,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(scratch, rs, offset); @@ -3093,17 +3093,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Ugreater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; beqzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(rs, scratch, offset); @@ -3114,13 +3114,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { break; // No code needs to be emitted. } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(rs, scratch, offset); @@ -3129,17 +3129,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Uless_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) return false; bc(offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(scratch, rs, offset); @@ -3418,7 +3418,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); break; case eq: @@ -3440,11 +3440,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3456,14 +3456,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3477,11 +3477,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3493,14 +3493,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= r2 if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3751,8 +3751,8 @@ void TurboAssembler::Jump(Register target, const Operand& offset, if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && !is_int16(offset.immediate())) { uint32_t aui_offset, jic_offset; - Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset, - jic_offset); + Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset, + &jic_offset); RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate()); aui(target, target, aui_offset); if (cond == cc_always) { @@ -3790,7 +3790,7 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, // This is not an issue, t9 is expected to be clobbered anyway. 
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { uint32_t lui_offset, jic_offset; - UnpackTargetAddressUnsigned(target, lui_offset, jic_offset); + UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset); if (MustUseReg(rmode)) { RecordRelocInfo(rmode, target); } @@ -3853,10 +3853,8 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, } void TurboAssembler::Jump(const ExternalReference& reference) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, reference); - Jump(scratch); + li(t9, reference); + Jump(t9); } void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, @@ -3940,7 +3938,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, int32_t target_int = static_cast(target); if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) { uint32_t lui_offset, jialc_offset; - UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset); + UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset); if (MustUseReg(rmode)) { RecordRelocInfo(rmode, target_int); } @@ -3990,7 +3988,6 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, } } DCHECK(RelocInfo::IsCodeTarget(rmode)); - AllowDeferredHandleDereference embedding_raw_address; Call(code.address(), rmode, cond, rs, rt, bd); } diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h index d9c372f8687155..e82c88f0b5e4c6 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.h +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h @@ -849,12 +849,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd = PROTECT); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits, - Register& scratch, // NOLINT(runtime/references) - const Operand& rt); + // TODO(mips) Reorder parameters so out parameters come last. 
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h index 7b9946d16eb061..cacdbd8f8bbb32 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h +++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h @@ -159,7 +159,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc index 801faf6306d861..37a05585c4b873 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc @@ -207,8 +207,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); break; case HeapObjectRequest::kStringConstant: const StringConstantBase* str = request.string(); @@ -1996,7 +1996,7 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) { // ------------Memory-instructions------------- -void Assembler::AdjustBaseAndOffset(MemOperand& src, +void Assembler::AdjustBaseAndOffset(MemOperand* src, OffsetAccessType access_type, int second_access_add_to_offset) { // This method is used to adjust the base register and offset pair @@ -2009,25 +2009,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // pointer register). // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8. - bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0; + bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; bool two_accesses = static_cast(access_type) || !doubleword_aligned; DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. // is_int16 must be passed a signed value, hence the static cast below. - if (is_int16(src.offset()) && + if (is_int16(src->offset()) && (!two_accesses || is_int16(static_cast( - src.offset() + second_access_add_to_offset)))) { + src->offset() + second_access_add_to_offset)))) { // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified // value) fits into int16_t. return; } - DCHECK(src.rm() != + DCHECK(src->rm() != at); // Must not overwrite the register 'base' while loading 'offset'. #ifdef DEBUG // Remember the "(mis)alignment" of 'offset', it will be checked at the end. 
- uint32_t misalignment = src.offset() & (kDoubleSize - 1); + uint32_t misalignment = src->offset() & (kDoubleSize - 1); #endif // Do not load the whole 32-bit 'offset' if it can be represented as @@ -2042,13 +2042,13 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) { - daddiu(scratch, src.rm(), kMinOffsetForSimpleAdjustment); - src.offset_ -= kMinOffsetForSimpleAdjustment; - } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() && - src.offset() < 0) { - daddiu(scratch, src.rm(), -kMinOffsetForSimpleAdjustment); - src.offset_ += kMinOffsetForSimpleAdjustment; + if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { + daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment); + src->offset_ -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && + src->offset() < 0) { + daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); + src->offset_ += kMinOffsetForSimpleAdjustment; } else if (kArchVariant == kMips64r6) { // On r6 take advantage of the daui instruction, e.g.: // daui at, base, offset_high @@ -2060,9 +2060,9 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, // daddiu at, at, 8 // lw reg_lo, (offset_low-8)(at) // lw reg_hi, (offset_low-4)(at) - int16_t offset_low = static_cast(src.offset()); + int16_t offset_low = static_cast(src->offset()); int32_t offset_low32 = offset_low; - int16_t offset_high = static_cast(src.offset() >> 16); + int16_t offset_high = static_cast(src->offset() >> 16); bool increment_hi16 = offset_low < 0; bool overflow_hi16 = false; @@ -2070,7 +2070,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, offset_high++; overflow_hi16 = (offset_high == -32768); } - daui(scratch, src.rm(), static_cast(offset_high)); + daui(scratch, src->rm(), static_cast(offset_high)); if (overflow_hi16) { dahi(scratch, 1); @@ -2084,7 +2084,7 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, offset_low32 -= kDoubleSize; } - src.offset_ = offset_low32; + src->offset_ = offset_low32; } else { // Do not load the whole 32-bit 'offset' if it can be represented as // a sum of three 16-bit signed offsets. This can save an instruction. @@ -2095,33 +2095,33 @@ void Assembler::AdjustBaseAndOffset(MemOperand& src, 2 * kMinOffsetForSimpleAdjustment; constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment; - if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) { - daddiu(scratch, src.rm(), kMinOffsetForMediumAdjustment / 2); + if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { + daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); - src.offset_ -= kMinOffsetForMediumAdjustment; - } else if (-kMaxOffsetForMediumAdjustment <= src.offset() && - src.offset() < 0) { - daddiu(scratch, src.rm(), -kMinOffsetForMediumAdjustment / 2); + src->offset_ -= kMinOffsetForMediumAdjustment; + } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && + src->offset() < 0) { + daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); - src.offset_ += kMinOffsetForMediumAdjustment; + src->offset_ += kMinOffsetForMediumAdjustment; } else { // Now that all shorter options have been exhausted, load the full 32-bit // offset. 
- int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize); + int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. - daddu(scratch, scratch, src.rm()); - src.offset_ -= loaded_offset; + daddu(scratch, scratch, src->rm()); + src->offset_ -= loaded_offset; } } - src.rm_ = scratch; + src->rm_ = scratch; - DCHECK(is_int16(src.offset())); + DCHECK(is_int16(src->offset())); if (two_accesses) { DCHECK(is_int16( - static_cast(src.offset() + second_access_add_to_offset))); + static_cast(src->offset() + second_access_add_to_offset))); } - DCHECK(misalignment == (src.offset() & (kDoubleSize - 1))); + DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); } void Assembler::lb(Register rd, const MemOperand& rs) { @@ -3169,7 +3169,7 @@ MSA_BRANCH_LIST(MSA_BRANCH) #define MSA_LD_ST(name, opcode) \ void Assembler::name(MSARegister wd, const MemOperand& rs) { \ MemOperand source = rs; \ - AdjustBaseAndOffset(source); \ + AdjustBaseAndOffset(&source); \ if (is_int10(source.offset())) { \ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ } else { \ diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h index 9695aa652486ff..48733eebea524d 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/assembler-mips64.h @@ -36,7 +36,7 @@ #define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_ #include - +#include #include #include "src/codegen/assembler.h" @@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. void AdjustBaseAndOffset( - MemOperand& src, // NOLINT(runtime/references) + MemOperand* src, OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index b3537860643784..2ea770d224070e 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -1166,7 +1166,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); if (rd != source.rm()) { lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); @@ -1201,7 +1201,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); } @@ -1216,7 +1216,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. 
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1250,7 +1250,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); if (source.rm() == scratch) { @@ -1286,7 +1286,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { DCHECK_EQ(kArchVariant, kMips64r2); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); if (scratch != rd) { mov(scratch, rd); @@ -1314,7 +1314,7 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) { DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 7 fits into int16_t. - AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7); if (rd != source.rm()) { ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset)); ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset)); @@ -1349,7 +1349,7 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) { DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7); MemOperand source = rs; // Adjust offset for two accesses and check if offset + 7 fits into int16_t. 
- AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7); + AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 7); sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset)); sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset)); } @@ -1411,91 +1411,91 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, void TurboAssembler::Lb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lb(rd, source); } void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lbu(rd, source); } void TurboAssembler::Sb(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sb(rd, source); } void TurboAssembler::Lh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lh(rd, source); } void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lhu(rd, source); } void TurboAssembler::Sh(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sh(rd, source); } void TurboAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lw(rd, source); } void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); lwu(rd, source); } void TurboAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sw(rd, source); } void TurboAssembler::Ld(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); ld(rd, source); } void TurboAssembler::Sd(Register rd, const MemOperand& rs) { MemOperand source = rs; - AdjustBaseAndOffset(source); + AdjustBaseAndOffset(&source); sd(rd, source); } void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); lwc1(fd, tmp); } void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); swc1(fs, tmp); } void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); ldc1(fd, tmp); } void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; - AdjustBaseAndOffset(tmp); + AdjustBaseAndOffset(&tmp); sdc1(fs, tmp); } @@ -3362,18 +3362,18 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; - offset = GetOffset(offset, L, bits); + *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt) { +bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; - scratch = GetRtAsRegisterHelper(rt, 
scratch); - offset = GetOffset(offset, L, bits); + *scratch = GetRtAsRegisterHelper(rt, *scratch); + *offset = GetOffset(*offset, L, bits); return true; } @@ -3392,23 +3392,23 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); break; case eq: if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 beq is used here to make the code patchable. Otherwise bc // should be used which has no condition field so is not patchable. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beq(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; beqc(rs, scratch, offset); } @@ -3417,16 +3417,16 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { // Pre R6 bne is used here to make the code patchable. Otherwise we // should not generate any instruction. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bne(rs, scratch, offset); nop(); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bnec(rs, scratch, offset); } @@ -3438,14 +3438,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(scratch, rs, offset); @@ -3454,17 +3454,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(rs, scratch, offset); @@ -3475,14 +3475,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltc(rs, scratch, offset); @@ -3491,17 +3491,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgec(scratch, rs, offset); @@ -3514,14 +3514,14 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; bnezc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(scratch, rs, offset); @@ -3530,17 +3530,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Ugreater_equal: // rs >= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; beqzc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(rs, scratch, offset); @@ -3551,13 +3551,13 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, if (rt.is_reg() && rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) return false; bnezc(scratch, offset); } else if (IsZero(rt)) { break; // No code needs to be emitted. 
} else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bltuc(rs, scratch, offset); @@ -3566,17 +3566,17 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, case Uless_equal: // rs <= rt if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; bc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) return false; bc(offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; beqzc(rs, offset); } else { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; DCHECK(rs != scratch); bgeuc(scratch, rs, offset); @@ -3858,7 +3858,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); switch (cond) { case cc_always: - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); break; case eq: @@ -3880,11 +3880,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bltzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgtzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3896,14 +3896,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case greater_equal: // rs >= rt if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; blezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bgezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3917,11 +3917,11 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, if (rs.code() == rt.rm().code()) { break; // No code needs to be emitted. 
} else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgtzalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; bltzalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -3933,14 +3933,14 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, case less_equal: // rs <= r2 if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; balc(offset); } else if (rs == zero_reg) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt)) + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) return false; bgezalc(scratch, offset); } else if (IsZero(rt)) { - if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false; + if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; blezalc(rs, offset); } else { if (!is_near(L, bits)) return false; @@ -4202,10 +4202,8 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, } void TurboAssembler::Jump(const ExternalReference& reference) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, reference); - Jump(scratch); + li(t9, reference); + Jump(t9); } // Note: To call gcc-compiled C code on mips, you must call through t9. @@ -4284,7 +4282,6 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index c2b701a5affcaa..886d64e494b3b0 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -850,12 +850,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) - OffsetSize bits, - Register& scratch, // NOLINT(runtime/references) - const Operand& rt); + // TODO(mips) Reorder parameters so out parameters come last. 
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
 
   void BranchShortHelperR6(int32_t offset, Label* L);
   void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7dc94f39cd6a3a..de89371adbf365 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -111,15 +111,9 @@ OptimizedCompilationInfo::~OptimizedCompilationInfo() {
 }
 
 void OptimizedCompilationInfo::set_deferred_handles(
-    std::shared_ptr<DeferredHandles> deferred_handles) {
+    std::unique_ptr<DeferredHandles> deferred_handles) {
   DCHECK_NULL(deferred_handles_);
-  deferred_handles_.swap(deferred_handles);
-}
-
-void OptimizedCompilationInfo::set_deferred_handles(
-    DeferredHandles* deferred_handles) {
-  DCHECK_NULL(deferred_handles_);
-  deferred_handles_.reset(deferred_handles);
+  deferred_handles_ = std::move(deferred_handles);
 }
 
 void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
@@ -132,6 +126,7 @@ void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
   if (!closure_.is_null()) {
     closure_ = Handle<JSFunction>(*closure_, isolate);
   }
+  DCHECK(code_.is_null());
 }
 
 void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index 624517283e3e2c..2f3afafc68da5e 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -231,11 +231,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
     osr_frame_ = osr_frame;
   }
 
-  void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
-  void set_deferred_handles(DeferredHandles* deferred_handles);
-  std::shared_ptr<DeferredHandles> deferred_handles() {
-    return deferred_handles_;
-  }
+  void set_deferred_handles(std::unique_ptr<DeferredHandles> deferred_handles);
 
   void ReopenHandlesInNewHandleScope(Isolate* isolate);
 
@@ -330,7 +326,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   // OptimizedCompilationInfo allocates.
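Note: the MIPS and MIPS64 hunks above all apply the same mechanical refactor. CalculateOffset, AdjustBaseAndOffset and UnpackTargetAddressUnsigned now take their out parameters as pointers instead of non-const references, which is why the NOLINT(runtime/references) suppressions disappear from the headers and every call site gains a `&`. A minimal standalone sketch of the pattern, with illustrative names that are not part of the patch:

    #include <cstdint>

    // Old style: the out parameter is a non-const reference, so nothing at the
    // call site shows that `offset` is written to.
    static bool CalculateOffsetByRef(int32_t& offset) {
      offset += 4;
      return true;
    }

    // New style: the out parameter is a pointer, so the caller must spell
    // `&offset`, making the mutation visible where the call is made.
    static bool CalculateOffsetByPtr(int32_t* offset) {
      *offset += 4;
      return true;
    }

    int main() {
      int32_t offset = 0;
      CalculateOffsetByRef(offset);   // mutation is easy to miss here
      CalculateOffsetByPtr(&offset);  // mutation is explicit here
      return offset == 8 ? 0 : 1;
    }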
Zone* zone_; - std::shared_ptr deferred_handles_; + std::unique_ptr deferred_handles_; BailoutReason bailout_reason_ = BailoutReason::kNoReason; diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc index b7be9c77757eee..84e36fc8438d08 100644 --- a/deps/v8/src/codegen/pending-optimization-table.cc +++ b/deps/v8/src/codegen/pending-optimization-table.cc @@ -83,7 +83,7 @@ void PendingOptimizationTable::MarkedForOptimization( function->ShortPrint(); PrintF( " should be prepared for optimization with " - "%%PrepareFunctionForOptimize before " + "%%PrepareFunctionForOptimization before " "%%OptimizeFunctionOnNextCall / %%OptimizeOSR "); UNREACHABLE(); } diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h index 166b9d4423115f..c55a5a9c0bfa0c 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h @@ -144,7 +144,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc index 2a638af0705055..03dbb2edaa0dc3 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc @@ -200,8 +200,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Handle object; switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); break; } case HeapObjectRequest::kStringConstant: { @@ -1121,20 +1121,6 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o, } #endif -// Function descriptor for AIX. -// Code address skips the function descriptor "header". -// TOC and static chain are ignored and set to 0. 
-void Assembler::function_descriptor() { - if (ABI_USES_FUNCTION_DESCRIPTORS) { - Label instructions; - DCHECK_EQ(pc_offset(), 0); - emit_label_addr(&instructions); - dp(0); - dp(0); - bind(&instructions); - } -} - int Assembler::instructions_required_for_mov(Register dst, const Operand& src) const { bool canOptimize = diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h index dee264a75c06bb..c056de9f2feaeb 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc.h @@ -41,6 +41,7 @@ #define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_ #include +#include #include #include "src/codegen/assembler.h" @@ -839,8 +840,6 @@ class Assembler : public AssemblerBase { void mtfprwa(DoubleRegister dst, Register src); #endif - void function_descriptor(); - // Exception-generating instructions and debugging support void stop(Condition cond = al, int32_t code = kDefaultStopCode, CRegister cr = cr7); diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h index f6ebc6a7ba53c5..2e499fd2c41357 100644 --- a/deps/v8/src/codegen/ppc/constants-ppc.h +++ b/deps/v8/src/codegen/ppc/constants-ppc.h @@ -60,6 +60,12 @@ namespace internal { // TODO(sigurds): Change this value once we use relative jumps. constexpr size_t kMaxPCRelativeCodeRangeInMB = 0; +// Used to encode a boolean value when emitting 32 bit +// opcodes which will indicate the presence of function descriptors +constexpr int kHasFunctionDescriptorBitShift = 9; +constexpr int kHasFunctionDescriptorBitMask = 1 + << kHasFunctionDescriptorBitShift; + // Number of registers const int kNumRegisters = 32; diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 41162063331b2e..08fb85dd2ced21 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -209,6 +209,12 @@ void TurboAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, reference); + if (ABI_USES_FUNCTION_DESCRIPTORS) { + // AIX uses a function descriptor. When calling C code be + // aware of this descriptor and pick up values from it. + LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(scratch, kPointerSize)); + LoadP(scratch, MemOperand(scratch, 0)); + } Jump(scratch); } @@ -1287,12 +1293,11 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, { // Load receiver to pass it later to DebugOnFunctionCall hook. if (actual.is_reg()) { - mr(r7, actual.reg()); + ShiftLeftImm(r7, actual.reg(), Operand(kPointerSizeLog2)); + LoadPX(r7, MemOperand(sp, r7)); } else { - mov(r7, Operand(actual.immediate())); + LoadP(r7, MemOperand(sp, actual.immediate() << kPointerSizeLog2), r0); } - ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2)); - LoadPX(r7, MemOperand(sp, r7)); FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); if (expected.is_reg()) { @@ -1931,28 +1936,35 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1, void TurboAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, - int num_double_arguments) { + int num_double_arguments, + bool has_function_descriptor) { Move(ip, function); - CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); + CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments, + has_function_descriptor); } void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); + int num_double_arguments, + bool has_function_descriptor) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, + has_function_descriptor); } void TurboAssembler::CallCFunction(ExternalReference function, - int num_arguments) { - CallCFunction(function, num_arguments, 0); + int num_arguments, + bool has_function_descriptor) { + CallCFunction(function, num_arguments, 0, has_function_descriptor); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { - CallCFunction(function, num_arguments, 0); +void TurboAssembler::CallCFunction(Register function, int num_arguments, + bool has_function_descriptor) { + CallCFunction(function, num_arguments, 0, has_function_descriptor); } void TurboAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments) { + int num_double_arguments, + bool has_function_descriptor) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -1977,7 +1989,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, // allow preemption, so the return address in the link register // stays correct. Register dest = function; - if (ABI_USES_FUNCTION_DESCRIPTORS) { + if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) { // AIX/PPC64BE Linux uses a function descriptor. 
When calling C code be // aware of this descriptor and pick up values from it LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize)); @@ -2409,51 +2421,51 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb, void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch, CRegister cr) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Cmpi(src1, Operand(smi), scratch, cr); +#else LoadSmiLiteral(scratch, smi); cmp(src1, scratch, cr); -#else - Cmpi(src1, Operand(smi), scratch, cr); #endif } void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch, CRegister cr) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Cmpli(src1, Operand(smi), scratch, cr); +#else LoadSmiLiteral(scratch, smi); cmpl(src1, scratch, cr); -#else - Cmpli(src1, Operand(smi), scratch, cr); #endif } void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Add(dst, src, static_cast(smi.ptr()), scratch); +#else LoadSmiLiteral(scratch, smi); add(dst, src, scratch); -#else - Add(dst, src, reinterpret_cast(smi), scratch); #endif } void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + Add(dst, src, -(static_cast(smi.ptr())), scratch); +#else LoadSmiLiteral(scratch, smi); sub(dst, src, scratch); -#else - Add(dst, src, -(reinterpret_cast(smi)), scratch); #endif } void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch, RCBit rc) { -#if V8_TARGET_ARCH_PPC64 +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + And(dst, src, Operand(smi), rc); +#else LoadSmiLiteral(scratch, smi); and_(dst, src, scratch, rc); -#else - And(dst, src, Operand(smi), rc); #endif } @@ -2941,14 +2953,18 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + ShiftLeftImm(builtin_index, builtin_index, + Operand(kSystemPointerSizeLog2 - kSmiShift)); +#else ShiftRightArithImm(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); +#endif addi(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index)); diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index fd4cb6014bb322..1c88558fc4a0a3 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -350,12 +350,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). 
- void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, int num_arguments, + bool has_function_descriptor = kHasFunctionDescriptor); + void CallCFunction(Register function, int num_arguments, + bool has_function_descriptor = kHasFunctionDescriptor); void CallCFunction(ExternalReference function, int num_reg_arguments, - int num_double_arguments); + int num_double_arguments, + bool has_function_descriptor = kHasFunctionDescriptor); void CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments); + int num_double_arguments, + bool has_function_descriptor = kHasFunctionDescriptor); // Call a runtime routine. This expects {centry} to contain a fitting CEntry // builtin for the target runtime function and uses an indirect call. @@ -642,7 +646,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { int CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments); void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); + int num_double_arguments, + bool has_function_descriptor); void CallRecordWriteStub(Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, Handle code_target, @@ -876,12 +881,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { } void SmiToPtrArrayOffset(Register dst, Register src) { -#if V8_TARGET_ARCH_PPC64 - STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); - ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); -#else +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); +#else + STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); + ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); #endif } @@ -895,7 +900,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void AssertNotSmi(Register object); void AssertSmi(Register object); -#if V8_TARGET_ARCH_PPC64 +#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // Ensure it is permissible to read/write int value directly from // upper half of the smi. STATIC_ASSERT(kSmiTag == 0); diff --git a/deps/v8/src/codegen/reglist.h b/deps/v8/src/codegen/reglist.h index 609e6b88458e13..4f1d35267d0ba2 100644 --- a/deps/v8/src/codegen/reglist.h +++ b/deps/v8/src/codegen/reglist.h @@ -25,20 +25,18 @@ constexpr int NumRegs(RegList list) { return base::bits::CountPopulation(list); } +namespace detail { // Combine two RegLists by building the union of the contained registers. -// Implemented as a Functor to pass it to base::fold even on gcc < 5 (see -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52892). -// TODO(clemensh): Remove this once we require gcc >= 5.0. -struct CombineRegListsFunctor { - constexpr RegList operator()(RegList list1, RegList list2) const { - return list1 | list2; - } -}; +// TODO(clemensb): Replace by constexpr lambda once we have C++17. +constexpr RegList CombineRegListsHelper(RegList list1, RegList list2) { + return list1 | list2; +} +} // namespace detail // Combine several RegLists by building the union of the contained registers. template constexpr RegList CombineRegLists(RegLists... 
lists) { - return base::fold(CombineRegListsFunctor{}, 0, lists...); + return base::fold(detail::CombineRegListsHelper, 0, lists...); } } // namespace internal diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc index a889a8b9c7bfea..039a6746b1b391 100644 --- a/deps/v8/src/codegen/reloc-info.cc +++ b/deps/v8/src/codegen/reloc-info.cc @@ -366,7 +366,7 @@ void RelocInfo::set_target_address(Address target, Assembler::set_target_address_at(pc_, constant_pool_, target, icache_flush_mode); if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && - IsCodeTargetMode(rmode_)) { + IsCodeTargetMode(rmode_) && !FLAG_disable_write_barriers) { Code target_code = Code::GetCodeFromTargetAddress(target); MarkingBarrierForCode(host(), this, target_code); } diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h index 5e7b193c8ace4a..f911bdabf6f301 100644 --- a/deps/v8/src/codegen/s390/assembler-s390-inl.h +++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h @@ -150,7 +150,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), icache_flush_mode); - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc index 873c0a2ad060c8..9de95ed5084bd0 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.cc +++ b/deps/v8/src/codegen/s390/assembler-s390.cc @@ -329,8 +329,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast
(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - object = isolate->factory()->NewHeapNumber(request.heap_number(), - AllocationType::kOld); + object = isolate->factory()->NewHeapNumber( + request.heap_number()); set_target_address_at(pc, kNullAddress, object.address(), SKIP_ICACHE_FLUSH); break; diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h index 0653e79b67cf20..f1a418d1afa01e 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.h +++ b/deps/v8/src/codegen/s390/assembler-s390.h @@ -40,6 +40,7 @@ #ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_ #define V8_CODEGEN_S390_ASSEMBLER_S390_H_ #include +#include #if V8_HOST_ARCH_S390 // elf.h include is required for auxv check for STFLE facility used // for hardware detection, which is sensible only on s390 hosts. diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index 355d536379a1b6..4cab44d9e1b612 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -51,7 +51,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, } RegList list = kJSCallerSaved & ~exclusions; - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; if (fp_mode == kSaveFPRegs) { bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize; @@ -76,7 +76,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, RegList list = kJSCallerSaved & ~exclusions; MultiPush(list); - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; if (fp_mode == kSaveFPRegs) { MultiPushDoubles(kCallerSavedDoubles); @@ -107,7 +107,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, RegList list = kJSCallerSaved & ~exclusions; MultiPop(list); - bytes += NumRegs(list) * kPointerSize; + bytes += NumRegs(list) * kSystemPointerSize; return bytes; } @@ -116,8 +116,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); - const uint32_t offset = - FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag; + const uint32_t offset = FixedArray::kHeaderSize + + constant_index * kSystemPointerSize - kHeapObjectTag; CHECK(is_uint19(offset)); DCHECK_NE(destination, r0); @@ -258,7 +258,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, void TurboAssembler::Drop(int count) { if (count > 0) { - int total = count * kPointerSize; + int total = count * kSystemPointerSize; if (is_uint12(total)) { la(sp, MemOperand(sp, total)); } else if (is_int20(total)) { @@ -270,7 +270,7 @@ void TurboAssembler::Drop(int count) { } void TurboAssembler::Drop(Register count, Register scratch) { - ShiftLeftP(scratch, count, Operand(kPointerSizeLog2)); + ShiftLeftP(scratch, count, Operand(kSystemPointerSizeLog2)); AddP(sp, sp, scratch); } @@ -367,12 +367,12 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc, void TurboAssembler::MultiPush(RegList regs, Register location) { int16_t num_to_push = base::bits::CountPopulation(regs); - int16_t stack_offset = num_to_push * kPointerSize; + int16_t stack_offset = num_to_push * kSystemPointerSize; SubP(location, location, Operand(stack_offset)); for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { - stack_offset -= kPointerSize; + 
stack_offset -= kSystemPointerSize; StoreP(ToRegister(i), MemOperand(location, stack_offset)); } } @@ -384,7 +384,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) { for (int16_t i = 0; i < Register::kNumRegisters; i++) { if ((regs & (1 << i)) != 0) { LoadP(ToRegister(i), MemOperand(location, stack_offset)); - stack_offset += kPointerSize; + stack_offset += kSystemPointerSize; } } AddP(location, location, Operand(stack_offset)); @@ -439,13 +439,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } // Although the object register is tagged, the offset is relative to the start - // of the object, so so offset must be a multiple of kPointerSize. - DCHECK(IsAligned(offset, kPointerSize)); + // of the object, so so offset must be a multiple of kSystemPointerSize. + DCHECK(IsAligned(offset, kSystemPointerSize)); lay(dst, MemOperand(object, offset - kHeapObjectTag)); if (emit_debug_code()) { Label ok; - AndP(r0, dst, Operand(kPointerSize - 1)); + AndP(r0, dst, Operand(kSystemPointerSize - 1)); beq(&ok, Label::kNear); stop(); bind(&ok); @@ -632,7 +632,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { Push(r14, fp); fp_delta = 0; } - la(fp, MemOperand(sp, fp_delta * kPointerSize)); + la(fp, MemOperand(sp, fp_delta * kSystemPointerSize)); } void TurboAssembler::PopCommonFrame(Register marker_reg) { @@ -653,7 +653,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(r14, fp, cp); fp_delta = 1; } - la(fp, MemOperand(sp, fp_delta * kPointerSize)); + la(fp, MemOperand(sp, fp_delta * kSystemPointerSize)); } void TurboAssembler::RestoreFrameStateForTailCall() { @@ -1082,9 +1082,9 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, DCHECK(frame_type == StackFrame::EXIT || frame_type == StackFrame::BUILTIN_EXIT); // Set up the frame structure on the stack. - DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); - DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); - DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); + DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement); + DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset); + DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset); DCHECK_GT(stack_space, 0); // This is an opportunity to build a frame to wrap @@ -1117,7 +1117,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, // since the sp slot and code slot were pushed after the fp. } - lay(sp, MemOperand(sp, -stack_space * kPointerSize)); + lay(sp, MemOperand(sp, -stack_space * kSystemPointerSize)); // Allocate and align the frame preparing for calling the runtime // function. @@ -1127,11 +1127,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8 } - lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); + lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize)); StoreP(MemOperand(sp), Operand::Zero(), r0); // Set the exit frame sp value to point just before the return address // location. 
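// Illustrative layout sketch (assumes a 64-bit target, so
// kSystemPointerSize == 8; not part of the original change). The DCHECKs at
// the top of EnterExitFrame pin the exit frame shape relative to fp:
//
//   fp + 16 : caller SP displacement (2 * kSystemPointerSize)
//   fp +  8 : saved return pc        (1 * kSystemPointerSize)
//   fp +  0 : saved caller fp        (0 * kSystemPointerSize)
//
// which is why every offset in this function is now scaled by
// kSystemPointerSize instead of the tagged-size-ambiguous kPointerSize.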
- lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize)); + lay(r1, MemOperand(sp, kStackFrameSPSlot * kSystemPointerSize)); StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset)); } @@ -1184,7 +1184,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, if (argument_count.is_valid()) { if (!argument_count_is_length) { - ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2)); + ShiftLeftP(argument_count, argument_count, + Operand(kSystemPointerSizeLog2)); } la(sp, MemOperand(sp, argument_count)); } @@ -1211,22 +1212,24 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, #endif // Calculate the end of destination area where we will put the arguments - // after we drop current frame. We AddP kPointerSize to count the receiver - // argument which is not included into formal parameters count. + // after we drop current frame. We AddP kSystemPointerSize to count the + // receiver argument which is not included into formal parameters count. Register dst_reg = scratch0; - ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2)); + ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kSystemPointerSizeLog2)); AddP(dst_reg, fp, dst_reg); AddP(dst_reg, dst_reg, - Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); + Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); Register src_reg = caller_args_count_reg; - // Calculate the end of source area. +kPointerSize is for the receiver. + // Calculate the end of source area. +kSystemPointerSize is for the receiver. if (callee_args_count.is_reg()) { - ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2)); + ShiftLeftP(src_reg, callee_args_count.reg(), + Operand(kSystemPointerSizeLog2)); AddP(src_reg, sp, src_reg); - AddP(src_reg, src_reg, Operand(kPointerSize)); + AddP(src_reg, src_reg, Operand(kSystemPointerSize)); } else { - mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize)); + mov(src_reg, + Operand((callee_args_count.immediate() + 1) * kSystemPointerSize)); AddP(src_reg, src_reg, sp); } @@ -1253,10 +1256,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, } LoadRR(r1, tmp_reg); bind(&loop); - LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize)); - StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize)); - lay(src_reg, MemOperand(src_reg, -kPointerSize)); - lay(dst_reg, MemOperand(dst_reg, -kPointerSize)); + LoadP(tmp_reg, MemOperand(src_reg, -kSystemPointerSize)); + StoreP(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize)); + lay(src_reg, MemOperand(src_reg, -kSystemPointerSize)); + lay(dst_reg, MemOperand(dst_reg, -kSystemPointerSize)); BranchOnCount(r1, &loop); // Leave current frame. @@ -1342,12 +1345,12 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target, { // Load receiver to pass it later to DebugOnFunctionCall hook. if (actual.is_reg()) { - LoadRR(r6, actual.reg()); + ShiftLeftP(r6, actual.reg(), Operand(kSystemPointerSizeLog2)); + LoadP(r6, MemOperand(sp, r6)); } else { - mov(r6, Operand(actual.immediate())); + LoadP(r6, MemOperand(sp, actual.immediate() << kSystemPointerSizeLog2), + ip); } - ShiftLeftP(r6, r6, Operand(kPointerSizeLog2)); - LoadP(r6, MemOperand(sp, r6)); FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); if (expected.is_reg()) { @@ -1470,8 +1473,8 @@ void MacroAssembler::MaybeDropFrames() { void MacroAssembler::PushStackHandler() { // Adjust this code if not the case. 
- STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize); + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize); // Link the current handler as the next handler. Move(r7, @@ -1486,13 +1489,13 @@ void MacroAssembler::PushStackHandler() { // Copy the old handler into the next handler slot. MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7), - Operand(kPointerSize)); + Operand(kSystemPointerSize)); // Set this new handler as the current one. StoreP(sp, MemOperand(r7)); } void MacroAssembler::PopStackHandler() { - STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); // Pop the Next Handler into r3 and store it into Handler Address reference. @@ -1839,18 +1842,19 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, int stack_passed_arguments = CalculateStackPassedWords(num_reg_arguments, num_double_arguments); int stack_space = kNumRequiredStackFrameSlots; - if (frame_alignment > kPointerSize) { + if (frame_alignment > kSystemPointerSize) { // Make stack end at alignment and make room for stack arguments // -- preserving original value of sp. LoadRR(scratch, sp); - lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize)); + lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kSystemPointerSize)); DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); - StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize)); + StoreP(scratch, + MemOperand(sp, (stack_passed_arguments)*kSystemPointerSize)); } else { stack_space += stack_passed_arguments; } - lay(sp, MemOperand(sp, (-stack_space) * kPointerSize)); + lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize)); } void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, @@ -1940,11 +1944,11 @@ void TurboAssembler::CallCFunctionHelper(Register function, int stack_passed_arguments = CalculateStackPassedWords(num_reg_arguments, num_double_arguments); int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; - if (ActivationFrameAlignment() > kPointerSize) { + if (ActivationFrameAlignment() > kSystemPointerSize) { // Load the original stack pointer (pre-alignment) from the stack - LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); + LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize)); } else { - la(sp, MemOperand(sp, stack_space * kPointerSize)); + la(sp, MemOperand(sp, stack_space * kSystemPointerSize)); } } @@ -1962,20 +1966,20 @@ void TurboAssembler::CheckPageFlag( uint32_t shifted_mask = mask; // Determine the byte offset to be tested if (mask <= 0x80) { - byte_offset = kPointerSize - 1; + byte_offset = kSystemPointerSize - 1; } else if (mask < 0x8000) { - byte_offset = kPointerSize - 2; + byte_offset = kSystemPointerSize - 2; shifted_mask = mask >> 8; } else if (mask < 0x800000) { - byte_offset = kPointerSize - 3; + byte_offset = kSystemPointerSize - 3; shifted_mask = mask >> 16; } else { - byte_offset = kPointerSize - 4; + byte_offset = kSystemPointerSize - 4; shifted_mask = mask >> 24; } #if V8_TARGET_LITTLE_ENDIAN // Reverse the byte_offset if emulating on little endian platform - byte_offset = kPointerSize - byte_offset - 1; + byte_offset = kSystemPointerSize - 
byte_offset - 1; #endif tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset), Operand(shifted_mask)); @@ -3415,12 +3419,12 @@ void TurboAssembler::LoadIntLiteral(Register dst, int value) { void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { intptr_t value = static_cast(smi.ptr()); -#if V8_TARGET_ARCH_S390X +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + llilf(dst, Operand(value)); +#else DCHECK_EQ(value & 0xFFFFFFFF, 0); // The smi value is loaded in upper 32-bits. Lower 32-bit are zeros. llihf(dst, Operand(value >> 32)); -#else - llilf(dst, Operand(value)); #endif } @@ -3456,16 +3460,16 @@ void TurboAssembler::LoadFloat32Literal(DoubleRegister result, float value, } void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { -#if V8_TARGET_ARCH_S390X +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + // CFI takes 32-bit immediate. + cfi(src1, Operand(smi)); +#else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { cih(src1, Operand(static_cast(smi.ptr()) >> 32)); } else { LoadSmiLiteral(scratch, smi); cgr(src1, scratch); } -#else - // CFI takes 32-bit immediate. - cfi(src1, Operand(smi)); #endif } @@ -4154,7 +4158,7 @@ void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) { // Clear right most # of bits void TurboAssembler::ClearRightImm(Register dst, Register src, const Operand& val) { - int numBitsToClear = val.immediate() % (kPointerSize * 8); + int numBitsToClear = val.immediate() % (kSystemPointerSize * 8); // Try to use RISBG if possible if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { @@ -4342,14 +4346,19 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); - STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + STATIC_ASSERT(kSmiShiftSize == 0); + ShiftLeftP(builtin_index, builtin_index, + Operand(kSystemPointerSizeLog2 - kSmiShift)); +#else ShiftRightArithP(builtin_index, builtin_index, Operand(kSmiShift - kSystemPointerSizeLog2)); +#endif AddP(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); LoadP(builtin_index, MemOperand(kRootRegister, builtin_index)); @@ -4427,7 +4436,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Label return_label; larl(r14, &return_label); // Generate the return addr of call later. - StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize)); + StoreP(r14, MemOperand(sp, kStackFrameRASlot * kSystemPointerSize)); // zLinux ABI requires caller's frame to have sufficient space for callee // preserved regsiter save area. 
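// Context for the kPointerSize -> kSystemPointerSize renames in this file
// (illustration only, not part of the patch): once pointer compression is a
// build option, tagged slots may be 4 bytes while stack slots and saved
// registers stay machine-word sized, so stack arithmetic has to be spelled
// in kSystemPointerSize. Roughly, on a 64-bit target:
//
//   constexpr int kSystemPointerSize = 8;                        // machine word
//   constexpr int kTaggedSize = COMPRESS_POINTERS_BOOL ? 4 : 8;  // tagged slot
//
// so e.g. PushCallerSaved above still reserves NumRegs(list) * 8 bytes
// whether or not compression is enabled.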
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h index 856e4b592ecef0..06c26cb305f984 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.h +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h @@ -515,26 +515,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { } void push(DoubleRegister src) { - lay(sp, MemOperand(sp, -kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize)); StoreDouble(src, MemOperand(sp)); } void push(Register src) { - lay(sp, MemOperand(sp, -kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize)); StoreP(src, MemOperand(sp)); } void pop(DoubleRegister dst) { LoadDouble(dst, MemOperand(sp)); - la(sp, MemOperand(sp, kPointerSize)); + la(sp, MemOperand(sp, kSystemPointerSize)); } void pop(Register dst) { LoadP(dst, MemOperand(sp)); - la(sp, MemOperand(sp, kPointerSize)); + la(sp, MemOperand(sp, kSystemPointerSize)); } - void pop() { la(sp, MemOperand(sp, kPointerSize)); } + void pop() { la(sp, MemOperand(sp, kSystemPointerSize)); } void Push(Register src) { push(src); } @@ -544,25 +544,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2) { - lay(sp, MemOperand(sp, -kPointerSize * 2)); - StoreP(src1, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 2)); + StoreP(src1, MemOperand(sp, kSystemPointerSize)); StoreP(src2, MemOperand(sp, 0)); } // Push three registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Register src3) { - lay(sp, MemOperand(sp, -kPointerSize * 3)); - StoreP(src1, MemOperand(sp, kPointerSize * 2)); - StoreP(src2, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 3)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src2, MemOperand(sp, kSystemPointerSize)); StoreP(src3, MemOperand(sp, 0)); } // Push four registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Register src3, Register src4) { - lay(sp, MemOperand(sp, -kPointerSize * 4)); - StoreP(src1, MemOperand(sp, kPointerSize * 3)); - StoreP(src2, MemOperand(sp, kPointerSize * 2)); - StoreP(src3, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 4)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 3)); + StoreP(src2, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src3, MemOperand(sp, kSystemPointerSize)); StoreP(src4, MemOperand(sp, 0)); } @@ -580,11 +580,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { DCHECK(src3 != src5); DCHECK(src4 != src5); - lay(sp, MemOperand(sp, -kPointerSize * 5)); - StoreP(src1, MemOperand(sp, kPointerSize * 4)); - StoreP(src2, MemOperand(sp, kPointerSize * 3)); - StoreP(src3, MemOperand(sp, kPointerSize * 2)); - StoreP(src4, MemOperand(sp, kPointerSize)); + lay(sp, MemOperand(sp, -kSystemPointerSize * 5)); + StoreP(src1, MemOperand(sp, kSystemPointerSize * 4)); + StoreP(src2, MemOperand(sp, kSystemPointerSize * 3)); + StoreP(src3, MemOperand(sp, kSystemPointerSize * 2)); + StoreP(src4, MemOperand(sp, kSystemPointerSize)); StoreP(src5, MemOperand(sp, 0)); } @@ -593,36 +593,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Pop two registers. Pops rightmost register first (from lower address). 
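// Illustrative note (not part of the patch): after Push(a, b, c) above the
// stack is laid out as
//
//   sp + 16 : a   (leftmost register, highest address)
//   sp +  8 : b
//   sp +  0 : c   (rightmost register, lowest address)
//
// and the Pop overloads below restore the registers in mirror order, again
// stepping in kSystemPointerSize units.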
void Pop(Register src1, Register src2) { LoadP(src2, MemOperand(sp, 0)); - LoadP(src1, MemOperand(sp, kPointerSize)); - la(sp, MemOperand(sp, 2 * kPointerSize)); + LoadP(src1, MemOperand(sp, kSystemPointerSize)); + la(sp, MemOperand(sp, 2 * kSystemPointerSize)); } // Pop three registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3) { LoadP(src3, MemOperand(sp, 0)); - LoadP(src2, MemOperand(sp, kPointerSize)); - LoadP(src1, MemOperand(sp, 2 * kPointerSize)); - la(sp, MemOperand(sp, 3 * kPointerSize)); + LoadP(src2, MemOperand(sp, kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize)); + la(sp, MemOperand(sp, 3 * kSystemPointerSize)); } // Pop four registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3, Register src4) { LoadP(src4, MemOperand(sp, 0)); - LoadP(src3, MemOperand(sp, kPointerSize)); - LoadP(src2, MemOperand(sp, 2 * kPointerSize)); - LoadP(src1, MemOperand(sp, 3 * kPointerSize)); - la(sp, MemOperand(sp, 4 * kPointerSize)); + LoadP(src3, MemOperand(sp, kSystemPointerSize)); + LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize)); + la(sp, MemOperand(sp, 4 * kSystemPointerSize)); } // Pop five registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Register src3, Register src4, Register src5) { LoadP(src5, MemOperand(sp, 0)); - LoadP(src4, MemOperand(sp, kPointerSize)); - LoadP(src3, MemOperand(sp, 2 * kPointerSize)); - LoadP(src2, MemOperand(sp, 3 * kPointerSize)); - LoadP(src1, MemOperand(sp, 4 * kPointerSize)); - la(sp, MemOperand(sp, 5 * kPointerSize)); + LoadP(src4, MemOperand(sp, kSystemPointerSize)); + LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize)); + LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize)); + LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize)); + la(sp, MemOperand(sp, 5 * kSystemPointerSize)); } // Push a fixed frame, consisting of lr, fp, constant pool. @@ -1182,12 +1182,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { } void SmiToPtrArrayOffset(Register dst, Register src) { -#if V8_TARGET_ARCH_S390X - STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); - ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2)); +#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kSystemPointerSizeLog2); + ShiftLeftP(dst, src, Operand(kSystemPointerSizeLog2 - kSmiShift)); #else - STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); - ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); + STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kSystemPointerSizeLog2); + ShiftRightArithP(dst, src, Operand(kSmiShift - kSystemPointerSizeLog2)); #endif } @@ -1201,14 +1201,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void AssertNotSmi(Register object); void AssertSmi(Register object); -#if V8_TARGET_ARCH_S390X +#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // Ensure it is permissible to read/write int value directly from // upper half of the smi. 
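// Worked example (illustrative; assumes 64-bit smis, i.e. neither
// V8_COMPRESS_POINTERS nor V8_31BIT_SMIS_ON_64BIT_ARCH): a smi keeps its
// 32-bit payload in the upper half of the word (kSmiTagSize + kSmiShiftSize
// == 32), so the raw int can be read or written directly in that half. On a
// little-endian target the upper half starts kSystemPointerSize / 2 == 4
// bytes into the field, which is exactly what SmiWordOffset below adds; on a
// big-endian target the upper half is at the field start, so the offset is
// unchanged.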
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); #endif #if V8_TARGET_LITTLE_ENDIAN -#define SmiWordOffset(offset) (offset + kPointerSize / 2) +#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2) #else #define SmiWordOffset(offset) offset #endif diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc index 870241eac69b92..ba8e5981f06fb2 100644 --- a/deps/v8/src/codegen/source-position-table.cc +++ b/deps/v8/src/codegen/source-position-table.cc @@ -31,24 +31,23 @@ using MoreBit = BitField8; using ValueBits = BitField8; // Helper: Add the offsets from 'other' to 'value'. Also set is_statement. -void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references) +void AddAndSetEntry(PositionTableEntry* value, const PositionTableEntry& other) { - value.code_offset += other.code_offset; - value.source_position += other.source_position; - value.is_statement = other.is_statement; + value->code_offset += other.code_offset; + value->source_position += other.source_position; + value->is_statement = other.is_statement; } // Helper: Subtract the offsets from 'other' from 'value'. -void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references) +void SubtractFromEntry(PositionTableEntry* value, const PositionTableEntry& other) { - value.code_offset -= other.code_offset; - value.source_position -= other.source_position; + value->code_offset -= other.code_offset; + value->source_position -= other.source_position; } // Helper: Encode an integer. template -void EncodeInt(std::vector& bytes, // NOLINT(runtime/references) - T value) { +void EncodeInt(std::vector* bytes, T value) { using unsigned_type = typename std::make_unsigned::type; // Zig-zag encoding. static const int kShift = sizeof(T) * kBitsPerByte - 1; @@ -60,14 +59,13 @@ void EncodeInt(std::vector& bytes, // NOLINT(runtime/references) more = encoded > ValueBits::kMax; byte current = MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask); - bytes.push_back(current); + bytes->push_back(current); encoded >>= ValueBits::kSize; } while (more); } // Encode a PositionTableEntry. -void EncodeEntry(std::vector& bytes, // NOLINT(runtime/references) - const PositionTableEntry& entry) { +void EncodeEntry(std::vector* bytes, const PositionTableEntry& entry) { // We only accept ascending code offsets. DCHECK_GE(entry.code_offset, 0); // Since code_offset is not negative, we use sign to encode is_statement. @@ -115,17 +113,16 @@ Vector VectorFromByteArray(ByteArray byte_array) { } #ifdef ENABLE_SLOW_DCHECKS -void CheckTableEquals( - std::vector& raw_entries, // NOLINT(runtime/references) - SourcePositionTableIterator& encoded) { // NOLINT(runtime/references) +void CheckTableEquals(const std::vector& raw_entries, + SourcePositionTableIterator* encoded) { // Brute force testing: Record all positions and decode // the entire table to verify they are identical. 
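// Sketch of the scheme this check verifies (illustration, not part of the
// change): AddEntry stores each PositionTableEntry as a delta against the
// previous entry (SubtractFromEntry) and EncodeInt writes each delta with
// zig-zag plus a MoreBit/ValueBits variable-length byte encoding; the
// iterator undoes this by accumulating deltas via AddAndSetEntry. Under
// ENABLE_SLOW_DCHECKS we can therefore walk the raw and decoded tables in
// lock step, as the loop below does.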
auto raw = raw_entries.begin(); - for (; !encoded.done(); encoded.Advance(), raw++) { + for (; !encoded->done(); encoded->Advance(), raw++) { DCHECK(raw != raw_entries.end()); - DCHECK_EQ(encoded.code_offset(), raw->code_offset); - DCHECK_EQ(encoded.source_position().raw(), raw->source_position); - DCHECK_EQ(encoded.is_statement(), raw->is_statement); + DCHECK_EQ(encoded->code_offset(), raw->code_offset); + DCHECK_EQ(encoded->source_position().raw(), raw->source_position); + DCHECK_EQ(encoded->is_statement(), raw->is_statement); } DCHECK(raw == raw_entries.end()); } @@ -148,8 +145,8 @@ void SourcePositionTableBuilder::AddPosition(size_t code_offset, void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) { PositionTableEntry tmp(entry); - SubtractFromEntry(tmp, previous_); - EncodeEntry(bytes_, tmp); + SubtractFromEntry(&tmp, previous_); + EncodeEntry(&bytes_, tmp); previous_ = entry; #ifdef ENABLE_SLOW_DCHECKS raw_entries_.push_back(entry); @@ -169,7 +166,7 @@ Handle SourcePositionTableBuilder::ToSourcePositionTable( // Brute force testing: Record all positions and decode // the entire table to verify they are identical. SourcePositionTableIterator it(*table, SourcePositionTableIterator::kAll); - CheckTableEquals(raw_entries_, it); + CheckTableEquals(raw_entries_, &it); // No additional source positions after creating the table. mode_ = OMIT_SOURCE_POSITIONS; #endif @@ -187,7 +184,7 @@ OwnedVector SourcePositionTableBuilder::ToSourcePositionTableVector() { // the entire table to verify they are identical. SourcePositionTableIterator it(table.as_vector(), SourcePositionTableIterator::kAll); - CheckTableEquals(raw_entries_, it); + CheckTableEquals(raw_entries_, &it); // No additional source positions after creating the table. mode_ = OMIT_SOURCE_POSITIONS; #endif @@ -232,7 +229,7 @@ void SourcePositionTableIterator::Advance() { } else { PositionTableEntry tmp; DecodeEntry(bytes, &index_, &tmp); - AddAndSetEntry(current_, tmp); + AddAndSetEntry(¤t_, tmp); SourcePosition p = source_position(); filter_satisfied = (filter_ == kAll) || (filter_ == kJavaScriptOnly && p.IsJavaScript()) || diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h new file mode 100644 index 00000000000000..1f6c627929b11e --- /dev/null +++ b/deps/v8/src/codegen/tnode.h @@ -0,0 +1,374 @@ +// Copyright 2015 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_TNODE_H_ +#define V8_CODEGEN_TNODE_H_ + +#include "src/codegen/machine-type.h" + +namespace v8 { +namespace internal { + +class HeapNumber; +class BigInt; +class Object; + +namespace compiler { + +class Node; + +} + +struct UntaggedT {}; + +struct IntegralT : UntaggedT {}; + +struct WordT : IntegralT { + static const MachineRepresentation kMachineRepresentation = + (kSystemPointerSize == 4) ? 
MachineRepresentation::kWord32 + : MachineRepresentation::kWord64; +}; + +struct RawPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::Pointer(); +}; + +template +struct RawPtr : RawPtrT {}; + +struct Word32T : IntegralT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kWord32; +}; +struct Int32T : Word32T { + static constexpr MachineType kMachineType = MachineType::Int32(); +}; +struct Uint32T : Word32T { + static constexpr MachineType kMachineType = MachineType::Uint32(); +}; +struct Int16T : Int32T { + static constexpr MachineType kMachineType = MachineType::Int16(); +}; +struct Uint16T : Uint32T, Int32T { + static constexpr MachineType kMachineType = MachineType::Uint16(); +}; +struct Int8T : Int16T { + static constexpr MachineType kMachineType = MachineType::Int8(); +}; +struct Uint8T : Uint16T, Int16T { + static constexpr MachineType kMachineType = MachineType::Uint8(); +}; + +struct Word64T : IntegralT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kWord64; +}; +struct Int64T : Word64T { + static constexpr MachineType kMachineType = MachineType::Int64(); +}; +struct Uint64T : Word64T { + static constexpr MachineType kMachineType = MachineType::Uint64(); +}; + +struct IntPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::IntPtr(); +}; +struct UintPtrT : WordT { + static constexpr MachineType kMachineType = MachineType::UintPtr(); +}; + +struct Float32T : UntaggedT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kFloat32; + static constexpr MachineType kMachineType = MachineType::Float32(); +}; + +struct Float64T : UntaggedT { + static const MachineRepresentation kMachineRepresentation = + MachineRepresentation::kFloat64; + static constexpr MachineType kMachineType = MachineType::Float64(); +}; + +#ifdef V8_COMPRESS_POINTERS +using TaggedT = Int32T; +#else +using TaggedT = IntPtrT; +#endif + +// Result of a comparison operation. +struct BoolT : Word32T {}; + +// Value type of a Turbofan node with two results. +template +struct PairT {}; + +inline constexpr MachineType CommonMachineType(MachineType type1, + MachineType type2) { + return (type1 == type2) ? type1 + : ((type1.IsTagged() && type2.IsTagged()) + ? 
MachineType::AnyTagged() + : MachineType::None()); +} + +template +struct MachineTypeOf { + static constexpr MachineType value = Type::kMachineType; +}; + +template +constexpr MachineType MachineTypeOf::value; + +template <> +struct MachineTypeOf { + static constexpr MachineType value = MachineType::AnyTagged(); +}; +template <> +struct MachineTypeOf { + static constexpr MachineType value = MachineType::AnyTagged(); +}; +template <> +struct MachineTypeOf { + static constexpr MachineType value = MachineType::TaggedSigned(); +}; +template +struct MachineTypeOf::value>::type> { + static constexpr MachineType value = MachineType::TaggedPointer(); +}; + +template +constexpr MachineType MachineTypeOf< + HeapObjectSubtype, typename std::enable_if::value>::type>::value; + +template +struct MachineRepresentationOf { + static const MachineRepresentation value = Type::kMachineRepresentation; +}; +template +struct MachineRepresentationOf< + T, typename std::enable_if::value>::type> { + static const MachineRepresentation value = + MachineTypeOf::value.representation(); +}; +template +struct MachineRepresentationOf< + T, typename std::enable_if::value>::type> { + static const MachineRepresentation value = + MachineTypeOf::value.representation(); +}; +template <> +struct MachineRepresentationOf { + static const MachineRepresentation value = RawPtrT::kMachineRepresentation; +}; + +template +struct is_valid_type_tag { + static const bool value = std::is_base_of::value || + std::is_base_of::value || + std::is_base_of::value || + std::is_same::value; + static const bool is_tagged = std::is_base_of::value || + std::is_base_of::value; +}; + +template +struct is_valid_type_tag> { + static const bool value = + is_valid_type_tag::value && is_valid_type_tag::value; + static const bool is_tagged = false; +}; + +template +struct UnionT; + +template +struct is_valid_type_tag> { + static const bool is_tagged = + is_valid_type_tag::is_tagged && is_valid_type_tag::is_tagged; + static const bool value = is_tagged; +}; + +template +struct UnionT { + static constexpr MachineType kMachineType = + CommonMachineType(MachineTypeOf::value, MachineTypeOf::value); + static const MachineRepresentation kMachineRepresentation = + kMachineType.representation(); + static_assert(kMachineRepresentation != MachineRepresentation::kNone, + "no common representation"); + static_assert(is_valid_type_tag::is_tagged && + is_valid_type_tag::is_tagged, + "union types are only possible for tagged values"); +}; + +using AnyTaggedT = UnionT; +using Number = UnionT; +using Numeric = UnionT; + +// A pointer to a builtin function, used by Torque's function pointers. 
+using BuiltinPtr = Smi; + +class int31_t { + public: + int31_t() : value_(0) {} + int31_t(int value) : value_(value) { // NOLINT(runtime/explicit) + DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); + } + int31_t& operator=(int value) { + DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); + value_ = value; + return *this; + } + int32_t value() const { return value_; } + operator int32_t() const { return value_; } + + private: + int32_t value_; +}; + +template +struct is_subtype { + static const bool value = std::is_base_of::value; +}; +template +struct is_subtype, U> { + static const bool value = + is_subtype::value && is_subtype::value; +}; +template +struct is_subtype> { + static const bool value = + is_subtype::value || is_subtype::value; +}; +template +struct is_subtype, UnionT> { + static const bool value = + (is_subtype::value || is_subtype::value) && + (is_subtype::value || is_subtype::value); +}; + +template +struct types_have_common_values { + static const bool value = is_subtype::value || is_subtype::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template +struct types_have_common_values, U> { + static const bool value = types_have_common_values::value || + types_have_common_values::value; +}; + +template +struct types_have_common_values> { + static const bool value = types_have_common_values::value || + types_have_common_values::value; +}; +template +struct types_have_common_values, UnionT> { + static const bool value = types_have_common_values::value || + types_have_common_values::value || + types_have_common_values::value || + types_have_common_values::value; +}; + +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; + +template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; + +// TNode is an SSA value with the static type tag T, which is one of the +// following: +// - a subclass of internal::Object represents a tagged type +// - a subclass of internal::UntaggedT represents an untagged type +// - ExternalReference +// - PairT for an operation returning two values, with types T1 +// and T2 +// - UnionT represents either a value of type T1 or of type T2. 
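// Illustrative usage (not part of this header): in CodeStubAssembler-style
// code one might write
//
//   TNode<Smi> a = ...;
//   TNode<Number> b = a;          // ok: Smi is a subtype of UnionT<Smi, HeapNumber>
//   SloppyTNode<Object> c = node; // implicit conversion from a raw compiler::Node*
//
// while the reverse assignment (Number to Smi) fails to compile, which is
// the point of carrying the static type tag.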
+template +class TNode { + public: + template ::value, int>::type = 0> + TNode(const TNode& other) : node_(other) { + LazyTemplateChecks(); + } + TNode() : TNode(nullptr) {} + + TNode operator=(TNode other) { + DCHECK_NOT_NULL(other.node_); + node_ = other.node_; + return *this; + } + + bool is_null() { return node_ == nullptr; } + + operator compiler::Node*() const { return node_; } + + static TNode UncheckedCast(compiler::Node* node) { return TNode(node); } + + protected: + explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); } + + private: + // These checks shouldn't be checked before TNode is actually used. + void LazyTemplateChecks() { + static_assert(is_valid_type_tag::value, "invalid type tag"); + } + + compiler::Node* node_; +}; + +// SloppyTNode is a variant of TNode and allows implicit casts from +// Node*. It is intended for function arguments as long as some call sites +// still use untyped Node* arguments. +// TODO(tebbi): Delete this class once transition is finished. +template +class SloppyTNode : public TNode { + public: + SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit) + : TNode(node) {} + template ::value, + int>::type = 0> + SloppyTNode(const TNode& other) // NOLINT(runtime/explicit) + : TNode(other) {} +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_TNODE_H_ diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h index 3a3e65a41e753d..6e11ad5c3fbf69 100644 --- a/deps/v8/src/codegen/turbo-assembler.h +++ b/deps/v8/src/codegen/turbo-assembler.h @@ -5,6 +5,8 @@ #ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_ #define V8_CODEGEN_TURBO_ASSEMBLER_H_ +#include + #include "src/base/template-utils.h" #include "src/builtins/builtins.h" #include "src/codegen/assembler-arch.h" @@ -100,7 +102,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { static bool IsAddressableThroughRootRegister( Isolate* isolate, const ExternalReference& reference); -#if V8_OS_WIN +#ifdef V8_TARGET_OS_WIN // Minimum page size. We must touch memory once per page when expanding the // stack, to avoid access violations. 
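// Illustrative sketch of why this constant exists (see AllocateStackSpace in
// macro-assembler-x64.cc; not part of this change): Windows only grows the
// stack one guard page at a time, so a large stack allocation is emitted
// roughly as
//
//   while (bytes > kStackPageSize) {
//     sub rsp, kStackPageSize
//     mov byte [rsp], 0     // touch the fresh page so the guard page advances
//     bytes -= kStackPageSize
//   }
//   sub rsp, bytes
//
// instead of a single large subtraction.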
static constexpr int kStackPageSize = 4 * KB; diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h index f5d0c0ffcf528c..d8457d9d3e38a0 100644 --- a/deps/v8/src/codegen/x64/assembler-x64-inl.h +++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h @@ -218,6 +218,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { void Assembler::set_target_address_at(Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode) { + DCHECK(is_int32(target - pc - 4)); WriteUnalignedValue(pc, static_cast(target - pc - 4)); if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc, sizeof(int32_t)); @@ -363,7 +364,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc_, sizeof(Address)); } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { + if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && + !FLAG_disable_write_barriers) { WriteBarrierForCode(host(), this, target); } } diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc index 1783da700ba53d..16791a6453926d 100644 --- a/deps/v8/src/codegen/x64/assembler-x64.cc +++ b/deps/v8/src/codegen/x64/assembler-x64.cc @@ -327,8 +327,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Address pc = reinterpret_cast
(buffer_start_) + request.offset(); switch (request.kind()) { case HeapObjectRequest::kHeapNumber: { - Handle object = isolate->factory()->NewHeapNumber( - request.heap_number(), AllocationType::kOld); + Handle object = + isolate->factory()->NewHeapNumber( + request.heap_number()); WriteUnalignedValue(pc, object); break; } @@ -1777,6 +1778,13 @@ void Assembler::emit_mov(Register dst, Immediate64 value, int size) { } } +void Assembler::movq_imm64(Register dst, int64_t value) { + EnsureSpace ensure_space(this); + emit_rex(dst, kInt64Size); + emit(0xB8 | dst.low_bits()); + emitq(static_cast(value)); +} + void Assembler::movq_heap_number(Register dst, double value) { EnsureSpace ensure_space(this); emit_rex(dst, kInt64Size); @@ -1963,6 +1971,13 @@ void Assembler::emit_repmovs(int size) { emit(0xA5); } +void Assembler::repstosq() { + EnsureSpace ensure_space(this); + emit(0xF3); + emit_rex_64(); + emit(0xAB); +} + void Assembler::mull(Register src) { EnsureSpace ensure_space(this); emit_optional_rex_32(src); @@ -4099,6 +4114,42 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1, emit_sse_operand(dst, src2); } +void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1, + XMMRegister src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmaps(byte op, XMMRegister dst, XMMRegister src1, + Operand src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW0); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1, + XMMRegister src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1); + emit(op); + emit_sse_operand(dst, src2); +} + +void Assembler::vfmapd(byte op, XMMRegister dst, XMMRegister src1, + Operand src2) { + DCHECK(IsEnabled(FMA3)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, src1, src2, kL128, k66, k0F38, kW1); + emit(op); + emit_sse_operand(dst, src2); +} + void Assembler::vmovd(XMMRegister dst, Register src) { DCHECK(IsEnabled(AVX)); EnsureSpace ensure_space(this); diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h index 7c69b4c4736dff..74cfd0ab850500 100644 --- a/deps/v8/src/codegen/x64/assembler-x64.h +++ b/deps/v8/src/codegen/x64/assembler-x64.h @@ -39,6 +39,7 @@ #include #include +#include #include #include "src/codegen/assembler.h" @@ -155,7 +156,9 @@ enum ScaleFactor : int8_t { times_4 = 2, times_8 = 3, times_int_size = times_4, - times_system_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4, + + times_half_system_pointer_size = times_4, + times_system_pointer_size = times_8, times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4, }; @@ -513,12 +516,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movq_string(Register dst, const StringConstantBase* str); - // Loads a 64-bit immediate into a register. + // Loads a 64-bit immediate into a register, potentially using the constant + // pool. void movq(Register dst, int64_t value) { movq(dst, Immediate64(value)); } void movq(Register dst, uint64_t value) { movq(dst, Immediate64(static_cast(value))); } + // Loads a 64-bit immediate into a register without using the constant pool. 
+ void movq_imm64(Register dst, int64_t value); + void movsxbl(Register dst, Register src); void movsxbl(Register dst, Operand src); void movsxbq(Register dst, Register src); @@ -531,12 +538,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movsxlq(Register dst, Operand src); // Repeated moves. - void repmovsb(); void repmovsw(); void repmovsl() { emit_repmovs(kInt32Size); } void repmovsq() { emit_repmovs(kInt64Size); } + // Repeated store of quadwords (fill RCX quadwords at [RDI] with RAX). + void repstosq(); + // Instruction to load from an immediate 64-bit pointer into RAX. void load_rax(Address value, RelocInfo::Mode rmode); void load_rax(ExternalReference ext); @@ -1295,6 +1304,36 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + void vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmaps(0xb8, dst, src1, src2); + } + void vfmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmaps(0xb8, dst, src1, src2); + } + void vfnmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmaps(0xbc, dst, src1, src2); + } + void vfnmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmaps(0xbc, dst, src1, src2); + } + void vfmaps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); + void vfmaps(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + + void vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmapd(0xb8, dst, src1, src2); + } + void vfmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmapd(0xb8, dst, src1, src2); + } + void vfnmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { + vfmapd(0xbc, dst, src1, src2); + } + void vfnmadd231pd(XMMRegister dst, XMMRegister src1, Operand src2) { + vfmapd(0xbc, dst, src1, src2); + } + void vfmapd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); + void vfmapd(byte op, XMMRegister dst, XMMRegister src1, Operand src2); + void vmovd(XMMRegister dst, Register src); void vmovd(XMMRegister dst, Operand src); void vmovd(Register dst, XMMRegister src); @@ -1330,7 +1369,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { impl(opcode, dst, src1, src2); \ } - AVX_SP_3(vsqrt, 0x51) + // vsqrtpd is defined by sqrtpd in SSE2_INSTRUCTION_LIST + AVX_S_3(vsqrt, 0x51) + AVX_3(vsqrtps, 0x51, vps) AVX_S_3(vadd, 0x58) AVX_S_3(vsub, 0x5c) AVX_S_3(vmul, 0x59) diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 4deeb1bc02df08..d02b95b38e1736 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -218,45 +218,45 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) { void TurboAssembler::LoadTaggedPointerField(Register destination, Operand field_operand) { -#ifdef V8_COMPRESS_POINTERS - DecompressTaggedPointer(destination, field_operand); -#else - mov_tagged(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DecompressTaggedPointer(destination, field_operand); + } else { + mov_tagged(destination, field_operand); + } } void TurboAssembler::LoadAnyTaggedField(Register destination, Operand field_operand, Register scratch) { -#ifdef V8_COMPRESS_POINTERS - DecompressAnyTagged(destination, field_operand, scratch); -#else - mov_tagged(destination, field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + 
DecompressAnyTagged(destination, field_operand, scratch); + } else { + mov_tagged(destination, field_operand); + } } void TurboAssembler::PushTaggedPointerField(Operand field_operand, Register scratch) { -#ifdef V8_COMPRESS_POINTERS - DCHECK(!field_operand.AddressUsesRegister(scratch)); - DecompressTaggedPointer(scratch, field_operand); - Push(scratch); -#else - Push(field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DCHECK(!field_operand.AddressUsesRegister(scratch)); + DecompressTaggedPointer(scratch, field_operand); + Push(scratch); + } else { + Push(field_operand); + } } void TurboAssembler::PushTaggedAnyField(Operand field_operand, Register scratch1, Register scratch2) { -#ifdef V8_COMPRESS_POINTERS - DCHECK(!AreAliased(scratch1, scratch2)); - DCHECK(!field_operand.AddressUsesRegister(scratch1)); - DCHECK(!field_operand.AddressUsesRegister(scratch2)); - DecompressAnyTagged(scratch1, field_operand, scratch2); - Push(scratch1); -#else - Push(field_operand); -#endif + if (COMPRESS_POINTERS_BOOL) { + DCHECK(!AreAliased(scratch1, scratch2)); + DCHECK(!field_operand.AddressUsesRegister(scratch1)); + DCHECK(!field_operand.AddressUsesRegister(scratch2)); + DecompressAnyTagged(scratch1, field_operand, scratch2); + Push(scratch1); + } else { + Push(field_operand); + } } void TurboAssembler::SmiUntagField(Register dst, Operand src) { @@ -265,44 +265,40 @@ void TurboAssembler::SmiUntagField(Register dst, Operand src) { void TurboAssembler::StoreTaggedField(Operand dst_field_operand, Immediate value) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - movl(dst_field_operand, value); - RecordComment("]"); -#else - movq(dst_field_operand, value); -#endif + if (COMPRESS_POINTERS_BOOL) { + movl(dst_field_operand, value); + } else { + movq(dst_field_operand, value); + } } void TurboAssembler::StoreTaggedField(Operand dst_field_operand, Register value) { -#ifdef V8_COMPRESS_POINTERS - RecordComment("[ StoreTagged"); - movl(dst_field_operand, value); - RecordComment("]"); -#else - movq(dst_field_operand, value); -#endif + if (COMPRESS_POINTERS_BOOL) { + movl(dst_field_operand, value); + } else { + movq(dst_field_operand, value); + } } void TurboAssembler::DecompressTaggedSigned(Register destination, Operand field_operand) { RecordComment("[ DecompressTaggedSigned"); - movsxlq(destination, field_operand); + movl(destination, field_operand); RecordComment("]"); } void TurboAssembler::DecompressTaggedSigned(Register destination, Register source) { RecordComment("[ DecompressTaggedSigned"); - movsxlq(destination, source); + movl(destination, source); RecordComment("]"); } void TurboAssembler::DecompressTaggedPointer(Register destination, Operand field_operand) { RecordComment("[ DecompressTaggedPointer"); - movsxlq(destination, field_operand); + movl(destination, field_operand); addq(destination, kRootRegister); RecordComment("]"); } @@ -310,30 +306,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressTaggedPointer(Register destination, Register source) { RecordComment("[ DecompressTaggedPointer"); - movsxlq(destination, source); + movl(destination, source); addq(destination, kRootRegister); RecordComment("]"); } void TurboAssembler::DecompressRegisterAnyTagged(Register destination, Register scratch) { - if (kUseBranchlessPtrDecompressionInGeneratedCode) { - // Branchlessly compute |masked_root|: - // masked_root = HAS_SMI_TAG(destination) ? 
0 : kRootRegister; - STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32)); - Register masked_root = scratch; - xorq(masked_root, masked_root); - Condition smi = CheckSmi(destination); - cmovq(NegateCondition(smi), masked_root, kRootRegister); - // Now this add operation will either leave the value unchanged if it is - // a smi or add the isolate root if it is a heap object. - addq(destination, masked_root); - } else { - Label done; - JumpIfSmi(destination, &done); - addq(destination, kRootRegister); - bind(&done); - } + addq(destination, kRootRegister); } void TurboAssembler::DecompressAnyTagged(Register destination, @@ -341,7 +321,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register scratch) { DCHECK(!AreAliased(destination, scratch)); RecordComment("[ DecompressAnyTagged"); - movsxlq(destination, field_operand); + movl(destination, field_operand); DecompressRegisterAnyTagged(destination, scratch); RecordComment("]"); } @@ -350,7 +330,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source, Register scratch) { DCHECK(!AreAliased(destination, scratch)); RecordComment("[ DecompressAnyTagged"); - movsxlq(destination, source); + movl(destination, source); DecompressRegisterAnyTagged(destination, scratch); RecordComment("]"); } @@ -1109,7 +1089,11 @@ Register TurboAssembler::GetSmiConstant(Smi source) { xorl(kScratchRegister, kScratchRegister); return kScratchRegister; } - Move(kScratchRegister, source); + if (SmiValuesAre32Bits()) { + Move(kScratchRegister, source); + } else { + movl(kScratchRegister, Immediate(source)); + } return kScratchRegister; } @@ -1133,20 +1117,47 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) { movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE)); } -void MacroAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register reg) { STATIC_ASSERT(kSmiTag == 0); - if (dst != src) { + DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); + if (COMPRESS_POINTERS_BOOL) { + shll(reg, Immediate(kSmiShift)); + } else { + shlq(reg, Immediate(kSmiShift)); + } +} + +void MacroAssembler::SmiTag(Register dst, Register src) { + DCHECK(dst != src); + if (COMPRESS_POINTERS_BOOL) { + movl(dst, src); + } else { movq(dst, src); } + SmiTag(dst); +} + +void TurboAssembler::SmiUntag(Register reg) { + STATIC_ASSERT(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); - shlq(dst, Immediate(kSmiShift)); + // TODO(v8:7703): Is there a way to avoid this sign extension when pointer + // compression is enabled? + if (COMPRESS_POINTERS_BOOL) { + movsxlq(reg, reg); + } + sarq(reg, Immediate(kSmiShift)); } void TurboAssembler::SmiUntag(Register dst, Register src) { - STATIC_ASSERT(kSmiTag == 0); - if (dst != src) { + DCHECK(dst != src); + if (COMPRESS_POINTERS_BOOL) { + movsxlq(dst, src); + } else { movq(dst, src); } + // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra + // mov when pointer compression is enabled. 
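// Worked example (illustrative): with pointer compression, smis are 31-bit
// and kSmiShift == 1, so the smi for 5 is 0xa and untagging is a 1-bit
// arithmetic shift right after sign-extending the low 32 bits (the movsxlq
// above); without compression smis are 32-bit with kSmiShift == 32, so the
// payload sits in the upper half of the word and sarq by 32 recovers it.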
+ STATIC_ASSERT(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); sarq(dst, Immediate(kSmiShift)); } @@ -1158,12 +1169,13 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) { movsxlq(dst, dst); } else { DCHECK(SmiValuesAre31Bits()); -#ifdef V8_COMPRESS_POINTERS - movsxlq(dst, src); -#else - movq(dst, src); -#endif - sarq(dst, Immediate(kSmiShift)); + if (COMPRESS_POINTERS_BOOL) { + movsxlq(dst, src); + sarq(dst, Immediate(kSmiShift)); + } else { + movq(dst, src); + sarq(dst, Immediate(kSmiShift)); + } } } @@ -1283,12 +1295,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { return SmiIndex(dst, times_1); } else { DCHECK(SmiValuesAre31Bits()); - if (dst != src) { - mov_tagged(dst, src); - } // We have to sign extend the index register to 64-bit as the SMI might // be negative. - movsxlq(dst, dst); + movsxlq(dst, src); if (shift < kSmiShift) { sarq(dst, Immediate(kSmiShift - shift)); } else if (shift != kSmiShift) { @@ -1423,7 +1432,6 @@ void MacroAssembler::Negpd(XMMRegister dst) { } void MacroAssembler::Cmp(Register dst, Handle source) { - AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -1433,7 +1441,6 @@ void MacroAssembler::Cmp(Register dst, Handle source) { } void MacroAssembler::Cmp(Operand dst, Handle source) { - AllowDeferredHandleDereference smi_check; if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -1463,6 +1470,8 @@ void TurboAssembler::Move(Register result, Handle object, RelocInfo::Mode rmode) { if (FLAG_embedded_builtins) { if (root_array_available_ && options().isolate_independent_code) { + // TODO(v8:9706): Fix-it! This load will always uncompress the value + // even when we are loading a compressed embedded object. IndirectLoadConstant(result, object); return; } @@ -1605,26 +1614,20 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { } Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { -#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - - // The builtin_index register contains the builtin index as a Smi. - // Untagging is folded into the indexing operand below (we use times_4 instead - // of times_8 since smis are already shifted by one). - return Operand(kRootRegister, builtin_index, times_4, - IsolateData::builtin_entry_table_offset()); -#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) - STATIC_ASSERT(kSmiShiftSize == 31); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); + if (SmiValuesAre32Bits()) { + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index); + return Operand(kRootRegister, builtin_index, times_system_pointer_size, + IsolateData::builtin_entry_table_offset()); + } else { + DCHECK(SmiValuesAre31Bits()); - // The builtin_index register contains the builtin index as a Smi. - SmiUntag(builtin_index, builtin_index); - return Operand(kRootRegister, builtin_index, times_8, - IsolateData::builtin_entry_table_offset()); -#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) + // The builtin_index register contains the builtin index as a Smi. + // Untagging is folded into the indexing operand below (we use + // times_half_system_pointer_size since smis are already shifted by one). 
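// Arithmetic check (illustrative): a 31-bit smi holding builtin index i has
// raw value i << 1, and times_half_system_pointer_size scales by 4, so the
// operand below computes (i << 1) * 4 == i * kSystemPointerSize, exactly the
// byte offset of entry i in the builtin entry table.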
+ return Operand(kRootRegister, builtin_index, times_half_system_pointer_size, + IsolateData::builtin_entry_table_offset()); + } } void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { @@ -1739,7 +1742,11 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) { Movd(dst, src); return; } - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrd(dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pextrd(dst, src, imm8); return; @@ -1749,8 +1756,38 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) { shrq(dst, Immediate(32)); } +void TurboAssembler::Pextrw(Register dst, XMMRegister src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrw(dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pextrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pextrb(Register dst, XMMRegister src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpextrb(dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pextrb(dst, src, imm8); + return; + } +} + void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrd(dst, dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pinsrd(dst, src, imm8); return; @@ -1765,7 +1802,11 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) { } void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { - if (CpuFeatures::IsSupported(SSE4_1)) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrd(dst, dst, src, imm8); + return; + } else if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatureScope sse_scope(this, SSE4_1); pinsrd(dst, src, imm8); return; @@ -1779,6 +1820,56 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { } } +void TurboAssembler::Pinsrw(XMMRegister dst, Register src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrw(dst, dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrw(dst, dst, src, imm8); + return; + } else { + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrw(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrb(XMMRegister dst, Register src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrb(dst, dst, src, imm8); + return; + } else { + DCHECK(CpuFeatures::IsSupported(SSE4_1)); + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrb(dst, src, imm8); + return; + } +} + +void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpinsrb(dst, dst, src, imm8); + return; + } else { + CpuFeatureScope sse_scope(this, SSE4_1); + pinsrb(dst, src, imm8); + return; + } +} + void 
TurboAssembler::Psllq(XMMRegister dst, byte imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -1819,6 +1910,16 @@ void TurboAssembler::Psrld(XMMRegister dst, byte imm8) { } } +void TurboAssembler::Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpshufd(dst, src, shuffle); + } else { + DCHECK(!IsEnabled(AVX)); + pshufd(dst, src, shuffle); + } +} + void TurboAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); @@ -2278,7 +2379,16 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx); // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected, actual); + Label debug_hook, continue_after_hook; + { + ExternalReference debug_hook_active = + ExternalReference::debug_hook_on_function_call_address(isolate()); + Operand debug_hook_active_operand = + ExternalReferenceAsOperand(debug_hook_active); + cmpb(debug_hook_active_operand, Immediate(0)); + j(not_equal, &debug_hook, Label::kNear); + } + bind(&continue_after_hook); // Clear the new.target register if not given. if (!new_target.is_valid()) { @@ -2302,8 +2412,15 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, DCHECK(flag == JUMP_FUNCTION); JumpCodeObject(rcx); } - bind(&done); } + jmp(&done, Label::kNear); + + // Deferred debug hook. + bind(&debug_hook); + CallDebugOnFunctionCall(function, new_target, expected, actual); + jmp(&continue_after_hook, Label::kNear); + + bind(&done); } void MacroAssembler::InvokePrologue(const ParameterCount& expected, @@ -2368,50 +2485,38 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } } -void MacroAssembler::CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual) { - Label skip_hook; - ExternalReference debug_hook_active = - ExternalReference::debug_hook_on_function_call_address(isolate()); - Operand debug_hook_active_operand = - ExternalReferenceAsOperand(debug_hook_active); - cmpb(debug_hook_active_operand, Immediate(0)); - j(equal, &skip_hook); - - { - FrameScope frame(this, - has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); - if (expected.is_reg()) { - SmiTag(expected.reg(), expected.reg()); - Push(expected.reg()); - } - if (actual.is_reg()) { - SmiTag(actual.reg(), actual.reg()); - Push(actual.reg()); - SmiUntag(actual.reg(), actual.reg()); - } - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand()); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - if (actual.is_reg()) { - Pop(actual.reg()); - SmiUntag(actual.reg(), actual.reg()); - } - if (expected.is_reg()) { - Pop(expected.reg()); - SmiUntag(expected.reg(), expected.reg()); - } +void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual) { + FrameScope frame(this, has_frame() ? 
StackFrame::NONE : StackFrame::INTERNAL); + if (expected.is_reg()) { + SmiTag(expected.reg()); + Push(expected.reg()); + } + if (actual.is_reg()) { + SmiTag(actual.reg()); + Push(actual.reg()); + SmiUntag(actual.reg()); + } + if (new_target.is_valid()) { + Push(new_target); + } + Push(fun); + Push(fun); + Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand()); + CallRuntime(Runtime::kDebugOnFunctionCall); + Pop(fun); + if (new_target.is_valid()) { + Pop(new_target); + } + if (actual.is_reg()) { + Pop(actual.reg()); + SmiUntag(actual.reg()); + } + if (expected.is_reg()) { + Pop(expected.reg()); + SmiUntag(expected.reg()); } - bind(&skip_hook); } void TurboAssembler::StubPrologue(StackFrame::Type type) { @@ -2443,7 +2548,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { popq(rbp); } -#ifdef V8_OS_WIN +#ifdef V8_TARGET_OS_WIN void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { // In windows, we cannot increment the stack size by more than one page // (minimum page size is 4KB) without accessing at least one byte on the @@ -2511,7 +2616,7 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax, void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles) { -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kShadowSpace = 4; arg_stack_space += kShadowSpace; #endif @@ -2615,7 +2720,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() { movq(c_entry_fp_operand, Immediate(0)); } -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN static const int kRegisterPassedArguments = 4; #else static const int kRegisterPassedArguments = 6; @@ -2634,7 +2739,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers // and the caller does not reserve stack slots for them. DCHECK_GE(num_arguments, 0); -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kMinimumStackSlots = kRegisterPassedArguments; if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; return num_arguments; diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index 8e7766c7e1946c..f38da45788c162 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -152,8 +152,26 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP(Roundsd, roundsd) AVX_OP(Sqrtss, sqrtss) AVX_OP(Sqrtsd, sqrtsd) + AVX_OP(Sqrtpd, sqrtpd) AVX_OP(Ucomiss, ucomiss) AVX_OP(Ucomisd, ucomisd) + AVX_OP(Pshufb, pshufb) + AVX_OP(Paddusb, paddusb) + AVX_OP(Psignd, psignd) + AVX_OP(Pand, pand) + AVX_OP(Por, por) + AVX_OP(Pxor, pxor) + AVX_OP(Psubd, psubd) + AVX_OP(Pslld, pslld) + AVX_OP(Psrad, psrad) + AVX_OP(Psrld, psrld) + AVX_OP(Paddd, paddd) + AVX_OP(Pmulld, pmulld) + AVX_OP(Pminsd, pminsd) + AVX_OP(Pminud, pminud) + AVX_OP(Pmaxsd, pmaxsd) + AVX_OP(Pmaxud, pmaxud) + AVX_OP(Pcmpgtd, pcmpgtd) #undef AVX_OP @@ -314,6 +332,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT); // Convert smi to word-size sign-extended value. + void SmiUntag(Register reg); + // Requires dst != src void SmiUntag(Register dst, Register src); void SmiUntag(Register dst, Operand src); @@ -365,14 +385,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Non-SSE2 instructions. 
void Pextrd(Register dst, XMMRegister src, int8_t imm8); + void Pextrw(Register dst, XMMRegister src, int8_t imm8); + void Pextrb(Register dst, XMMRegister src, int8_t imm8); void Pinsrd(XMMRegister dst, Register src, int8_t imm8); void Pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void Pinsrw(XMMRegister dst, Register src, int8_t imm8); + void Pinsrw(XMMRegister dst, Operand src, int8_t imm8); + void Pinsrb(XMMRegister dst, Register src, int8_t imm8); + void Pinsrb(XMMRegister dst, Operand src, int8_t imm8); void Psllq(XMMRegister dst, byte imm8); void Psrlq(XMMRegister dst, byte imm8); void Pslld(XMMRegister dst, byte imm8); void Psrld(XMMRegister dst, byte imm8); + void Pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle); + void CompareRoot(Register with, RootIndex index); void CompareRoot(Operand with, RootIndex index); @@ -414,7 +442,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // stack check, do it before calling this function because this function may // write into the newly allocated space. It may also overwrite the given // register's value, in the version that takes a register. -#ifdef V8_OS_WIN +#ifdef V8_TARGET_OS_WIN void AllocateStackSpace(Register bytes_scratch); void AllocateStackSpace(int bytes); #else @@ -647,10 +675,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag); - // On function call, call into the debugger if necessary. - void CheckDebugHook(Register fun, Register new_target, - const ParameterCount& expected, - const ParameterCount& actual); + // On function call, call into the debugger. + void CallDebugOnFunctionCall(Register fun, Register new_target, + const ParameterCount& expected, + const ParameterCount& actual); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. @@ -665,6 +693,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Conversions between tagged smi values and non-tagged integer values. // Tag an word-size value. The result must be known to be a valid smi value. + void SmiTag(Register reg); + // Requires dst != src void SmiTag(Register dst, Register src); // Simple comparison of smis. Both sides must be known smis to use these, @@ -917,7 +947,7 @@ inline Operand NativeContextOperand() { // Provides access to exit frame stack space (not GCed). inline Operand StackSpaceOperand(int index) { -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN const int kShaddowSpace = 4; return Operand(rsp, (index + kShaddowSpace) * kSystemPointerSize); #else diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h index 199571f088defd..181da9d9f3a20d 100644 --- a/deps/v8/src/codegen/x64/register-x64.h +++ b/deps/v8/src/codegen/x64/register-x64.h @@ -88,7 +88,7 @@ constexpr int kNumJSCallerSaved = 5; // Number of registers for which space is reserved in safepoints. 
constexpr int kNumSafepointRegisters = 16; -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // Windows calling convention constexpr Register arg_reg_1 = rcx; constexpr Register arg_reg_2 = rdx; @@ -100,7 +100,7 @@ constexpr Register arg_reg_1 = rdi; constexpr Register arg_reg_2 = rsi; constexpr Register arg_reg_3 = rdx; constexpr Register arg_reg_4 = rcx; -#endif // _WIN64 +#endif // V8_TARGET_OS_WIN #define DOUBLE_REGISTERS(V) \ V(xmm0) \ diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h index 8ba54e85b42ec1..8af06ae92c8d63 100644 --- a/deps/v8/src/codegen/x64/sse-instr.h +++ b/deps/v8/src/codegen/x64/sse-instr.h @@ -6,6 +6,7 @@ #define V8_CODEGEN_X64_SSE_INSTR_H_ #define SSE2_INSTRUCTION_LIST(V) \ + V(sqrtpd, 66, 0F, 51) \ V(andnpd, 66, 0F, 55) \ V(addpd, 66, 0F, 58) \ V(mulpd, 66, 0F, 59) \ diff --git a/deps/v8/src/common/assert-scope.cc b/deps/v8/src/common/assert-scope.cc index f1fe717cc0983c..5138ce7122129d 100644 --- a/deps/v8/src/common/assert-scope.cc +++ b/deps/v8/src/common/assert-scope.cc @@ -126,8 +126,6 @@ template class PerThreadAssertScope; template class PerThreadAssertScope; template class PerThreadAssertScope; template class PerThreadAssertScope; -template class PerThreadAssertScope; -template class PerThreadAssertScope; template class PerThreadAssertScope; template class PerThreadAssertScope; diff --git a/deps/v8/src/common/assert-scope.h b/deps/v8/src/common/assert-scope.h index 73729400ac6c95..27f411214452f7 100644 --- a/deps/v8/src/common/assert-scope.h +++ b/deps/v8/src/common/assert-scope.h @@ -28,7 +28,6 @@ enum PerThreadAssertType { HEAP_ALLOCATION_ASSERT, HANDLE_ALLOCATION_ASSERT, HANDLE_DEREFERENCE_ASSERT, - DEFERRED_HANDLE_DEREFERENCE_ASSERT, CODE_DEPENDENCY_CHANGE_ASSERT, LAST_PER_THREAD_ASSERT_TYPE }; @@ -145,19 +144,11 @@ using DisallowHandleDereference = using AllowHandleDereference = PerThreadAssertScopeDebugOnly; -// Scope to document where we do not expect deferred handles to be dereferenced. -using DisallowDeferredHandleDereference = - PerThreadAssertScopeDebugOnly; - -// Scope to introduce an exception to DisallowDeferredHandleDereference. -using AllowDeferredHandleDereference = - PerThreadAssertScopeDebugOnly; - -// Scope to document where we do not expect deferred handles to be dereferenced. +// Scope to document where we do not expect code dependencies to change. using DisallowCodeDependencyChange = PerThreadAssertScopeDebugOnly; -// Scope to introduce an exception to DisallowDeferredHandleDereference. +// Scope to introduce an exception to DisallowCodeDependencyChange. using AllowCodeDependencyChange = PerThreadAssertScopeDebugOnly; @@ -243,10 +234,6 @@ extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; -extern template class PerThreadAssertScope; -extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; extern template class PerThreadAssertScope; diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index a0584b95c40475..9d5771f6946380 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -166,13 +166,14 @@ constexpr int kElidedFrameSlots = 0; #endif constexpr int kDoubleSizeLog2 = 3; +constexpr size_t kMaxWasmCodeMB = 1024; +constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB; #if V8_TARGET_ARCH_ARM64 // ARM64 only supports direct calls within a 128 MB range. 
-constexpr size_t kMaxWasmCodeMB = 128; +constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB; #else -constexpr size_t kMaxWasmCodeMB = 1024; +constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory; #endif -constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB; #if V8_HOST_ARCH_64_BIT constexpr int kSystemPointerSizeLog2 = 3; @@ -230,7 +231,7 @@ constexpr int kTaggedSizeLog2 = 2; // These types define raw and atomic storage types for tagged values stored // on V8 heap. -using Tagged_t = int32_t; +using Tagged_t = uint32_t; using AtomicTagged_t = base::Atomic32; #else @@ -245,11 +246,6 @@ using AtomicTagged_t = base::AtomicWord; #endif // V8_COMPRESS_POINTERS -// Defines whether the branchless or branchful implementation of pointer -// decompression should be used. -constexpr bool kUseBranchlessPtrDecompressionInRuntime = false; -constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false; - STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2)); STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES); @@ -404,6 +400,7 @@ enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; // Enums used by CEntry. enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; enum ArgvMode { kArgvOnStack, kArgvInRegister }; +enum FunctionDescriptorMode { kNoFunctionDescriptor, kHasFunctionDescriptor }; // This constant is used as an undefined value when passing source positions. constexpr int kNoSourcePosition = -1; @@ -795,8 +792,6 @@ enum InlineCacheState { NO_FEEDBACK, // Has never been executed. UNINITIALIZED, - // Has been executed but monomorphic state has been delayed. - PREMONOMORPHIC, // Has been executed and only one receiver type has been seen. MONOMORPHIC, // Check failed due to prototype (or map deprecation). @@ -816,8 +811,6 @@ inline const char* InlineCacheState2String(InlineCacheState state) { return "NOFEEDBACK"; case UNINITIALIZED: return "UNINITIALIZED"; - case PREMONOMORPHIC: - return "PREMONOMORPHIC"; case MONOMORPHIC: return "MONOMORPHIC"; case RECOMPUTE_HANDLER: @@ -1216,6 +1209,10 @@ enum VariableLocation : uint8_t { // immediately initialized upon creation (kCreatedInitialized). enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized }; +// Static variables can only be used with the class in the closest +// class scope as receivers. +enum class IsStaticFlag : uint8_t { kNotStatic, kStatic }; + enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned }; enum class InterpreterPushArgsMode : unsigned { diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h index e3307a525c81fa..41af7b8f18a733 100644 --- a/deps/v8/src/common/message-template.h +++ b/deps/v8/src/common/message-template.h @@ -10,7 +10,6 @@ namespace v8 { namespace internal { -// TODO(913887): fix the use of 'neuter' in these error messages. 
#define MESSAGE_TEMPLATES(T) \ /* Error */ \ T(None, "") \ @@ -34,7 +33,6 @@ namespace internal { "Derived ArrayBuffer constructor created a buffer which was too small") \ T(ArrayBufferSpeciesThis, \ "ArrayBuffer subclass returned this from species constructor") \ - T(ArrayItemNotType, "array %[%] is not type %") \ T(AwaitNotInAsyncFunction, "await is only valid in async function") \ T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \ T(BadSortComparisonFunction, \ @@ -78,7 +76,7 @@ namespace internal { T(DebuggerType, "Debugger: Parameters have wrong types.") \ T(DeclarationMissingInitializer, "Missing initializer in % declaration") \ T(DefineDisallowed, "Cannot define property %, object is not extensible") \ - T(DetachedOperation, "Cannot perform % on a neutered ArrayBuffer") \ + T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \ T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \ T(ExtendsValueNotConstructor, \ "Class extends value % is not a constructor or null") \ @@ -101,6 +99,7 @@ namespace internal { T(InvalidRegExpExecResult, \ "RegExp exec method returned something other than an Object or null") \ T(InvalidUnit, "Invalid unit argument for %() '%'") \ + T(IterableYieldedNonString, "Iterable yielded % which is not a string") \ T(IteratorResultNotAnObject, "Iterator result % is not an object") \ T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \ T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \ @@ -540,6 +539,7 @@ namespace internal { T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \ T(WasmTrapFuncInvalid, "invalid index into function table") \ T(WasmTrapFuncSigMismatch, "function signature mismatch") \ + T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \ T(WasmTrapTypeError, "wasm function signature contains illegal type") \ T(WasmTrapDataSegmentDropped, "data segment has been dropped") \ T(WasmTrapElemSegmentDropped, "element segment has been dropped") \ @@ -554,7 +554,7 @@ namespace internal { T(DataCloneError, "% could not be cloned.") \ T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \ T(DataCloneErrorDetachedArrayBuffer, \ - "An ArrayBuffer is neutered and could not be cloned.") \ + "An ArrayBuffer is detached and could not be cloned.") \ T(DataCloneErrorSharedArrayBufferTransferred, \ "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \ "transferred.") \ diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h index a8fd7f245cb90c..17239d15c2783a 100644 --- a/deps/v8/src/common/ptr-compr-inl.h +++ b/deps/v8/src/common/ptr-compr-inl.h @@ -29,8 +29,7 @@ V8_INLINE Address GetIsolateRoot
(Address on_heap_addr) { // signed constant instead of 64-bit constant (the problem is that 2Gb looks // like a negative 32-bit value). It's correct because we will never use // leftmost address of V8 heap as |on_heap_addr|. - return RoundDown(on_heap_addr + - kPtrComprIsolateRootBias - 1); + return RoundDown(on_heap_addr); } template <> @@ -54,37 +53,20 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { template V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast(static_cast(raw_value)); - Address root = GetIsolateRoot(on_heap_addr); - return root + static_cast
(value); + return GetIsolateRoot(on_heap_addr) + static_cast
(raw_value); } // Decompresses any tagged value, preserving both weak- and smi- tags. template V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - if (kUseBranchlessPtrDecompressionInRuntime) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast(static_cast(raw_value)); - // |root_mask| is 0 if the |value| was a smi or -1 otherwise. - Address root_mask = static_cast
(-(value & kSmiTagMask)); - Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr); - return root_or_zero + static_cast
(value); - } else { - return HAS_SMI_TAG(raw_value) - ? DecompressTaggedSigned(raw_value) - : DecompressTaggedPointer(on_heap_addr, raw_value); - } + return DecompressTaggedPointer(on_heap_addr, raw_value); } #ifdef V8_COMPRESS_POINTERS STATIC_ASSERT(kPtrComprHeapReservationSize == Internals::kPtrComprHeapReservationSize); -STATIC_ASSERT(kPtrComprIsolateRootBias == Internals::kPtrComprIsolateRootBias); STATIC_ASSERT(kPtrComprIsolateRootAlignment == Internals::kPtrComprIsolateRootAlignment); diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h index 5b4a74e7e316fb..105d5f1a4f65b0 100644 --- a/deps/v8/src/common/ptr-compr.h +++ b/deps/v8/src/common/ptr-compr.h @@ -14,7 +14,6 @@ namespace internal { // See v8:7703 for details about how pointer compression works. constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB; -constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2; constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB; } // namespace internal diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc index e1d47d30a61285..42d64b66145bf9 100644 --- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc +++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc @@ -6,7 +6,6 @@ #include "src/ast/ast.h" #include "src/base/platform/time.h" -#include "src/base/template-utils.h" #include "src/codegen/compiler.h" #include "src/flags/flags.h" #include "src/handles/global-handles.h" @@ -66,7 +65,7 @@ base::Optional CompilerDispatcher::Enqueue( if (!IsEnabled()) return base::nullopt; - std::unique_ptr job = base::make_unique(new BackgroundCompileTask( + std::unique_ptr job = std::make_unique(new BackgroundCompileTask( allocator_, outer_parse_info, function_name, function_literal, worker_thread_runtime_call_stats_, background_compile_timer_, static_cast(max_stack_size_))); diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc index fbaeaa73f87398..3d2342e9a22bf0 100644 --- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc +++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc @@ -5,7 +5,6 @@ #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" #include "src/base/atomicops.h" -#include "src/base/template-utils.h" #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" #include "src/execution/isolate.h" @@ -244,14 +243,14 @@ void OptimizingCompileDispatcher::QueueForOptimization( blocked_jobs_++; } else { V8::GetCurrentPlatform()->CallOnWorkerThread( - base::make_unique(isolate_, this)); + std::make_unique(isolate_, this)); } } void OptimizingCompileDispatcher::Unblock() { while (blocked_jobs_ > 0) { V8::GetCurrentPlatform()->CallOnWorkerThread( - base::make_unique(isolate_, this)); + std::make_unique(isolate_, this)); blocked_jobs_--; } } diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS index 50e2af71290003..204c0ba115e3c9 100644 --- a/deps/v8/src/compiler/OWNERS +++ b/deps/v8/src/compiler/OWNERS @@ -8,11 +8,12 @@ tebbi@chromium.org neis@chromium.org mvstanton@chromium.org mslekova@chromium.org +jgruber@chromium.org per-file wasm-*=ahaas@chromium.org per-file wasm-*=bbudge@chromium.org per-file wasm-*=binji@chromium.org -per-file wasm-*=clemensh@chromium.org +per-file wasm-*=clemensb@chromium.org per-file wasm-*=gdeepti@chromium.org per-file int64-lowering.*=ahaas@chromium.org 
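The ptr-compr-inl.h and globals.h hunks above simplify pointer decompression: Tagged_t becomes an unsigned 32-bit value, the isolate-root bias and the branchless decompression paths are dropped, and decompression reduces to rounding the on-heap address down to the 4 GB-aligned isolate root and adding the zero-extended offset. A minimal standalone sketch of that arithmetic (simplified types, not the patch's actual code):

#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;  // compressed tagged value, now zero-extended

constexpr Address kIsolateRootAlignment = Address{4} * 1024 * 1024 * 1024;  // 4 GB

// Any address inside the heap reservation rounds down to the isolate root;
// there is no longer a half-reservation bias or a sign-extension step.
inline Address GetIsolateRoot(Address on_heap_addr) {
  return on_heap_addr & ~(kIsolateRootAlignment - 1);
}

inline Address DecompressTaggedPointer(Address on_heap_addr, Tagged_t raw_value) {
  return GetIsolateRoot(on_heap_addr) + raw_value;  // plain add of a 32-bit offset
}

// DecompressTaggedAny now simply forwards to DecompressTaggedPointer, since
// Smis and heap object pointers are decompressed the same way in this scheme.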
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 7a72be80284c05..e6c5568af03a41 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -23,10 +23,9 @@ namespace internal { namespace compiler { // static -FieldAccess AccessBuilder::ForExternalTaggedValue() { - FieldAccess access = {kUntaggedBase, 0, - MaybeHandle(), MaybeHandle(), - Type::Any(), MachineType::AnyTagged(), +FieldAccess AccessBuilder::ForExternalIntPtr() { + FieldAccess access = {kUntaggedBase, 0, MaybeHandle(), + MaybeHandle(), Type::Any(), MachineType::IntPtr(), kNoWriteBarrier}; return access; } @@ -109,7 +108,6 @@ FieldAccess AccessBuilder::ForJSObjectElements() { return access; } - // static FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map, int index) { @@ -185,7 +183,6 @@ FieldAccess AccessBuilder::ForJSFunctionContext() { return access; } - // static FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() { FieldAccess access = { @@ -296,7 +293,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() { return access; } - // static FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() { FieldAccess access = { @@ -478,7 +474,6 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) { return access; } - // static FieldAccess AccessBuilder::ForJSIteratorResultDone() { FieldAccess access = { @@ -489,7 +484,6 @@ FieldAccess AccessBuilder::ForJSIteratorResultDone() { return access; } - // static FieldAccess AccessBuilder::ForJSIteratorResultValue() { FieldAccess access = { @@ -540,7 +534,6 @@ FieldAccess AccessBuilder::ForJSRegExpSource() { return access; } - // static FieldAccess AccessBuilder::ForFixedArrayLength() { FieldAccess access = {kTaggedBase, @@ -600,7 +593,6 @@ FieldAccess AccessBuilder::ForMapBitField3() { return access; } - // static FieldAccess AccessBuilder::ForMapDescriptors() { FieldAccess access = { @@ -611,7 +603,6 @@ FieldAccess AccessBuilder::ForMapDescriptors() { return access; } - // static FieldAccess AccessBuilder::ForMapInstanceType() { FieldAccess access = { @@ -621,7 +612,6 @@ FieldAccess AccessBuilder::ForMapInstanceType() { return access; } - // static FieldAccess AccessBuilder::ForMapPrototype() { FieldAccess access = { @@ -810,7 +800,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorString() { // static FieldAccess AccessBuilder::ForJSStringIteratorIndex() { FieldAccess access = {kTaggedBase, - JSStringIterator::kNextIndexOffset, + JSStringIterator::kIndexOffset, Handle(), MaybeHandle(), TypeCache::Get()->kStringLengthType, @@ -829,7 +819,6 @@ FieldAccess AccessBuilder::ForArgumentsLength() { return access; } - // static FieldAccess AccessBuilder::ForArgumentsCallee() { FieldAccess access = { @@ -840,7 +829,6 @@ FieldAccess AccessBuilder::ForArgumentsCallee() { return access; } - // static FieldAccess AccessBuilder::ForFixedArraySlot( size_t index, WriteBarrierKind write_barrier_kind) { @@ -852,7 +840,6 @@ FieldAccess AccessBuilder::ForFixedArraySlot( return access; } - // static FieldAccess AccessBuilder::ForCellValue() { FieldAccess access = {kTaggedBase, Cell::kValueOffset, @@ -937,7 +924,7 @@ ElementAccess AccessBuilder::ForStackArgument() { ElementAccess access = { kUntaggedBase, CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize, - Type::NonInternal(), MachineType::AnyTagged(), + Type::NonInternal(), MachineType::Pointer(), WriteBarrierKind::kNoWriteBarrier}; return access; } diff --git 
a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index 231e75f819587b..4aa69e3726e0fc 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -24,11 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final // =========================================================================== // Access to external values (based on external references). - // Provides access to a tagged field identified by an external reference. - static FieldAccess ForExternalTaggedValue(); - - // Provides access to an uint8 field identified by an external reference. - static FieldAccess ForExternalUint8Value(); + // Provides access to an IntPtr field identified by an external reference. + static FieldAccess ForExternalIntPtr(); // =========================================================================== // Access to heap object fields and elements (based on tagged pointer). diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc index 269ef903751ee7..dcdd1de831a4f7 100644 --- a/deps/v8/src/compiler/access-info.cc +++ b/deps/v8/src/compiler/access-info.cc @@ -31,9 +31,9 @@ bool CanInlinePropertyAccess(Handle map) { // We can inline property access to prototypes of all primitives, except // the special Oddball ones that have no wrapper counterparts (i.e. Null, // Undefined and TheHole). - STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE); + STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_HEAP_OBJECT_TYPE); if (map->IsBooleanMap()) return true; - if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true; + if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true; return map->IsJSObjectMap() && !map->is_dictionary_map() && !map->has_named_interceptor() && // TODO(verwaest): Whitelist contexts to which we have access. @@ -323,8 +323,8 @@ bool AccessInfoFactory::ComputeElementAccessInfos( PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( Handle receiver_map, Handle map, MaybeHandle holder, - int descriptor, AccessMode access_mode) const { - DCHECK_NE(descriptor, DescriptorArray::kNotFound); + InternalIndex descriptor, AccessMode access_mode) const { + DCHECK(descriptor.is_found()); Handle descriptors(map->instance_descriptors(), isolate()); PropertyDetails const details = descriptors->GetDetails(descriptor); int index = descriptors->GetFieldIndex(descriptor); @@ -351,6 +351,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( descriptor)); } else if (details_representation.IsDouble()) { field_type = type_cache_->kFloat64; + if (!FLAG_unbox_double_fields) { + unrecorded_dependencies.push_back( + dependencies()->FieldRepresentationDependencyOffTheRecord( + map_ref, descriptor)); + } } else if (details_representation.IsHeapObject()) { // Extract the field type from the property details (make sure its // representation is TaggedPointer to reflect the heap object case). 
@@ -408,9 +413,9 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo( Handle receiver_map, Handle name, Handle map, - MaybeHandle holder, int descriptor, + MaybeHandle holder, InternalIndex descriptor, AccessMode access_mode) const { - DCHECK_NE(descriptor, DescriptorArray::kNotFound); + DCHECK(descriptor.is_found()); Handle descriptors(map->instance_descriptors(), isolate()); SLOW_DCHECK(descriptor == descriptors->Search(*name, *map)); if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) { @@ -497,8 +502,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo( while (true) { // Lookup the named property on the {map}. Handle descriptors(map->instance_descriptors(), isolate()); - int const number = descriptors->Search(*name, *map); - if (number != DescriptorArray::kNotFound) { + InternalIndex const number = descriptors->Search(*name, *map); + if (number.is_found()) { PropertyDetails const details = descriptors->GetDetails(number); if (access_mode == AccessMode::kStore || access_mode == AccessMode::kStoreInLiteral) { @@ -762,7 +767,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( } Handle transition_map(transition, isolate()); - int const number = transition_map->LastAdded(); + InternalIndex const number = transition_map->LastAdded(); PropertyDetails const details = transition_map->instance_descriptors().GetDetails(number); // Don't bother optimizing stores to read-only properties. @@ -789,6 +794,12 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( transition_map_ref, number)); } else if (details_representation.IsDouble()) { field_type = type_cache_->kFloat64; + if (!FLAG_unbox_double_fields) { + transition_map_ref.SerializeOwnDescriptor(number); + unrecorded_dependencies.push_back( + dependencies()->FieldRepresentationDependencyOffTheRecord( + transition_map_ref, number)); + } } else if (details_representation.IsHeapObject()) { // Extract the field type from the property details (make sure its // representation is TaggedPointer to reflect the heap object case). 
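The access-info.cc hunks above switch descriptor lookups from raw int indices compared against DescriptorArray::kNotFound to the InternalIndex type queried via is_found(). A simplified stand-in for illustration (not V8's actual class definition):

#include <cstddef>
#include <limits>

// Minimal sketch of an InternalIndex-style wrapper: a typed descriptor index
// whose "not found" state is an explicit query instead of a sentinel compare.
class InternalIndex {
 public:
  static constexpr size_t kNotFound = std::numeric_limits<size_t>::max();

  constexpr explicit InternalIndex(size_t value) : value_(value) {}
  static constexpr InternalIndex NotFound() { return InternalIndex(kNotFound); }

  constexpr bool is_found() const { return value_ != kNotFound; }
  constexpr bool is_not_found() const { return value_ == kNotFound; }
  constexpr size_t raw_value() const { return value_; }

 private:
  size_t value_;
};

// Call sites then follow the pattern used in the diff:
//   InternalIndex const number = descriptors->Search(*name, *map);
//   if (number.is_found()) { ... use number ... }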
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h index e2f6e6d453da7f..59101e2cc90621 100644 --- a/deps/v8/src/compiler/access-info.h +++ b/deps/v8/src/compiler/access-info.h @@ -204,11 +204,11 @@ class AccessInfoFactory final { PropertyAccessInfo ComputeDataFieldAccessInfo(Handle receiver_map, Handle map, MaybeHandle holder, - int descriptor, + InternalIndex descriptor, AccessMode access_mode) const; PropertyAccessInfo ComputeAccessorDescriptorAccessInfo( Handle receiver_map, Handle name, Handle map, - MaybeHandle holder, int descriptor, + MaybeHandle holder, InternalIndex descriptor, AccessMode access_mode) const; void MergePropertyAccessInfos(ZoneVector infos, diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index 65a569d755b1fd..3fe5361083895a 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -44,7 +44,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { UNREACHABLE(); } - Operand InputImmediate(size_t index) { + Operand InputImmediate(size_t index) const { return ToImmediate(instr_->InputAt(index)); } @@ -111,7 +111,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { return InputOffset(&first_index); } - Operand ToImmediate(InstructionOperand* operand) { + Operand ToImmediate(InstructionOperand* operand) const { Constant constant = ToConstant(operand); switch (constant.type()) { case Constant::kInt32: @@ -153,9 +153,6 @@ class ArmOperandConverter final : public InstructionOperandConverter { NeonMemOperand NeonInputOperand(size_t first_index) { const size_t index = first_index; switch (AddressingModeField::decode(instr_->opcode())) { - case kMode_Offset_RR: - return NeonMemOperand(InputRegister(index + 0), - InputRegister(index + 1)); case kMode_Operand2_R: return NeonMemOperand(InputRegister(index + 0)); default: @@ -309,9 +306,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, - ArmOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, + ArmOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -320,10 +317,10 @@ void EmitWordLoadPoisoningIfNeeded( } } -void ComputePoisonedAddressForLoad( - CodeGenerator* codegen, InstructionCode opcode, - ArmOperandConverter& i, // NOLINT(runtime/references) - Register address) { +void ComputePoisonedAddressForLoad(CodeGenerator* codegen, + InstructionCode opcode, + ArmOperandConverter const& i, + Register address) { DCHECK_EQ(kMemoryAccessPoisoned, static_cast(MiscField::decode(opcode))); switch (AddressingModeField::decode(opcode)) { @@ -1798,6 +1795,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } + case kArmF32x4Sqrt: { + QwNeonRegister dst = i.OutputSimd128Register(); + QwNeonRegister src1 = i.InputSimd128Register(0); + DCHECK_EQ(dst, q0); + DCHECK_EQ(src1, q0); +#define S_FROM_Q(reg, lane) SwVfpRegister::from_code(reg.code() * 4 + lane) + __ vsqrt(S_FROM_Q(dst, 0), S_FROM_Q(src1, 0)); + __ vsqrt(S_FROM_Q(dst, 1), S_FROM_Q(src1, 1)); + __ vsqrt(S_FROM_Q(dst, 2), S_FROM_Q(src1, 2)); + __ vsqrt(S_FROM_Q(dst, 3), 
S_FROM_Q(src1, 3)); +#undef S_FROM_Q + break; + } case kArmF32x4RecipApprox: { __ vrecpe(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; @@ -1919,14 +1929,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. + __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kArmI32x4ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. + __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vneg(Neon32, tmp, tmp); __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -1998,7 +2014,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon32, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 32. + __ and_(shift, i.InputRegister(1), Operand(31)); + __ vdup(Neon32, tmp, shift); __ vneg(Neon32, tmp, tmp); __ vshl(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2029,7 +2048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI16x8ExtractLane: { - __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16, + __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU16, i.InputInt8(1)); break; } @@ -2054,14 +2073,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kArmI16x8ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vneg(Neon16, tmp, tmp); __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2142,7 +2167,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon16, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 16. + __ and_(shift, i.InputRegister(1), Operand(15)); + __ vdup(Neon16, tmp, shift); __ vneg(Neon16, tmp, tmp); __ vshl(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2186,7 +2214,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI8x16ExtractLane: { - __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8, + __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonU8, i.InputInt8(1)); break; } @@ -2201,6 +2229,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16Shl: { QwNeonRegister tmp = i.TempSimd128Register(0); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. 
+ __ and_(shift, i.InputRegister(1), Operand(7)); __ vdup(Neon8, tmp, i.InputRegister(1)); __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2208,7 +2239,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16ShrS: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon8, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. + __ and_(shift, i.InputRegister(1), Operand(7)); + __ vdup(Neon8, tmp, shift); __ vneg(Neon8, tmp, tmp); __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); @@ -2275,7 +2309,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16ShrU: { QwNeonRegister tmp = i.TempSimd128Register(0); - __ vdup(Neon8, tmp, i.InputRegister(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 8. + __ and_(shift, i.InputRegister(1), Operand(7)); + __ vdup(Neon8, tmp, shift); __ vneg(Neon8, tmp, tmp); __ vshl(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h index 3551e26aea8832..d398ec0ed6e2f2 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h +++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h @@ -135,6 +135,7 @@ namespace compiler { V(ArmF32x4UConvertI32x4) \ V(ArmF32x4Abs) \ V(ArmF32x4Neg) \ + V(ArmF32x4Sqrt) \ V(ArmF32x4RecipApprox) \ V(ArmF32x4RecipSqrtApprox) \ V(ArmF32x4Add) \ diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc index 1d7cf61dfe7374..92be55dcc3d662 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc @@ -115,6 +115,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArmF32x4UConvertI32x4: case kArmF32x4Abs: case kArmF32x4Neg: + case kArmF32x4Sqrt: case kArmF32x4RecipApprox: case kArmF32x4RecipSqrtApprox: case kArmF32x4Add: diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc index ce74faa4a62422..303648051f8d85 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/base/enum-set.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -94,7 +94,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { ArmOperandGenerator g(selector); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()}; selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), arraysize(temps), temps); @@ -352,6 +352,26 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode, } } +// Adds the base and offset into a register, then change the addressing +// mode of opcode_return to use this register. 
Certain instructions, e.g. +// vld1 and vst1, when given two registers, will post-increment the offset, i.e. +// perform the operation at base, then add offset to base. What we intend is to +// access at (base+offset). +void EmitAddBeforeS128LoadStore(InstructionSelector* selector, + InstructionCode* opcode_return, + size_t* input_count_return, + InstructionOperand* inputs) { + DCHECK(*opcode_return == kArmVld1S128 || *opcode_return == kArmVst1S128); + ArmOperandGenerator g(selector); + InstructionOperand addr = g.TempRegister(); + InstructionCode op = kArmAdd; + op |= AddressingModeField::encode(kMode_Operand2_R); + selector->Emit(op, 1, &addr, 2, inputs); + *opcode_return |= AddressingModeField::encode(kMode_Operand2_R); + *input_count_return -= 1; + inputs[0] = addr; +} + void EmitLoad(InstructionSelector* selector, InstructionCode opcode, InstructionOperand* output, Node* base, Node* index) { ArmOperandGenerator g(selector); @@ -368,7 +388,11 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode, input_count = 3; } else { inputs[1] = g.UseRegister(index); - opcode |= AddressingModeField::encode(kMode_Offset_RR); + if (opcode == kArmVld1S128) { + EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[0]); + } else { + opcode |= AddressingModeField::encode(kMode_Offset_RR); + } } selector->Emit(opcode, 1, output, input_count, inputs); } @@ -386,7 +410,12 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode, input_count = 4; } else { inputs[input_count++] = g.UseRegister(index); - opcode |= AddressingModeField::encode(kMode_Offset_RR); + if (opcode == kArmVst1S128) { + // Inputs are value, base, index, only care about base and index. + EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]); + } else { + opcode |= AddressingModeField::encode(kMode_Offset_RR); + } } selector->Emit(opcode, 0, nullptr, input_count, inputs); } @@ -596,8 +625,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp); return; } - case MachineRepresentation::kFloat64: - case MachineRepresentation::kSimd128: { + case MachineRepresentation::kFloat64: { // Compute the address of the least-significant byte of the FP value. // We assume that the base node is unlikely to be an encodable immediate // or the result of a shift operation, so only consider the addressing @@ -623,13 +651,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { if (CpuFeatures::IsSupported(NEON)) { // With NEON we can load directly from the calculated address. - InstructionCode op = load_rep == MachineRepresentation::kFloat64 - ? kArmVld1F64 - : kArmVld1S128; + InstructionCode op = kArmVld1F64; op |= AddressingModeField::encode(kMode_Operand2_R); Emit(op, g.DefineAsRegister(node), addr); } else { - DCHECK_NE(MachineRepresentation::kSimd128, load_rep); // Load both halves and move to an FP register. 
InstructionOperand fp_lo = g.TempRegister(); InstructionOperand fp_hi = g.TempRegister(); @@ -670,8 +695,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) { EmitStore(this, kArmStr, input_count, inputs, index); return; } - case MachineRepresentation::kFloat64: - case MachineRepresentation::kSimd128: { + case MachineRepresentation::kFloat64: { if (CpuFeatures::IsSupported(NEON)) { InstructionOperand address = g.TempRegister(); { @@ -697,13 +721,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) { inputs[input_count++] = g.UseRegister(value); inputs[input_count++] = address; - InstructionCode op = store_rep == MachineRepresentation::kFloat64 - ? kArmVst1F64 - : kArmVst1S128; + InstructionCode op = kArmVst1F64; op |= AddressingModeField::encode(kMode_Operand2_R); Emit(op, 0, nullptr, input_count, inputs); } else { - DCHECK_NE(MachineRepresentation::kSimd128, store_rep); // Store a 64-bit floating point value using two 32-bit integer stores. // Computing the store address here would require three live temporary // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after @@ -942,7 +963,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) { uint32_t lsb = m.right().Value(); Int32BinopMatcher mleft(m.left().node()); if (mleft.right().HasValue()) { - uint32_t value = (mleft.right().Value() >> lsb) << lsb; + uint32_t value = static_cast(mleft.right().Value() >> lsb) + << lsb; uint32_t width = base::bits::CountPopulation(value); uint32_t msb = base::bits::CountLeadingZeros32(value); if ((width != 0) && (msb + width + lsb == 32)) { @@ -1119,6 +1141,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { VisitRR(this, kArmRev, node); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); } void InstructionSelector::VisitInt32Add(Node* node) { @@ -2513,6 +2539,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP #undef SIMD_BINOP_LIST +void InstructionSelector::VisitF32x4Sqrt(Node* node) { + ArmOperandGenerator g(this); + // Use fixed registers in the lower 8 Q-registers so we can directly access + // mapped registers S0-S31. 
+ Emit(kArmF32x4Sqrt, g.DefineAsFixed(node, q0), + g.UseFixed(node->InputAt(0), q0)); +} + void InstructionSelector::VisitF32x4Div(Node* node) { ArmOperandGenerator g(this); // Use fixed registers in the lower 8 Q-registers so we can directly access diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 66ca7f6cf0cf35..6f65c905dd136b 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -376,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - Arm64OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + Arm64OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -389,6 +389,36 @@ void EmitWordLoadPoisoningIfNeeded( } } +void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode, + Arm64OperandConverter* i, VRegister output_reg) { + const MemoryAccessMode access_mode = + static_cast(MiscField::decode(opcode)); + AddressingMode address_mode = AddressingModeField::decode(opcode); + if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) { + UseScratchRegisterScope temps(codegen->tasm()); + Register address = temps.AcquireX(); + switch (address_mode) { + case kMode_MRI: // Fall through. + case kMode_MRR: + codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1)); + break; + case kMode_Operand2_R_LSL_I: + codegen->tasm()->Add(address, i->InputRegister(0), + i->InputOperand2_64(1)); + break; + default: + // Note: we don't need poisoning for kMode_Root loads as those loads + // target a fixed offset from root register which is set once when + // initializing the vm. 
+ UNREACHABLE(); + } + codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister)); + codegen->tasm()->Ldr(output_reg, MemOperand(address)); + } else { + codegen->tasm()->Ldr(output_reg, i->MemoryOperand()); + } +} + } // namespace #define ASSEMBLE_SHIFT(asm_instr, width) \ @@ -1198,6 +1228,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Sxtw: __ Sxtw(i.OutputRegister(), i.InputRegister32(0)); break; + case kArm64Sbfx: + __ Sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1), + i.InputInt6(2)); + break; case kArm64Sbfx32: __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1), i.InputInt5(2)); @@ -1586,6 +1620,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Str: __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); break; + case kArm64StrCompressTagged: + __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); + break; case kArm64DecompressSigned: { __ DecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0)); break; @@ -1599,13 +1636,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64LdrS: - __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand()); + EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S()); break; case kArm64StrS: __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1)); break; case kArm64LdrD: - __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand()); + EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister()); break; case kArm64StrD: __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1)); @@ -1616,9 +1653,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64StrQ: __ Str(i.InputSimd128Register(0), i.MemoryOperand(1)); break; - case kArm64StrCompressTagged: - __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); - break; case kArm64DmbIsh: __ Dmb(InnerShareable, BarrierAll); break; @@ -1794,6 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D); SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D); + SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D); SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D); SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D); SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D); @@ -1818,6 +1853,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(0).V2D()); break; } + case kArm64F64x2Qfma: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmla(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(), + i.InputSimd128Register(2).V2D()); + break; + } + case kArm64F64x2Qfms: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmls(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(), + i.InputSimd128Register(2).V2D()); + break; + } case kArm64F32x4Splat: { __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0); break; @@ -1840,6 +1887,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S); SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S); SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S); + SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S); SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S); SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S); SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S); @@ -1867,6 +1915,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( 
i.InputSimd128Register(0).V4S()); break; } + case kArm64F32x4Qfma: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmla(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), + i.InputSimd128Register(2).V4S()); + break; + } + case kArm64F32x4Qfms: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ Fmls(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(), + i.InputSimd128Register(2).V4S()); + break; + } case kArm64I64x2Splat: { __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0)); break; @@ -1888,14 +1948,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D); case kArm64I64x2Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); break; } case kArm64I64x2ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Neg(tmp.V2D(), tmp.V2D()); __ Sshl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); @@ -1903,6 +1969,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D); SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D); + case kArm64I64x2Mul: { + UseScratchRegisterScope scope(tasm()); + VRegister dst = i.OutputSimd128Register(); + VRegister src1 = i.InputSimd128Register(0); + VRegister src2 = i.InputSimd128Register(1); + VRegister tmp1 = scope.AcquireSameSizeAs(dst); + VRegister tmp2 = scope.AcquireSameSizeAs(dst); + VRegister tmp3 = i.ToSimd128Register(instr->TempAt(0)); + + // This 2x64-bit multiplication is performed with several 32-bit + // multiplications. + + // 64-bit numbers x and y, can be represented as: + // x = a + 2^32(b) + // y = c + 2^32(d) + + // A 64-bit multiplication is: + // x * y = ac + 2^32(ad + bc) + 2^64(bd) + // note: `2^64(bd)` can be ignored, the value is too large to fit in + // 64-bits. + + // This sequence implements a 2x64bit multiply, where the registers + // `src1` and `src2` are split up into 32-bit components: + // src1 = |d|c|b|a| + // src2 = |h|g|f|e| + // + // src1 * src2 = |cg + 2^32(ch + dg)|ae + 2^32(af + be)| + + // Reverse the 32-bit elements in the 64-bit words. + // tmp2 = |g|h|e|f| + __ Rev64(tmp2.V4S(), src2.V4S()); + + // Calculate the high half components. + // tmp2 = |dg|ch|be|af| + __ Mul(tmp2.V4S(), tmp2.V4S(), src1.V4S()); + + // Extract the low half components of src1. + // tmp1 = |c|a| + __ Xtn(tmp1.V2S(), src1.V2D()); + + // Sum the respective high half components. + // tmp2 = |dg+ch|be+af||dg+ch|be+af| + __ Addp(tmp2.V4S(), tmp2.V4S(), tmp2.V4S()); + + // Extract the low half components of src2. + // tmp3 = |g|e| + __ Xtn(tmp3.V2S(), src2.V2D()); + + // Shift the high half components, into the high half. + // dst = |dg+ch << 32|be+af << 32| + __ Shll(dst.V2D(), tmp2.V2S(), 32); + + // Multiply the low components together, and accumulate with the high + // half. 
+ // dst = |dst[1] + cg|dst[0] + ae| + __ Umlal(dst.V2D(), tmp3.V2S(), tmp1.V2S()); + + break; + } SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D); case kArm64I64x2Ne: { VRegister dst = i.OutputSimd128Register().V2D(); @@ -1915,7 +2040,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D); case kArm64I64x2ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V2D(), i.InputRegister64(1)); + Register shift = i.TempRegister(1); + // Take shift value modulo 64. + __ And(shift, i.InputRegister64(1), 63); + __ Dup(tmp.V2D(), shift); __ Neg(tmp.V2D(), tmp.V2D()); __ Ushl(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(), tmp.V2D()); @@ -1947,14 +2075,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S); case kArm64I32x4Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); break; } case kArm64I32x4ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Neg(tmp.V4S(), tmp.V4S()); __ Sshl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); @@ -1981,7 +2115,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H); case kArm64I32x4ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V4S(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 32. + __ And(shift, i.InputRegister32(1), 31); + __ Dup(tmp.V4S(), shift); __ Neg(tmp.V4S(), tmp.V4S()); __ Ushl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(), tmp.V4S()); @@ -1996,7 +2133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I16x8ExtractLane: { - __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(), + __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(), i.InputInt8(1)); break; } @@ -2014,14 +2151,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H); case kArm64I16x8Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. + __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); break; } case kArm64I16x8ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. + __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Neg(tmp.V8H(), tmp.V8H()); __ Sshl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); @@ -2070,7 +2213,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArm64I16x8ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V8H(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 16. 
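The kArm64I64x2Mul sequence earlier in this hunk relies on the identity spelled out in its comments; a minimal scalar sketch of that decomposition (illustrative helper name, not part of this patch) shows why the 2^64 term can be dropped and why only 32x32-bit products are needed:

#include <cassert>
#include <cstdint>

// Hypothetical helper: computes x * y mod 2^64 from the 32-bit halves
// x = a + 2^32*b and y = c + 2^32*d, the way the vector code does.
uint64_t Mul64ViaHalves(uint64_t x, uint64_t y) {
  uint64_t a = x & 0xFFFFFFFFu, b = x >> 32;
  uint64_t c = y & 0xFFFFFFFFu, d = y >> 32;
  uint64_t high = (a * d + b * c) << 32;  // the 2^64*(b*d) term overflows out of the lane
  return a * c + high;                    // low products accumulated on top, as Umlal does
}

int main() {
  assert(Mul64ViaHalves(0x123456789ABCDEF0u, 0x0FEDCBA987654321u) ==
         0x123456789ABCDEF0u * 0x0FEDCBA987654321u);
  assert(Mul64ViaHalves(~uint64_t{0}, 3) == ~uint64_t{0} * 3);
}

The vector version does the same per lane: Addp forms (a*d + b*c) mod 2^32 for both lanes at once, Shll shifts those sums into the high halves, and Umlal accumulates the low-half products.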
+ __ And(shift, i.InputRegister32(1), 15); + __ Dup(tmp.V8H(), shift); __ Neg(tmp.V8H(), tmp.V8H()); __ Ushl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(), tmp.V8H()); @@ -2101,7 +2247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I8x16ExtractLane: { - __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(), + __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(), i.InputInt8(1)); break; } @@ -2117,14 +2263,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B); case kArm64I8x16Shl: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. + __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Sshl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); break; } case kArm64I8x16ShrS: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. + __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Neg(tmp.V16B(), tmp.V16B()); __ Sshl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); @@ -2163,7 +2315,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B); case kArm64I8x16ShrU: { VRegister tmp = i.TempSimd128Register(0); - __ Dup(tmp.V16B(), i.InputRegister32(1)); + Register shift = i.TempRegister32(1); + // Take shift value modulo 8. + __ And(shift, i.InputRegister32(1), 7); + __ Dup(tmp.V16B(), shift); __ Neg(tmp.V16B(), tmp.V16B()); __ Ushl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), tmp.V16B()); @@ -2277,6 +2432,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1).V16B(), i.InputInt4(2)); break; } + case kArm64S8x16Swizzle: { + __ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(), + i.InputSimd128Register(1).V16B()); + break; + } case kArm64S8x16Shuffle: { Simd128Register dst = i.OutputSimd128Register().V16B(), src0 = i.InputSimd128Register(0).V16B(), diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h index 4b56e402c15efe..880a3fbf9e0a3a 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h +++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h @@ -70,6 +70,7 @@ namespace compiler { V(Arm64Sxtb) \ V(Arm64Sxth) \ V(Arm64Sxtw) \ + V(Arm64Sbfx) \ V(Arm64Sbfx32) \ V(Arm64Ubfx) \ V(Arm64Ubfx32) \ @@ -175,6 +176,7 @@ namespace compiler { V(Arm64F64x2ReplaceLane) \ V(Arm64F64x2Abs) \ V(Arm64F64x2Neg) \ + V(Arm64F64x2Sqrt) \ V(Arm64F64x2Add) \ V(Arm64F64x2Sub) \ V(Arm64F64x2Mul) \ @@ -185,6 +187,8 @@ namespace compiler { V(Arm64F64x2Ne) \ V(Arm64F64x2Lt) \ V(Arm64F64x2Le) \ + V(Arm64F64x2Qfma) \ + V(Arm64F64x2Qfms) \ V(Arm64F32x4Splat) \ V(Arm64F32x4ExtractLane) \ V(Arm64F32x4ReplaceLane) \ @@ -192,6 +196,7 @@ namespace compiler { V(Arm64F32x4UConvertI32x4) \ V(Arm64F32x4Abs) \ V(Arm64F32x4Neg) \ + V(Arm64F32x4Sqrt) \ V(Arm64F32x4RecipApprox) \ V(Arm64F32x4RecipSqrtApprox) \ V(Arm64F32x4Add) \ @@ -205,6 +210,8 @@ namespace compiler { V(Arm64F32x4Ne) \ V(Arm64F32x4Lt) \ V(Arm64F32x4Le) \ + V(Arm64F32x4Qfma) \ + V(Arm64F32x4Qfms) \ V(Arm64I64x2Splat) \ V(Arm64I64x2ExtractLane) \ 
V(Arm64I64x2ReplaceLane) \ @@ -213,6 +220,7 @@ namespace compiler { V(Arm64I64x2ShrS) \ V(Arm64I64x2Add) \ V(Arm64I64x2Sub) \ + V(Arm64I64x2Mul) \ V(Arm64I64x2Eq) \ V(Arm64I64x2Ne) \ V(Arm64I64x2GtS) \ @@ -331,6 +339,7 @@ namespace compiler { V(Arm64S8x16TransposeLeft) \ V(Arm64S8x16TransposeRight) \ V(Arm64S8x16Concat) \ + V(Arm64S8x16Swizzle) \ V(Arm64S8x16Shuffle) \ V(Arm64S32x2Reverse) \ V(Arm64S16x4Reverse) \ diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc index 7cba2d50ea0059..b0f92029684703 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc @@ -71,6 +71,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64Sxth: case kArm64Sxth32: case kArm64Sxtw: + case kArm64Sbfx: case kArm64Sbfx32: case kArm64Ubfx: case kArm64Ubfx32: @@ -142,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F64x2ReplaceLane: case kArm64F64x2Abs: case kArm64F64x2Neg: + case kArm64F64x2Sqrt: case kArm64F64x2Add: case kArm64F64x2Sub: case kArm64F64x2Mul: @@ -152,6 +154,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F64x2Ne: case kArm64F64x2Lt: case kArm64F64x2Le: + case kArm64F64x2Qfma: + case kArm64F64x2Qfms: case kArm64F32x4Splat: case kArm64F32x4ExtractLane: case kArm64F32x4ReplaceLane: @@ -159,6 +163,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F32x4UConvertI32x4: case kArm64F32x4Abs: case kArm64F32x4Neg: + case kArm64F32x4Sqrt: case kArm64F32x4RecipApprox: case kArm64F32x4RecipSqrtApprox: case kArm64F32x4Add: @@ -172,6 +177,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64F32x4Ne: case kArm64F32x4Lt: case kArm64F32x4Le: + case kArm64F32x4Qfma: + case kArm64F32x4Qfms: case kArm64I64x2Splat: case kArm64I64x2ExtractLane: case kArm64I64x2ReplaceLane: @@ -180,6 +187,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64I64x2ShrS: case kArm64I64x2Add: case kArm64I64x2Sub: + case kArm64I64x2Mul: case kArm64I64x2Eq: case kArm64I64x2Ne: case kArm64I64x2GtS: @@ -298,6 +306,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64S8x16TransposeLeft: case kArm64S8x16TransposeRight: case kArm64S8x16Concat: + case kArm64S8x16Swizzle: case kArm64S8x16Shuffle: case kArm64S32x2Reverse: case kArm64S16x4Reverse: @@ -439,6 +448,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { case kArm64Clz: case kArm64Clz32: + case kArm64Sbfx: case kArm64Sbfx32: case kArm64Sxtb32: case kArm64Sxth32: diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 4abbd68c49a4a5..53a289fe6a664f 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -153,7 +153,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { Arm64OperandGenerator g(selector); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()}; selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), arraysize(temps), temps); @@ -499,6 +499,7 @@ void VisitAddSub(InstructionSelector* selector, Node* node, 
ArchOpcode opcode, Arm64OperandGenerator g(selector); Matcher m(node); if (m.right().HasValue() && (m.right().Value() < 0) && + (m.right().Value() > std::numeric_limits::min()) && g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) { selector->Emit(negate_opcode, g.DefineAsRegister(node), g.UseRegister(m.left().node()), @@ -627,9 +628,24 @@ void InstructionSelector::VisitLoad(Node* node) { #else UNREACHABLE(); #endif +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kArm64LdrDecompressTaggedSigned; + immediate_mode = kLoadStoreImm32; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kArm64LdrDecompressTaggedPointer; + immediate_mode = kLoadStoreImm32; + break; + case MachineRepresentation::kTagged: + opcode = kArm64LdrDecompressAnyTagged; + immediate_mode = kLoadStoreImm32; + break; +#else case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. +#endif case MachineRepresentation::kWord64: opcode = kArm64Ldr; immediate_mode = kLoadStoreImm64; @@ -723,7 +739,7 @@ void InstructionSelector::VisitStore(Node* node) { case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: #ifdef V8_COMPRESS_POINTERS - opcode = kArm64StrW; + opcode = kArm64StrCompressTagged; immediate_mode = kLoadStoreImm32; break; #else @@ -731,7 +747,11 @@ void InstructionSelector::VisitStore(Node* node) { #endif case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTagged: + opcode = kArm64StrCompressTagged; + immediate_mode = + COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64; + break; case MachineRepresentation::kWord64: opcode = kArm64Str; immediate_mode = kLoadStoreImm64; @@ -770,6 +790,10 @@ void InstructionSelector::VisitProtectedStore(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + // Architecture supports unaligned access, therefore VisitLoad is used instead void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); } @@ -1048,7 +1072,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) { if (mleft.right().HasValue() && mleft.right().Value() != 0) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is // shifted into the least-significant bits. - uint32_t mask = (mleft.right().Value() >> lsb) << lsb; + uint32_t mask = static_cast(mleft.right().Value() >> lsb) + << lsb; unsigned mask_width = base::bits::CountPopulation(mask); unsigned mask_msb = base::bits::CountLeadingZeros32(mask); if ((mask_msb + mask_width + lsb) == 32) { @@ -1091,7 +1116,8 @@ void InstructionSelector::VisitWord64Shr(Node* node) { if (mleft.right().HasValue() && mleft.right().Value() != 0) { // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is // shifted into the least-significant bits. 
- uint64_t mask = (mleft.right().Value() >> lsb) << lsb; + uint64_t mask = static_cast(mleft.right().Value() >> lsb) + << lsb; unsigned mask_width = base::bits::CountPopulation(mask); unsigned mask_msb = base::bits::CountLeadingZeros64(mask); if ((mask_msb + mask_width + lsb) == 64) { @@ -1240,7 +1266,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) { V(Float32Max, kArm64Float32Max) \ V(Float64Max, kArm64Float64Max) \ V(Float32Min, kArm64Float32Min) \ - V(Float64Min, kArm64Float64Min) + V(Float64Min, kArm64Float64Min) \ + V(S8x16Swizzle, kArm64S8x16Swizzle) #define RR_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ @@ -1572,9 +1599,22 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { return; } EmitLoad(this, value, opcode, immediate_mode, rep, node); - } else { - VisitRR(this, kArm64Sxtw, node); + return; + } + + if (value->opcode() == IrOpcode::kWord32Sar && CanCover(node, value)) { + Int32BinopMatcher m(value); + if (m.right().HasValue()) { + Arm64OperandGenerator g(this); + // Mask the shift amount, to keep the same semantics as Word32Sar. + int right = m.right().Value() & 0x1F; + Emit(kArm64Sbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()), + g.TempImmediate(right), g.TempImmediate(32 - right)); + return; + } } + + VisitRR(this, kArm64Sxtw, node); } void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { @@ -1830,31 +1870,6 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode, selector->EmitWithContinuation(opcode, left, right, cont); } -// Shared routine for multiple word compare operations. -void VisitWordCompare(InstructionSelector* selector, Node* node, - InstructionCode opcode, FlagsContinuation* cont, - ImmediateMode immediate_mode) { - Arm64OperandGenerator g(selector); - - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - - // If one of the two inputs is an immediate, make sure it's on the right. - if (!g.CanBeImmediate(right, immediate_mode) && - g.CanBeImmediate(left, immediate_mode)) { - cont->Commute(); - std::swap(left, right); - } - - if (g.CanBeImmediate(right, immediate_mode)) { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), - cont); - } else { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), - cont); - } -} - // This function checks whether we can convert: // ((a b) cmp 0), b. // to: @@ -1986,9 +2001,35 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector, selector->EmitWithContinuation(opcode, value, cont); } +template +struct CbzOrTbzMatchTrait {}; + +template <> +struct CbzOrTbzMatchTrait<32> { + using IntegralType = uint32_t; + using BinopMatcher = Int32BinopMatcher; + static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord32And; + static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch32; + static constexpr ArchOpcode kCompareAndBranchOpcode = + kArm64CompareAndBranch32; + static constexpr unsigned kSignBit = kWSignBit; +}; + +template <> +struct CbzOrTbzMatchTrait<64> { + using IntegralType = uint64_t; + using BinopMatcher = Int64BinopMatcher; + static constexpr IrOpcode::Value kAndOpcode = IrOpcode::kWord64And; + static constexpr ArchOpcode kTestAndBranchOpcode = kArm64TestAndBranch; + static constexpr ArchOpcode kCompareAndBranchOpcode = kArm64CompareAndBranch; + static constexpr unsigned kSignBit = kXSignBit; +}; + // Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node} // against {value}, depending on the condition. 
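Stepping back to the kArm64Sbfx selection earlier in this hunk: the ChangeInt32ToInt64(Word32Sar(x, imm)) match leans on a bitfield identity. Sign-extending the result of a 32-bit arithmetic shift right by s equals a signed bitfield extract of width 32 - s starting at bit s, which is what Sbfx(dst, x, s, 32 - s) computes (s = 0 degenerates to the plain Sxtw path). A scalar sketch, with hypothetical helper names and assuming arithmetic >> on signed values:

#include <cassert>
#include <cstdint>

int64_t SarThenChangeToInt64(int32_t x, unsigned shift) {
  unsigned s = shift & 31;              // Word32Sar masks the shift count
  return static_cast<int64_t>(x) >> s;  // arithmetic shift, then widened
}

int64_t Sbfx(int32_t x, unsigned lsb, unsigned width) {
  // Extract bits [lsb, lsb + width) of x and sign-extend them to 64 bits.
  uint32_t mask = width == 32 ? 0xFFFFFFFFu : (1u << width) - 1u;
  int64_t field = (static_cast<uint32_t>(x) >> lsb) & mask;
  int64_t sign = int64_t{1} << (width - 1);
  return (field ^ sign) - sign;         // sign-extend the width-bit field
}

int main() {
  for (unsigned s = 0; s < 32; ++s) {
    assert(SarThenChangeToInt64(-123456789, s) == Sbfx(-123456789, s, 32 - s));
    assert(SarThenChangeToInt64(0x12345678, s) == Sbfx(0x12345678, s, 32 - s));
  }
}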
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, +template +bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, + typename CbzOrTbzMatchTrait::IntegralType value, Node* user, FlagsCondition cond, FlagsContinuation* cont) { // Branch poisoning requires flags to be set, so when it's enabled for // a particular branch, we shouldn't be applying the cbz/tbz optimization. @@ -2007,28 +2048,33 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, if (cont->IsDeoptimize()) return false; Arm64OperandGenerator g(selector); cont->Overwrite(MapForTbz(cond)); - Int32Matcher m(node); - if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) { - // SignedLessThan(Float64ExtractHighWord32(x), 0) and - // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) essentially - // check the sign bit of a 64-bit floating point value. - InstructionOperand temp = g.TempRegister(); - selector->Emit(kArm64U64MoveFloat64, temp, - g.UseRegister(node->InputAt(0))); - selector->EmitWithContinuation(kArm64TestAndBranch, temp, - g.TempImmediate(63), cont); - return true; + + if (N == 32) { + Int32Matcher m(node); + if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) { + // SignedLessThan(Float64ExtractHighWord32(x), 0) and + // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0) + // essentially check the sign bit of a 64-bit floating point value. + InstructionOperand temp = g.TempRegister(); + selector->Emit(kArm64U64MoveFloat64, temp, + g.UseRegister(node->InputAt(0))); + selector->EmitWithContinuation(kArm64TestAndBranch, temp, + g.TempImmediate(kDSignBit), cont); + return true; + } } - selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node), - g.TempImmediate(31), cont); + + selector->EmitWithContinuation( + CbzOrTbzMatchTrait::kTestAndBranchOpcode, g.UseRegister(node), + g.TempImmediate(CbzOrTbzMatchTrait::kSignBit), cont); return true; } case kEqual: case kNotEqual: { - if (node->opcode() == IrOpcode::kWord32And) { + if (node->opcode() == CbzOrTbzMatchTrait::kAndOpcode) { // Emit a tbz/tbnz if we are comparing with a single-bit mask: - // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false) - Int32BinopMatcher m_and(node); + // Branch(WordEqual(WordAnd(x, 1 << N), 1 << N), true, false) + typename CbzOrTbzMatchTrait::BinopMatcher m_and(node); if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) && m_and.right().Is(value) && selector->CanCover(user, node)) { Arm64OperandGenerator g(selector); @@ -2036,7 +2082,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, // the opposite here so negate the condition. cont->Negate(); selector->EmitWithContinuation( - kArm64TestAndBranch32, g.UseRegister(m_and.left().node()), + CbzOrTbzMatchTrait::kTestAndBranchOpcode, + g.UseRegister(m_and.left().node()), g.TempImmediate(base::bits::CountTrailingZeros(value)), cont); return true; } @@ -2048,7 +2095,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, if (value != 0) return false; Arm64OperandGenerator g(selector); cont->Overwrite(MapForCbz(cond)); - EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32, + EmitBranchOrDeoptimize(selector, + CbzOrTbzMatchTrait::kCompareAndBranchOpcode, g.UseRegister(node), cont); return true; } @@ -2057,20 +2105,50 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value, } } +// Shared routine for multiple word compare operations. 
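The TryEmitCbzOrTbz rewrite above generalizes the sign-bit trick to both widths: SignedLessThan(v, 0) on a two's-complement value is just a test of its top bit (kWSignBit or kXSignBit), and for Float64ExtractHighWord32 the tested bit is bit 63 of the double, i.e. its IEEE sign bit (kDSignBit). A small sketch of the floating-point half of that claim (helper name is illustrative only):

#include <cassert>
#include <cstdint>
#include <cstring>

bool SignBitOfDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return (bits >> 63) != 0;  // the bit Tbz/Tbnz inspects via kDSignBit
}

int main() {
  assert(!SignBitOfDouble(1.5));
  assert(SignBitOfDouble(-1.5));
  assert(SignBitOfDouble(-0.0));  // set for -0.0, although -0.0 < 0.0 is false
}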
+void VisitWordCompare(InstructionSelector* selector, Node* node, + InstructionCode opcode, FlagsContinuation* cont, + ImmediateMode immediate_mode) { + Arm64OperandGenerator g(selector); + + Node* left = node->InputAt(0); + Node* right = node->InputAt(1); + + // If one of the two inputs is an immediate, make sure it's on the right. + if (!g.CanBeImmediate(right, immediate_mode) && + g.CanBeImmediate(left, immediate_mode)) { + cont->Commute(); + std::swap(left, right); + } + + if (opcode == kArm64Cmp && !cont->IsPoisoned()) { + Int64Matcher m(right); + if (m.HasValue()) { + if (TryEmitCbzOrTbz<64>(selector, left, m.Value(), node, + cont->condition(), cont)) { + return; + } + } + } + + VisitCompare(selector, opcode, g.UseRegister(left), + g.UseOperand(right, immediate_mode), cont); +} + void VisitWord32Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont) { Int32BinopMatcher m(node); FlagsCondition cond = cont->condition(); if (!cont->IsPoisoned()) { if (m.right().HasValue()) { - if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node, - cond, cont)) { + if (TryEmitCbzOrTbz<32>(selector, m.left().node(), m.right().Value(), + node, cond, cont)) { return; } } else if (m.left().HasValue()) { FlagsCondition commuted_cond = CommuteFlagsCondition(cond); - if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node, - commuted_cond, cont)) { + if (TryEmitCbzOrTbz<32>(selector, m.right().node(), m.left().Value(), + node, commuted_cond, cont)) { return; } } @@ -2378,13 +2456,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) { return VisitWordCompare(this, left, kArm64Tst, cont, kLogical64Imm); } - // Merge the Word64Equal(x, 0) comparison into a cbz instruction. 
- if ((cont->IsBranch() || cont->IsDeoptimize()) && - !cont->IsPoisoned()) { - EmitBranchOrDeoptimize(this, kArm64CompareAndBranch, - g.UseRegister(left), cont); - return; - } } return VisitWordCompare(this, value, kArm64Cmp, cont, kArithmeticImm); } @@ -3054,10 +3125,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { #define SIMD_UNOP_LIST(V) \ V(F64x2Abs, kArm64F64x2Abs) \ V(F64x2Neg, kArm64F64x2Neg) \ + V(F64x2Sqrt, kArm64F64x2Sqrt) \ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \ V(F32x4Abs, kArm64F32x4Abs) \ V(F32x4Neg, kArm64F32x4Neg) \ + V(F32x4Sqrt, kArm64F32x4Sqrt) \ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \ V(I64x2Neg, kArm64I64x2Neg) \ @@ -3236,6 +3309,14 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP #undef SIMD_BINOP_LIST +void InstructionSelector::VisitI64x2Mul(Node* node) { + Arm64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kArm64I64x2Mul, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), + arraysize(temps), temps); +} + void InstructionSelector::VisitS128Select(Node* node) { Arm64OperandGenerator g(this); Emit(kArm64S128Select, g.DefineSameAsFirst(node), @@ -3243,6 +3324,19 @@ void InstructionSelector::VisitS128Select(Node* node) { g.UseRegister(node->InputAt(2))); } +#define VISIT_SIMD_QFMOP(op) \ + void InstructionSelector::Visit##op(Node* node) { \ + Arm64OperandGenerator g(this); \ + Emit(kArm64##op, g.DefineSameAsFirst(node), \ + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2))); \ + } +VISIT_SIMD_QFMOP(F64x2Qfma) +VISIT_SIMD_QFMOP(F64x2Qfms) +VISIT_SIMD_QFMOP(F32x4Qfma) +VISIT_SIMD_QFMOP(F32x4Qfms) +#undef VISIT_SIMD_QFMOP + namespace { struct ShuffleEntry { diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h index 2bfb009980dcf8..530dc0a8136fc0 100644 --- a/deps/v8/src/compiler/backend/code-generator-impl.h +++ b/deps/v8/src/compiler/backend/code-generator-impl.h @@ -26,7 +26,7 @@ class InstructionOperandConverter { // -- Instruction operand accesses with conversions -------------------------- - Register InputRegister(size_t index) { + Register InputRegister(size_t index) const { return ToRegister(instr_->InputAt(index)); } @@ -96,7 +96,7 @@ class InstructionOperandConverter { return ToRpoNumber(instr_->InputAt(index)); } - Register OutputRegister(size_t index = 0) { + Register OutputRegister(size_t index = 0) const { return ToRegister(instr_->OutputAt(index)); } @@ -130,7 +130,7 @@ class InstructionOperandConverter { return ToConstant(op).ToRpoNumber(); } - Register ToRegister(InstructionOperand* op) { + Register ToRegister(InstructionOperand* op) const { return LocationOperand::cast(op)->GetRegister(); } @@ -146,7 +146,7 @@ class InstructionOperandConverter { return LocationOperand::cast(op)->GetSimd128Register(); } - Constant ToConstant(InstructionOperand* op) { + Constant ToConstant(InstructionOperand* op) const { if (op->IsImmediate()) { return gen_->instructions()->GetImmediate(ImmediateOperand::cast(op)); } diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index e7702bcdf625d2..43eb4a1f15a590 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -4,7 +4,7 @@ #include 
"src/compiler/backend/code-generator.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/optimized-compilation-info.h" diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h index e9ebf675905dbd..d56b1edae0e0a8 100644 --- a/deps/v8/src/compiler/backend/code-generator.h +++ b/deps/v8/src/compiler/backend/code-generator.h @@ -5,6 +5,8 @@ #ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_H_ #define V8_COMPILER_BACKEND_CODE_GENERATOR_H_ +#include + #include "src/base/optional.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/safepoint-table.h" diff --git a/deps/v8/src/compiler/backend/frame-elider.cc b/deps/v8/src/compiler/backend/frame-elider.cc index 064501b0971b06..293fc9352c4d7f 100644 --- a/deps/v8/src/compiler/backend/frame-elider.cc +++ b/deps/v8/src/compiler/backend/frame-elider.cc @@ -4,7 +4,7 @@ #include "src/compiler/backend/frame-elider.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 4542da643b4b87..068268a3da4940 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -479,17 +479,18 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ } -#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ - do { \ - Register dst = i.OutputRegister(); \ - Operand src = i.InputOperand(0); \ - Register tmp = i.TempRegister(0); \ - __ mov(tmp, Immediate(1)); \ - __ xor_(dst, dst); \ - __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \ - __ opcode(kScratchDoubleReg, src); \ - __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \ - __ cmov(zero, dst, tmp); \ +#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ + do { \ + Register dst = i.OutputRegister(); \ + Operand src = i.InputOperand(0); \ + Register tmp = i.TempRegister(0); \ + XMMRegister tmp_simd = i.TempSimd128Register(1); \ + __ mov(tmp, Immediate(1)); \ + __ xor_(dst, dst); \ + __ Pxor(tmp_simd, tmp_simd); \ + __ opcode(tmp_simd, src); \ + __ Ptest(tmp_simd, tmp_simd); \ + __ cmov(zero, dst, tmp); \ } while (false) void CodeGenerator::AssembleDeconstructFrame() { @@ -1266,16 +1267,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kSSEFloat32Abs: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 33); - __ andps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 33); + __ andps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Neg: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 31); - __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 31); + __ xorps(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat32Round: { @@ -1444,16 +1447,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEFloat64Abs: { // TODO(bmeurer): Use 128-bit constants. 
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 1); - __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 1); + __ andpd(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat64Neg: { // TODO(bmeurer): Use 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 63); - __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); + __ xorpd(i.OutputDoubleRegister(), tmp); break; } case kSSEFloat64Sqrt: @@ -1476,13 +1481,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cvttss2si(i.OutputRegister(), i.InputOperand(0)); break; case kSSEFloat32ToUint32: - __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg); + __ Cvttss2ui(i.OutputRegister(), i.InputOperand(0), + i.TempSimd128Register(0)); break; case kSSEFloat64ToInt32: __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); break; case kSSEFloat64ToUint32: - __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), kScratchDoubleReg); + __ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0), + i.TempSimd128Register(0)); break; case kSSEInt32ToFloat32: __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0)); @@ -1577,34 +1584,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kAVXFloat32Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 33); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 33); CpuFeatureScope avx_scope(tasm(), AVX); - __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat32Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 31); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 31); CpuFeatureScope avx_scope(tasm(), AVX); - __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat64Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 1); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psrlq(tmp, 1); CpuFeatureScope avx_scope(tasm(), AVX); - __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kAVXFloat64Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. 
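The Abs/Neg cases in this hunk build their constants on the fly: pcmpeqd produces all-ones, and the following psrlq/psllq turns that into either an everything-but-the-sign-bit mask (for Abs, applied with andps/andpd) or a sign-bit-only mask (for Neg, applied with xorps/xorpd); the float32 variants shift by 33 and 31 so the low 32 bits of each 64-bit lane carry the same masks. A scalar double sketch of the effect (hypothetical helper names, not part of this patch):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

double MaskAbs(double d) {
  uint64_t bits, mask = ~uint64_t{0} >> 1;   // pcmpeqd ; psrlq 1
  std::memcpy(&bits, &d, sizeof(bits));
  bits &= mask;                              // andpd: clear the sign bit
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

double MaskNeg(double d) {
  uint64_t bits, mask = ~uint64_t{0} << 63;  // pcmpeqd ; psllq 63
  std::memcpy(&bits, &d, sizeof(bits));
  bits ^= mask;                              // xorpd: flip the sign bit
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  assert(MaskAbs(-2.5) == 2.5);
  assert(MaskNeg(2.5) == -2.5);
  assert(MaskNeg(-0.0) == 0.0 && !std::signbit(MaskNeg(-0.0)));
}

The change in this hunk only swaps kScratchDoubleReg for an explicitly allocated temp; the masks themselves are unchanged.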
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 63); + XMMRegister tmp = i.TempSimd128Register(0); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); CpuFeatureScope avx_scope(tasm(), AVX); - __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0)); + __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0)); break; } case kSSEFloat64SilenceNaN: @@ -1825,6 +1836,164 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kSSEF64x2Splat: { + DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + XMMRegister dst = i.OutputSimd128Register(); + __ shufpd(dst, dst, 0x0); + break; + } + case kAVXF64x2Splat: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister src = i.InputDoubleRegister(0); + __ vshufpd(i.OutputSimd128Register(), src, src, 0x0); + break; + } + case kSSEF64x2ExtractLane: { + DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); + XMMRegister dst = i.OutputDoubleRegister(); + int8_t lane = i.InputInt8(1); + if (lane != 0) { + DCHECK_LT(lane, 4); + __ shufpd(dst, dst, lane); + } + break; + } + case kAVXF64x2ExtractLane: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister dst = i.OutputDoubleRegister(); + XMMRegister src = i.InputSimd128Register(0); + int8_t lane = i.InputInt8(1); + if (lane == 0) { + if (dst != src) __ vmovapd(dst, src); + } else { + DCHECK_LT(lane, 4); + __ vshufpd(dst, src, src, lane); + } + break; + } + case kSSEF64x2ReplaceLane: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_1); + XMMRegister dst = i.OutputSimd128Register(); + int8_t lane = i.InputInt8(1); + DoubleRegister rep = i.InputDoubleRegister(2); + + // insertps takes a mask which contains (high to low): + // - 2 bit specifying source float element to copy + // - 2 bit specifying destination float element to write to + // - 4 bits specifying which elements of the destination to zero + DCHECK_LT(lane, 2); + if (lane == 0) { + __ insertps(dst, rep, 0b00000000); + __ insertps(dst, rep, 0b01010000); + } else { + __ insertps(dst, rep, 0b00100000); + __ insertps(dst, rep, 0b01110000); + } + break; + } + case kAVXF64x2ReplaceLane: { + CpuFeatureScope avx_scope(tasm(), AVX); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(0); + int8_t lane = i.InputInt8(1); + DoubleRegister rep = i.InputDoubleRegister(2); + + DCHECK_LT(lane, 2); + if (lane == 0) { + __ vinsertps(dst, src, rep, 0b00000000); + __ vinsertps(dst, src, rep, 0b01010000); + } else { + __ vinsertps(dst, src, rep, 0b10100000); + __ vinsertps(dst, src, rep, 0b11110000); + } + break; + } + case kIA32F64x2Sqrt: { + __ Sqrtpd(i.OutputSimd128Register(), i.InputOperand(0)); + break; + } + case kIA32F64x2Add: { + __ Addpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Sub: { + __ Subpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Mul: { + __ Mulpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Div: { + __ Divpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Min: { + Operand src1 = i.InputOperand(1); + XMMRegister dst = i.OutputSimd128Register(), + src = i.InputSimd128Register(0), + tmp = i.TempSimd128Register(0); + // The minpd instruction doesn't propagate NaNs and +0's in its first + // operand. 
Perform minpd in both orders, merge the resuls, and adjust. + __ Movapd(tmp, src1); + __ Minpd(tmp, tmp, src); + __ Minpd(dst, src, src1); + // propagate -0's and NaNs, which may be non-canonical. + __ Orpd(tmp, dst); + // Canonicalize NaNs by quieting and clearing the payload. + __ Cmpunordpd(dst, dst, tmp); + __ Orpd(tmp, dst); + __ Psrlq(dst, 13); + __ Andnpd(dst, tmp); + break; + } + case kIA32F64x2Max: { + Operand src1 = i.InputOperand(1); + XMMRegister dst = i.OutputSimd128Register(), + src = i.InputSimd128Register(0), + tmp = i.TempSimd128Register(0); + // The maxpd instruction doesn't propagate NaNs and +0's in its first + // operand. Perform maxpd in both orders, merge the resuls, and adjust. + __ Movapd(tmp, src1); + __ Maxpd(tmp, tmp, src); + __ Maxpd(dst, src, src1); + // Find discrepancies. + __ Xorpd(dst, tmp); + // Propagate NaNs, which may be non-canonical. + __ Orpd(tmp, dst); + // Propagate sign discrepancy and (subtle) quiet NaNs. + __ Subpd(tmp, tmp, dst); + // Canonicalize NaNs by clearing the payload. Sign is non-deterministic. + __ Cmpunordpd(dst, dst, tmp); + __ Psrlq(dst, 13); + __ Andnpd(dst, tmp); + break; + } + case kIA32F64x2Eq: { + __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Ne: { + __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Lt: { + __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } + case kIA32F64x2Le: { + __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(0), + i.InputOperand(1)); + break; + } case kSSEF32x4Splat: { DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); XMMRegister dst = i.OutputSimd128Register(); @@ -1951,6 +2120,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputOperand(0)); break; } + case kSSEF32x4Sqrt: { + __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } + case kAVXF32x4Sqrt: { + CpuFeatureScope avx_scope(tasm(), AVX); + __ vsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kIA32F32x4RecipApprox: { __ Rcpps(i.OutputSimd128Register(), i.InputOperand(0)); break; @@ -2212,28 +2390,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI32x4Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ pslld(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4Shl: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI32x4ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. 
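What the kIA32F64x2Min/Max sequences above compute, lane by lane, are the wasm semantics rather than raw minpd/maxpd, which simply hand back the second operand in the NaN and ±0 cases; hence the evaluation in both operand orders plus the NaN-canonicalizing tail. A scalar sketch of the intended per-lane result (helper names are illustrative):

#include <cassert>
#include <cmath>
#include <limits>

double F64x2MinLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();  // canonical NaN
  if (a == 0.0 && b == 0.0)
    return std::signbit(a) ? a : b;                   // -0.0 beats +0.0
  return a < b ? a : b;
}

double F64x2MaxLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == 0.0 && b == 0.0)
    return std::signbit(a) ? b : a;                   // +0.0 beats -0.0
  return a > b ? a : b;
}

int main() {
  assert(std::signbit(F64x2MinLane(0.0, -0.0)));
  assert(!std::signbit(F64x2MaxLane(-0.0, 0.0)));
  assert(std::isnan(F64x2MinLane(1.0, std::nan(""))));
}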
+ __ and_(shift, 31); + __ movd(tmp, shift); __ psrad(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrS: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2430,14 +2620,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI32x4ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ psrld(i.OutputSimd128Register(), tmp); break; } case kAVXI32x4ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ and_(shift, 31); + __ movd(tmp, shift); __ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2514,7 +2710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32I16x8ExtractLane: { Register dst = i.OutputRegister(); __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsx_w(dst, dst); break; } case kSSEI16x8ReplaceLane: { @@ -2553,28 +2748,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI16x8Shl: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ psllw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8Shl: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } case kSSEI16x8ShrS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ psraw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrS: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2745,14 +2952,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kSSEI16x8ShrU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. 
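The "& 31 / & 15 / & 7" masking added throughout these shift cases implements the wasm rule that SIMD shift counts are taken modulo the lane width, whereas the underlying vector shift instructions generally zero the lane for an over-wide count instead of wrapping it. Scalar model for one i32 lane (helper name is illustrative):

#include <cassert>
#include <cstdint>

uint32_t I32x4ShlLane(uint32_t lane, uint32_t shift) {
  return lane << (shift & 31);  // count taken modulo the 32-bit lane width
}

int main() {
  assert(I32x4ShlLane(1, 33) == 2);  // 33 mod 32 == 1, not a zeroed lane
  assert(I32x4ShlLane(1, 32) == 1);  // 32 mod 32 == 0
}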
+ __ and_(shift, 15); + __ movd(tmp, shift); __ psrlw(i.OutputSimd128Register(), tmp); break; } case kAVXI16x8ShrU: { CpuFeatureScope avx_scope(tasm(), AVX); XMMRegister tmp = i.TempSimd128Register(0); - __ movd(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ and_(shift, 15); + __ movd(tmp, shift); __ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0), tmp); break; } @@ -2875,7 +3088,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32I8x16ExtractLane: { Register dst = i.OutputRegister(); __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsx_b(dst, dst); break; } case kSSEI8x16ReplaceLane: { @@ -2919,6 +3131,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register shift = i.InputRegister(1); Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + // Take shift value modulo 8. + __ and_(shift, 7); // Mask off the unwanted bits before word-shifting. __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); __ mov(tmp, shift); @@ -2938,6 +3152,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register shift = i.InputRegister(1); Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + // Take shift value modulo 8. + __ and_(shift, 7); // Mask off the unwanted bits before word-shifting. __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); __ mov(tmp, shift); @@ -2959,6 +3175,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpckhbw(kScratchDoubleReg, dst); __ punpcklbw(dst, dst); __ mov(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ and_(tmp, 7); __ add(tmp, Immediate(8)); __ movd(tmp_simd, tmp); __ psraw(kScratchDoubleReg, tmp_simd); @@ -3223,6 +3441,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpckhbw(kScratchDoubleReg, dst); __ punpcklbw(dst, dst); __ mov(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ and_(tmp, 7); __ add(tmp, Immediate(8)); __ movd(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); @@ -3365,6 +3585,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vxorps(dst, kScratchDoubleReg, i.InputSimd128Register(2)); break; } + case kIA32S8x16Swizzle: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister mask = i.TempSimd128Register(0); + + // Out-of-range indices should return 0, add 112 so that any value > 15 + // saturates to 128 (top bit set), so pshufb will zero that lane. 
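A scalar model of the saturating-add trick the comment above describes, ahead of the Paddusb/Pshufb pair that follows: pshufb zeroes a result byte whenever bit 7 of its control byte is set and otherwise selects by the low four bits, so adding 0x70 with unsigned saturation leaves indices 0..15 usable and pushes every larger index into the zeroing range (helper name is illustrative, not part of this patch):

#include <cassert>
#include <cstdint>

uint8_t SwizzleLane(const uint8_t src[16], uint8_t index) {
  uint32_t sum = index + 0x70u;                                    // Paddusb with 0x70 bytes
  uint8_t control = sum > 0xFF ? 0xFF : static_cast<uint8_t>(sum);
  if (control & 0x80) return 0;      // Pshufb: high bit set -> write zero
  return src[control & 0x0F];        // Pshufb: select source byte by low nibble
}

int main() {
  uint8_t src[16] = {10, 11, 12, 13, 14, 15, 16, 17,
                     18, 19, 20, 21, 22, 23, 24, 25};
  assert(SwizzleLane(src, 3) == 13);   // in range: picks byte 3
  assert(SwizzleLane(src, 16) == 0);   // out of range: zeroed
  assert(SwizzleLane(src, 255) == 0);  // saturation keeps the high bit set
}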
+ __ Move(mask, (uint32_t)0x70707070); + __ Pshufd(mask, mask, 0x0); + __ Paddusb(mask, i.InputSimd128Register(1)); + __ Pshufb(dst, mask); + break; + } case kIA32S8x16Shuffle: { XMMRegister dst = i.OutputSimd128Register(); Operand src0 = i.InputOperand(0); diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h index 7530c716b85c0c..a77fb8cd372edc 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h +++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h @@ -116,6 +116,23 @@ namespace compiler { V(IA32PushSimd128) \ V(IA32Poke) \ V(IA32Peek) \ + V(SSEF64x2Splat) \ + V(AVXF64x2Splat) \ + V(SSEF64x2ExtractLane) \ + V(AVXF64x2ExtractLane) \ + V(SSEF64x2ReplaceLane) \ + V(AVXF64x2ReplaceLane) \ + V(IA32F64x2Sqrt) \ + V(IA32F64x2Add) \ + V(IA32F64x2Sub) \ + V(IA32F64x2Mul) \ + V(IA32F64x2Div) \ + V(IA32F64x2Min) \ + V(IA32F64x2Max) \ + V(IA32F64x2Eq) \ + V(IA32F64x2Ne) \ + V(IA32F64x2Lt) \ + V(IA32F64x2Le) \ V(SSEF32x4Splat) \ V(AVXF32x4Splat) \ V(SSEF32x4ExtractLane) \ @@ -129,6 +146,8 @@ namespace compiler { V(AVXF32x4Abs) \ V(SSEF32x4Neg) \ V(AVXF32x4Neg) \ + V(SSEF32x4Sqrt) \ + V(AVXF32x4Sqrt) \ V(IA32F32x4RecipApprox) \ V(IA32F32x4RecipSqrtApprox) \ V(SSEF32x4Add) \ @@ -313,6 +332,7 @@ namespace compiler { V(AVXS128Xor) \ V(SSES128Select) \ V(AVXS128Select) \ + V(IA32S8x16Swizzle) \ V(IA32S8x16Shuffle) \ V(IA32S32x4Swizzle) \ V(IA32S32x4Shuffle) \ diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc index c2097a6691fd1b..287eb49a4803aa 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc @@ -97,6 +97,23 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXFloat32Neg: case kIA32BitcastFI: case kIA32BitcastIF: + case kSSEF64x2Splat: + case kAVXF64x2Splat: + case kSSEF64x2ExtractLane: + case kAVXF64x2ExtractLane: + case kSSEF64x2ReplaceLane: + case kAVXF64x2ReplaceLane: + case kIA32F64x2Sqrt: + case kIA32F64x2Add: + case kIA32F64x2Sub: + case kIA32F64x2Mul: + case kIA32F64x2Div: + case kIA32F64x2Min: + case kIA32F64x2Max: + case kIA32F64x2Eq: + case kIA32F64x2Ne: + case kIA32F64x2Lt: + case kIA32F64x2Le: case kSSEF32x4Splat: case kAVXF32x4Splat: case kSSEF32x4ExtractLane: @@ -110,6 +127,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXF32x4Abs: case kSSEF32x4Neg: case kAVXF32x4Neg: + case kSSEF32x4Sqrt: + case kAVXF32x4Sqrt: case kIA32F32x4RecipApprox: case kIA32F32x4RecipSqrtApprox: case kSSEF32x4Add: @@ -294,6 +313,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kAVXS128Xor: case kSSES128Select: case kAVXS128Select: + case kIA32S8x16Swizzle: case kIA32S8x16Shuffle: case kIA32S32x4Swizzle: case kIA32S32x4Shuffle: diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index ebef39a93a65ec..a24727aba20f26 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -200,12 +200,27 @@ namespace { void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) { IA32OperandGenerator g(selector); - InstructionOperand temps[] = {g.TempRegister()}; Node* input = node->InputAt(0); // We have to use a byte register as input to movsxb. InstructionOperand input_op = opcode == kIA32Movsxbl ? g.UseFixed(input, eax) : g.Use(input); - selector->Emit(opcode, g.DefineAsRegister(node), input_op, arraysize(temps), + selector->Emit(opcode, g.DefineAsRegister(node), input_op); +} + +void VisitROWithTemp(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempRegister()}; + selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), + arraysize(temps), temps); +} + +void VisitROWithTempSimd(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempSimd128Register()}; + selector->Emit(opcode, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); } @@ -231,10 +246,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node, void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input, ArchOpcode avx_opcode, ArchOpcode sse_opcode) { IA32OperandGenerator g(selector); + InstructionOperand temps[] = {g.TempSimd128Register()}; if (selector->IsSupported(AVX)) { - selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input)); + selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input), + arraysize(temps), temps); } else { - selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input)); + selector->Emit(sse_opcode, g.DefineSameAsFirst(node), + g.UseUniqueRegister(input), arraysize(temps), temps); } } @@ -804,12 +822,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \ - V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \ V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \ - V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \ - V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \ - V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \ V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \ V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \ V(BitcastFloat32ToInt32, kIA32BitcastFI) \ @@ -819,7 +833,15 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \ V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \ V(SignExtendWord8ToInt32, kIA32Movsxbl) \ - V(SignExtendWord16ToInt32, kIA32Movsxwl) + V(SignExtendWord16ToInt32, kIA32Movsxwl) \ + V(F64x2Sqrt, kIA32F64x2Sqrt) + +#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) + +#define RO_WITH_TEMP_SIMD_OP_LIST(V) \ + V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ + V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \ + V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) #define RR_OP_LIST(V) \ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \ @@ -841,13 +863,23 @@ void InstructionSelector::VisitWord32Ror(Node* node) { V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ V(Float64Mul, kAVXFloat64Mul, 
kSSEFloat64Mul) \ V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ - V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) + V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \ + V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \ + V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \ + V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \ + V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \ + V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \ + V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \ + V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \ + V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le) #define FLOAT_UNOP_LIST(V) \ V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ - V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) + V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \ + V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ + V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg) #define RO_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ @@ -857,6 +889,22 @@ RO_OP_LIST(RO_VISITOR) #undef RO_VISITOR #undef RO_OP_LIST +#define RO_WITH_TEMP_VISITOR(Name, opcode) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitROWithTemp(this, node, opcode); \ + } +RO_WITH_TEMP_OP_LIST(RO_WITH_TEMP_VISITOR) +#undef RO_WITH_TEMP_VISITOR +#undef RO_WITH_TEMP_OP_LIST + +#define RO_WITH_TEMP_SIMD_VISITOR(Name, opcode) \ + void InstructionSelector::Visit##Name(Node* node) { \ + VisitROWithTempSimd(this, node, opcode); \ + } +RO_WITH_TEMP_SIMD_OP_LIST(RO_WITH_TEMP_SIMD_VISITOR) +#undef RO_WITH_TEMP_SIMD_VISITOR +#undef RO_WITH_TEMP_SIMD_OP_LIST + #define RR_VISITOR(Name, opcode) \ void InstructionSelector::Visit##Name(Node* node) { \ VisitRR(this, node, opcode); \ @@ -890,6 +938,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitInt32Add(Node* node) { IA32OperandGenerator g(this); @@ -1971,6 +2023,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { #define SIMD_UNOP_PREFIX_LIST(V) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(S128Not) #define SIMD_ANYTRUE_LIST(V) \ @@ -1995,6 +2048,43 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { V(I8x16ShrS) \ V(I8x16ShrU) +void InstructionSelector::VisitF64x2Min(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUnique(node->InputAt(1)); + + if (IsSupported(AVX)) { + Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + +void InstructionSelector::VisitF64x2Max(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0)); + InstructionOperand operand1 = g.UseUnique(node->InputAt(1)); + if (IsSupported(AVX)) { + Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1, + arraysize(temps), temps); + } else { + Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1, + arraysize(temps), temps); + } +} + +void InstructionSelector::VisitF64x2Splat(Node* node) { + VisitRRSimd(this, node, kAVXF64x2Splat, kSSEF64x2Splat); +} + +void 
InstructionSelector::VisitF64x2ExtractLane(Node* node) { + VisitRRISimd(this, node, kAVXF64x2ExtractLane, kSSEF64x2ExtractLane); +} + void InstructionSelector::VisitF32x4Splat(Node* node) { VisitRRSimd(this, node, kAVXF32x4Splat, kSSEF32x4Splat); } @@ -2086,6 +2176,28 @@ VISIT_SIMD_REPLACE_LANE(F32x4) #undef VISIT_SIMD_REPLACE_LANE #undef SIMD_INT_TYPES +// The difference between this and VISIT_SIMD_REPLACE_LANE is that this forces +// operand2 to be UseRegister, because the codegen relies on insertps using +// registers. +// TODO(v8:9764) Remove this UseRegister requirement +#define VISIT_SIMD_REPLACE_LANE_USE_REG(Type) \ + void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ + IA32OperandGenerator g(this); \ + InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \ + InstructionOperand operand1 = \ + g.UseImmediate(OpParameter(node->op())); \ + InstructionOperand operand2 = g.UseRegister(node->InputAt(1)); \ + if (IsSupported(AVX)) { \ + Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \ + operand1, operand2); \ + } else { \ + Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \ + operand1, operand2); \ + } \ + } +VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2) +#undef VISIT_SIMD_REPLACE_LANE_USE_REG + #define VISIT_SIMD_SHIFT(Opcode) \ void InstructionSelector::Visit##Opcode(Node* node) { \ VisitRROSimdShift(this, node, kAVX##Opcode, kSSE##Opcode); \ @@ -2132,12 +2244,12 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE) #undef VISIT_SIMD_ANYTRUE #undef SIMD_ANYTRUE_LIST -#define VISIT_SIMD_ALLTRUE(Opcode) \ - void InstructionSelector::Visit##Opcode(Node* node) { \ - IA32OperandGenerator g(this); \ - InstructionOperand temps[] = {g.TempRegister()}; \ - Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), \ - arraysize(temps), temps); \ +#define VISIT_SIMD_ALLTRUE(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + IA32OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \ + Emit(kIA32##Opcode, g.DefineAsRegister(node), \ + g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \ } SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE) #undef VISIT_SIMD_ALLTRUE @@ -2489,6 +2601,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); } +void InstructionSelector::VisitS8x16Swizzle(Node* node) { + IA32OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kIA32S8x16Swizzle, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), + arraysize(temps), temps); +} + // static MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc index dc66813740b3ee..d4920cd575ac1e 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.cc +++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc @@ -4,7 +4,7 @@ #include "src/compiler/backend/instruction-scheduler.h" -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/utils/random-number-generator.h" #include "src/execution/isolate.h" diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h index a3f62e7ba40c45..13ea049eba401f 100644 --- a/deps/v8/src/compiler/backend/instruction-selector-impl.h +++ 
b/deps/v8/src/compiler/backend/instruction-selector-impl.h @@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) { // Helper struct containing data about a table or lookup switch. class SwitchInfo { public: - SwitchInfo(ZoneVector& cases, // NOLINT(runtime/references) - int32_t min_value, int32_t max_value, BasicBlock* default_branch) + SwitchInfo(ZoneVector const& cases, int32_t min_value, + int32_t max_value, BasicBlock* default_branch) : cases_(cases), min_value_(min_value), max_value_(max_value), @@ -193,17 +193,6 @@ class OperandGenerator { reg.code(), GetVReg(node))); } - InstructionOperand UseExplicit(LinkageLocation location) { - MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); - if (location.IsRegister()) { - return ExplicitOperand(LocationOperand::REGISTER, rep, - location.AsRegister()); - } else { - return ExplicitOperand(LocationOperand::STACK_SLOT, rep, - location.GetLocation()); - } - } - InstructionOperand UseImmediate(int immediate) { return sequence()->AddImmediate(Constant(immediate)); } @@ -275,6 +264,16 @@ class OperandGenerator { InstructionOperand::kInvalidVirtualRegister); } + template + InstructionOperand TempFpRegister(FPRegType reg) { + UnallocatedOperand op = + UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER, reg.code(), + sequence()->NextVirtualRegister()); + sequence()->MarkAsRepresentation(MachineRepresentation::kSimd128, + op.virtual_register()); + return op; + } + InstructionOperand TempImmediate(int32_t imm) { return sequence()->AddImmediate(Constant(imm)); } diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 43193ec2b110e9..e165c6c6a9e93a 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -6,7 +6,7 @@ #include -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/tick-counter.h" #include "src/compiler/backend/instruction-selector-impl.h" @@ -1439,6 +1439,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsWord64(node), VisitWord64ReverseBits(node); case IrOpcode::kWord64ReverseBytes: return MarkAsWord64(node), VisitWord64ReverseBytes(node); + case IrOpcode::kSimd128ReverseBytes: + return MarkAsSimd128(node), VisitSimd128ReverseBytes(node); case IrOpcode::kInt64AbsWithOverflow: return MarkAsWord64(node), VisitInt64AbsWithOverflow(node); case IrOpcode::kWord64Equal: @@ -1502,7 +1504,7 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kUint64Mod: return MarkAsWord64(node), VisitUint64Mod(node); case IrOpcode::kBitcastTaggedToWord: - case IrOpcode::kBitcastTaggedSignedToWord: + case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: return MarkAsRepresentation(MachineType::PointerRepresentation(), node), VisitBitcastTaggedToWord(node); case IrOpcode::kBitcastWordToTagged: @@ -1857,6 +1859,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF64x2Abs(node); case IrOpcode::kF64x2Neg: return MarkAsSimd128(node), VisitF64x2Neg(node); + case IrOpcode::kF64x2Sqrt: + return MarkAsSimd128(node), VisitF64x2Sqrt(node); case IrOpcode::kF64x2Add: return MarkAsSimd128(node), VisitF64x2Add(node); case IrOpcode::kF64x2Sub: @@ -1877,6 +1881,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF64x2Lt(node); case IrOpcode::kF64x2Le: return MarkAsSimd128(node), VisitF64x2Le(node); + case 
IrOpcode::kF64x2Qfma: + return MarkAsSimd128(node), VisitF64x2Qfma(node); + case IrOpcode::kF64x2Qfms: + return MarkAsSimd128(node), VisitF64x2Qfms(node); case IrOpcode::kF32x4Splat: return MarkAsSimd128(node), VisitF32x4Splat(node); case IrOpcode::kF32x4ExtractLane: @@ -1891,6 +1899,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Abs(node); case IrOpcode::kF32x4Neg: return MarkAsSimd128(node), VisitF32x4Neg(node); + case IrOpcode::kF32x4Sqrt: + return MarkAsSimd128(node), VisitF32x4Sqrt(node); case IrOpcode::kF32x4RecipApprox: return MarkAsSimd128(node), VisitF32x4RecipApprox(node); case IrOpcode::kF32x4RecipSqrtApprox: @@ -1917,6 +1927,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Lt(node); case IrOpcode::kF32x4Le: return MarkAsSimd128(node), VisitF32x4Le(node); + case IrOpcode::kF32x4Qfma: + return MarkAsSimd128(node), VisitF32x4Qfma(node); + case IrOpcode::kF32x4Qfms: + return MarkAsSimd128(node), VisitF32x4Qfms(node); case IrOpcode::kI64x2Splat: return MarkAsSimd128(node), VisitI64x2Splat(node); case IrOpcode::kI64x2ExtractLane: @@ -2137,6 +2151,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitS128Not(node); case IrOpcode::kS128Select: return MarkAsSimd128(node), VisitS128Select(node); + case IrOpcode::kS8x16Swizzle: + return MarkAsSimd128(node), VisitS8x16Swizzle(node); case IrOpcode::kS8x16Shuffle: return MarkAsSimd128(node), VisitS8x16Shuffle(node); case IrOpcode::kS1x2AnyTrue: @@ -2286,8 +2302,8 @@ void InstructionSelector::VisitFloat64Tanh(Node* node) { VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh); } -void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw, - InstructionOperand& index_operand) { +void InstructionSelector::EmitTableSwitch( + const SwitchInfo& sw, InstructionOperand const& index_operand) { OperandGenerator g(this); size_t input_count = 2 + sw.value_range(); DCHECK_LE(sw.value_range(), std::numeric_limits::max() - 2); @@ -2304,8 +2320,8 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw, Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr); } -void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw, - InstructionOperand& value_operand) { +void InstructionSelector::EmitLookupSwitch( + const SwitchInfo& sw, InstructionOperand const& value_operand) { OperandGenerator g(this); std::vector cases = sw.CasesSortedByOriginalOrder(); size_t input_count = 2 + sw.case_count() * 2; @@ -2322,7 +2338,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw, } void InstructionSelector::EmitBinarySearchSwitch( - const SwitchInfo& sw, InstructionOperand& value_operand) { + const SwitchInfo& sw, InstructionOperand const& value_operand) { OperandGenerator g(this); size_t input_count = 2 + sw.case_count() * 2; DCHECK_LE(sw.case_count(), (std::numeric_limits::max() - 2) / 2); @@ -2607,21 +2623,25 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { #if !V8_TARGET_ARCH_X64 #if !V8_TARGET_ARCH_ARM64 +#if !V8_TARGET_ARCH_IA32 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); } 
+void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } -void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); } +#endif // !V8_TARGET_ARCH_IA32 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } @@ -2630,6 +2650,7 @@ void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); } @@ -2639,8 +2660,11 @@ void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); } #endif // !V8_TARGET_ARCH_ARM64 -void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MinS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MaxS(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); } @@ -2786,10 +2810,17 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { // Select the appropriate opcode based on the call type. InstructionCode opcode = kArchNop; switch (call_descriptor->kind()) { - case CallDescriptor::kCallAddress: - opcode = kArchCallCFunction | MiscField::encode(static_cast( - call_descriptor->ParameterCount())); + case CallDescriptor::kCallAddress: { + int misc_field = static_cast(call_descriptor->ParameterCount()); +#if defined(_AIX) + // Highest misc_field bit is used on AIX to indicate if a CFunction call + // has function descriptor or not. 
+ misc_field |= call_descriptor->HasFunctionDescriptor() + << kHasFunctionDescriptorBitShift; +#endif + opcode = kArchCallCFunction | MiscField::encode(misc_field); break; + } case CallDescriptor::kCallCodeObject: opcode = kArchCallCodeObject | MiscField::encode(flags); break; diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index eb3e0984272a30..e951c90f953f04 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -502,15 +502,12 @@ class V8_EXPORT_PRIVATE InstructionSelector final { FeedbackSource const& feedback, Node* frame_state); - void EmitTableSwitch( - const SwitchInfo& sw, - InstructionOperand& index_operand); // NOLINT(runtime/references) - void EmitLookupSwitch( - const SwitchInfo& sw, - InstructionOperand& value_operand); // NOLINT(runtime/references) - void EmitBinarySearchSwitch( - const SwitchInfo& sw, - InstructionOperand& value_operand); // NOLINT(runtime/references) + void EmitTableSwitch(const SwitchInfo& sw, + InstructionOperand const& index_operand); + void EmitLookupSwitch(const SwitchInfo& sw, + InstructionOperand const& value_operand); + void EmitBinarySearchSwitch(const SwitchInfo& sw, + InstructionOperand const& value_operand); void TryRename(InstructionOperand* op); int GetRename(int virtual_register); diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index 06158b0c72e851..076f1b596e2859 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -168,7 +168,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) { return os << "[immediate:" << imm.indexed_value() << "]"; } } - case InstructionOperand::EXPLICIT: case InstructionOperand::ALLOCATED: { LocationOperand allocated = LocationOperand::cast(op); if (op.IsStackSlot()) { @@ -192,9 +191,6 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) { os << "[" << Simd128Register::from_code(allocated.register_code()) << "|R"; } - if (allocated.IsExplicit()) { - os << "|E"; - } switch (allocated.representation()) { case MachineRepresentation::kNone: os << "|-"; @@ -294,17 +290,6 @@ void ParallelMove::PrepareInsertAfter( if (replacement != nullptr) move->set_source(replacement->source()); } -ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep, - int index) - : LocationOperand(EXPLICIT, kind, rep, index) { - DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep), - GetRegConfig()->IsAllocatableGeneralCode(index)); - DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32, - GetRegConfig()->IsAllocatableFloatCode(index)); - DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64), - GetRegConfig()->IsAllocatableDoubleCode(index)); -} - Instruction::Instruction(InstructionCode opcode) : opcode_(opcode), bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) | diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index f5f7f64c51e50d..462b0daf6b9c72 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -43,9 +43,8 @@ class V8_EXPORT_PRIVATE InstructionOperand { CONSTANT, IMMEDIATE, // Location operand kinds. - EXPLICIT, ALLOCATED, - FIRST_LOCATION_OPERAND_KIND = EXPLICIT + FIRST_LOCATION_OPERAND_KIND = ALLOCATED // Location operand kinds must be last. 
}; @@ -68,11 +67,6 @@ class V8_EXPORT_PRIVATE InstructionOperand { // embedded directly in instructions, e.g. small integers and on some // platforms Objects. INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE) - // ExplicitOperands do not participate in register allocation. They are - // created by the instruction selector for direct access to registers and - // stack slots, completely bypassing the register allocator. They are never - // associated with a virtual register - INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT) // AllocatedOperands are registers or stack slots that are assigned by the // register allocator and are always associated with a virtual register. INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED) @@ -515,19 +509,6 @@ class LocationOperand : public InstructionOperand { using IndexField = BitField64; }; -class V8_EXPORT_PRIVATE ExplicitOperand - : public NON_EXPORTED_BASE(LocationOperand) { - public: - ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index); - - static ExplicitOperand* New(Zone* zone, LocationKind kind, - MachineRepresentation rep, int index) { - return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index)); - } - - INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT) -}; - class AllocatedOperand : public LocationOperand { public: AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index) @@ -643,7 +624,7 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const { } return InstructionOperand::KindField::update( LocationOperand::RepresentationField::update(this->value_, canonical), - LocationOperand::EXPLICIT); + LocationOperand::ALLOCATED); } return this->value_; } @@ -776,11 +757,11 @@ class V8_EXPORT_PRIVATE Instruction final { public: size_t OutputCount() const { return OutputCountField::decode(bit_field_); } const InstructionOperand* OutputAt(size_t i) const { - DCHECK(i < OutputCount()); + DCHECK_LT(i, OutputCount()); return &operands_[i]; } InstructionOperand* OutputAt(size_t i) { - DCHECK(i < OutputCount()); + DCHECK_LT(i, OutputCount()); return &operands_[i]; } @@ -790,21 +771,21 @@ class V8_EXPORT_PRIVATE Instruction final { size_t InputCount() const { return InputCountField::decode(bit_field_); } const InstructionOperand* InputAt(size_t i) const { - DCHECK(i < InputCount()); + DCHECK_LT(i, InputCount()); return &operands_[OutputCount() + i]; } InstructionOperand* InputAt(size_t i) { - DCHECK(i < InputCount()); + DCHECK_LT(i, InputCount()); return &operands_[OutputCount() + i]; } size_t TempCount() const { return TempCountField::decode(bit_field_); } const InstructionOperand* TempAt(size_t i) const { - DCHECK(i < TempCount()); + DCHECK_LT(i, TempCount()); return &operands_[OutputCount() + InputCount() + i]; } InstructionOperand* TempAt(size_t i) { - DCHECK(i < TempCount()); + DCHECK_LT(i, TempCount()); return &operands_[OutputCount() + InputCount() + i]; } @@ -826,7 +807,8 @@ class V8_EXPORT_PRIVATE Instruction final { size_t output_count, InstructionOperand* outputs, size_t input_count, InstructionOperand* inputs, size_t temp_count, InstructionOperand* temps) { - DCHECK_LE(0, opcode); + // TODO(9872) + // DCHECK_LE(0, opcode); DCHECK(output_count == 0 || outputs != nullptr); DCHECK(input_count == 0 || inputs != nullptr); DCHECK(temp_count == 0 || temps != nullptr); diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc index dfb917a58c444b..ee195bf51e16df 100644 --- a/deps/v8/src/compiler/backend/jump-threading.cc +++ 
b/deps/v8/src/compiler/backend/jump-threading.cc @@ -69,11 +69,11 @@ bool IsBlockWithBranchPoisoning(InstructionSequence* code, } // namespace bool JumpThreading::ComputeForwarding(Zone* local_zone, - ZoneVector<RpoNumber>& result, + ZoneVector<RpoNumber>* result, InstructionSequence* code, bool frame_at_start) { ZoneStack<RpoNumber> stack(local_zone); - JumpThreadingState state = {false, result, stack}; + JumpThreadingState state = {false, *result, stack}; state.Clear(code->InstructionBlockCount()); // Iterate over the blocks forward, pushing the blocks onto the stack. @@ -135,15 +135,15 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone, } #ifdef DEBUG - for (RpoNumber num : result) { + for (RpoNumber num : *result) { DCHECK(num.IsValid()); } #endif if (FLAG_trace_turbo_jt) { - for (int i = 0; i < static_cast<int>(result.size()); i++) { + for (int i = 0; i < static_cast<int>(result->size()); i++) { TRACE("B%d ", i); - int to = result[i].ToInt(); + int to = (*result)[i].ToInt(); if (i != to) { TRACE("-> B%d\n", to); } else { @@ -156,7 +156,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone, } void JumpThreading::ApplyForwarding(Zone* local_zone, - ZoneVector<RpoNumber>& result, + ZoneVector<RpoNumber> const& result, InstructionSequence* code) { if (!FLAG_turbo_jt) return; diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h index ce60ebcb2e3423..ce9e3949249e74 100644 --- a/deps/v8/src/compiler/backend/jump-threading.h +++ b/deps/v8/src/compiler/backend/jump-threading.h @@ -17,17 +17,14 @@ class V8_EXPORT_PRIVATE JumpThreading { public: // Compute the forwarding map of basic blocks to their ultimate destination. // Returns {true} if there is at least one block that is forwarded. - static bool ComputeForwarding( - Zone* local_zone, - ZoneVector<RpoNumber>& result, // NOLINT(runtime/references) - InstructionSequence* code, bool frame_at_start); + static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>* result, + InstructionSequence* code, bool frame_at_start); // Rewrite the instructions to forward jumps and branches. // May also negate some branches.
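The ComputeForwarding/ApplyForwarding hunks above replace the non-const reference parameters (previously silenced with NOLINT(runtime/references)) with an explicit pointer for the output and a const reference for the read-only input. A hedged sketch of how a caller looks after this change; the types are the real backend types shown in the hunks, but the surrounding function and its name are illustrative and headers are omitted:

    void RunJumpThreadingSketch(Zone* local_zone, InstructionSequence* code,
                                bool frame_at_start) {
      ZoneVector<RpoNumber> result(local_zone);
      // The output parameter is now an explicit pointer, so the mutation is
      // visible at the call site.
      if (JumpThreading::ComputeForwarding(local_zone, &result, code,
                                           frame_at_start)) {
        // The forwarding map is only read from here on, hence const&.
        JumpThreading::ApplyForwarding(local_zone, result, code);
      }
    }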
- static void ApplyForwarding( - Zone* local_zone, - ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references) - InstructionSequence* code); + static void ApplyForwarding(Zone* local_zone, + ZoneVector<RpoNumber> const& forwarding, + InstructionSequence* code); }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc index 239075392afb81..ee23402e69bf38 100644 --- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -265,34 +265,33 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU( - bool& predicate, // NOLINT(runtime/references) - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { switch (condition) { case kEqual: - predicate = true; + *predicate = true; return EQ; case kNotEqual: - predicate = false; + *predicate = false; return EQ; case kUnsignedLessThan: - predicate = true; + *predicate = true; return OLT; case kUnsignedGreaterThanOrEqual: - predicate = false; + *predicate = false; return OLT; case kUnsignedLessThanOrEqual: - predicate = true; + *predicate = true; return OLE; case kUnsignedGreaterThan: - predicate = false; + *predicate = false; return OLE; case kUnorderedEqual: case kUnorderedNotEqual: - predicate = true; + *predicate = true; break; default: - predicate = true; + *predicate = true; break; } UNREACHABLE(); @@ -303,9 +302,9 @@ FPUCondition FlagsConditionToConditionCmpFPU( << "\""; \ UNIMPLEMENTED(); -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + MipsOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -780,12 +779,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); - Label return_location; - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + // from start_call to return address. + int offset = 40; +#if V8_HOST_ARCH_MIPS + if (__ emit_debug_code()) { + offset += 16; + } +#endif + if (isWasmCapiFunction) { // Put the return address in a stack slot.
- __ LoadAddress(kScratchReg, &return_location); - __ sw(kScratchReg, - MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(kScratchReg, ra); + __ bind(&start_call); + __ nal(); + __ nop(); + __ Addu(ra, ra, offset - 8); // 8 = nop + nal + __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(ra, kScratchReg); } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); @@ -794,7 +806,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } - __ bind(&return_location); + if (isWasmCapiFunction) { + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + } + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack @@ -1179,7 +1194,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroSingleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { @@ -1239,7 +1254,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroDoubleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { __ Move(kDoubleRegZero, 0.0); @@ -2038,6 +2053,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } + case kMipsF32x4Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kMipsF32x4RecipApprox: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -3026,7 +3046,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMipsCmpS || instr->arch_opcode() == kMipsCmpD) { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ BranchTrueF(tlabel); } else { @@ -3116,7 +3136,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, case kMipsCmpS: case kMipsCmpD: { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); } else { @@ -3314,7 +3334,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, __ Move(kDoubleRegZero, 0.0); } bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (!IsMipsArchVariant(kMips32r6)) { __ li(result, Operand(1)); if (predicate) { diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h index e8020d9e895661..af0774f4688441 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h +++ 
b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -159,6 +159,7 @@ namespace compiler { V(MipsI32x4MinU) \ V(MipsF32x4Abs) \ V(MipsF32x4Neg) \ + V(MipsF32x4Sqrt) \ V(MipsF32x4RecipApprox) \ V(MipsF32x4RecipSqrtApprox) \ V(MipsF32x4Add) \ diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 4e6aef52f49f70..ba17ad25819cab 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -54,6 +54,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMipsF32x4Div: case kMipsF32x4Ne: case kMipsF32x4Neg: + case kMipsF32x4Sqrt: case kMipsF32x4RecipApprox: case kMipsF32x4RecipSqrtApprox: case kMipsF32x4ReplaceLane: diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc index bb47262c6c32db..7ee5c7c2c77d04 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -781,6 +780,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Ctz(Node* node) { MipsOperandGenerator g(this); Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); @@ -2015,6 +2018,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \ V(F32x4Abs, kMipsF32x4Abs) \ V(F32x4Neg, kMipsF32x4Neg) \ + V(F32x4Sqrt, kMipsF32x4Sqrt) \ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \ diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 5682bed71a42cf..9cec463e875b1f 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -278,42 +278,41 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU( - bool& predicate, // NOLINT(runtime/references) - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, + FlagsCondition condition) { switch (condition) { case kEqual: - predicate = true; + *predicate = true; return EQ; case kNotEqual: - predicate = false; + *predicate = false; return EQ; case kUnsignedLessThan: - predicate = true; + *predicate = true; return OLT; case kUnsignedGreaterThanOrEqual: - predicate = false; + *predicate = false; return OLT; case kUnsignedLessThanOrEqual: - predicate = true; + *predicate = true; return OLE; case kUnsignedGreaterThan: - predicate = false; + *predicate = false; return OLE; case kUnorderedEqual: case kUnorderedNotEqual: - predicate = true; + *predicate = true; break; default: - predicate = true; + *predicate = true; break; } UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, 
Instruction* instr, - MipsOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + MipsOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -758,12 +757,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); - Label return_location; - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + // from start_call to return address. + int offset = 48; +#if V8_HOST_ARCH_MIPS64 + if (__ emit_debug_code()) { + offset += 16; + } +#endif + if (isWasmCapiFunction) { // Put the return address in a stack slot. - __ LoadAddress(kScratchReg, &return_location); - __ sd(kScratchReg, - MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(kScratchReg, ra); + __ bind(&start_call); + __ nal(); + __ nop(); + __ Daddu(ra, ra, offset - 8); // 8 = nop + nal + __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mov(ra, kScratchReg); } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); @@ -772,7 +784,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } - __ bind(&return_location); + if (isWasmCapiFunction) { + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + } + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack @@ -1276,7 +1291,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroSingleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { @@ -1339,7 +1354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( FPURegister right = i.InputOrZeroDoubleRegister(1); bool predicate; FPUCondition cc = - FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition()); + FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); if ((left == kDoubleRegZero || right == kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { __ Move(kDoubleRegZero, 0.0); @@ -2233,6 +2248,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } + case kMips64F32x4Sqrt: { + CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kMips64I32x4Neg: { CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); @@ -3151,7 +3171,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } else if (instr->arch_opcode() == kMips64CmpS || instr->arch_opcode() == kMips64CmpD) { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ BranchTrueF(tlabel); } else { @@ -3261,7 +3281,7 
@@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, case kMips64CmpS: case kMips64CmpD: { bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (predicate) { __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); } else { @@ -3470,7 +3490,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, __ Move(kDoubleRegZero, 0.0); } bool predicate; - FlagsConditionToConditionCmpFPU(predicate, condition); + FlagsConditionToConditionCmpFPU(&predicate, condition); if (kArchVariant != kMips64r6) { __ li(result, Operand(1)); if (predicate) { diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h index edc8924757d11d..bcf3532b5725f3 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h +++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h @@ -189,6 +189,7 @@ namespace compiler { V(Mips64I32x4MinU) \ V(Mips64F32x4Abs) \ V(Mips64F32x4Neg) \ + V(Mips64F32x4Sqrt) \ V(Mips64F32x4RecipApprox) \ V(Mips64F32x4RecipSqrtApprox) \ V(Mips64F32x4Add) \ diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 880b424c416e8b..fe2d33d1db5865 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -82,6 +82,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMips64F32x4Div: case kMips64F32x4Ne: case kMips64F32x4Neg: + case kMips64F32x4Sqrt: case kMips64F32x4RecipApprox: case kMips64F32x4RecipSqrtApprox: case kMips64F32x4ReplaceLane: diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 9c717ab1e91aa9..dfc0ff5badf17b 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/base/adapters.h" #include "src/base/bits.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -823,6 +822,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitWord32Ctz(Node* node) { Mips64OperandGenerator g(this); Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); @@ -2678,6 +2681,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \ V(F32x4Abs, kMips64F32x4Abs) \ V(F32x4Neg, kMips64F32x4Neg) \ + V(F32x4Sqrt, kMips64F32x4Sqrt) \ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \ diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index 5c69bc34a12ee0..964f88881678da 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -263,9 +263,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, Instruction* instr, - PPCOperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, + PPCOperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { @@ -1020,11 +1019,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #endif break; case kArchCallCFunction: { - int const num_parameters = MiscField::decode(instr->opcode()); + int misc_field = MiscField::decode(instr->opcode()); + int num_parameters = misc_field; + bool has_function_descriptor = false; Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); +#if defined(_AIX) + // AIX/PPC64BE Linux uses a function descriptor + int kNumParametersMask = kHasFunctionDescriptorBitMask - 1; + num_parameters = kNumParametersMask & misc_field; + has_function_descriptor = + (misc_field & kHasFunctionDescriptorBitMask) != 0; + // AIX emits 2 extra Load instructions under CallCFunctionHelper + // due to having function descriptor. + constexpr int offset = 11 * kInstrSize; +#else constexpr int offset = 9 * kInstrSize; +#endif if (isWasmCapiFunction) { __ mflr(r0); __ bind(&start_call); @@ -1036,16 +1048,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_parameters); + __ CallCFunction(ref, num_parameters, has_function_descriptor); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_parameters); + __ CallCFunction(func, num_parameters, has_function_descriptor); } // TODO(miladfar): In the above block, kScratchReg must be populated with // the strictly-correct PC, which is the return address at this spot. The - // offset is set to 36 (9 * kInstrSize) right now, which is counted from - // where we are binding to the label and ends at this spot. If failed, - // replace it with the correct offset suggested. More info on f5ab7d3. 
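The AIX handling above splits the call instruction's MiscField into a parameter count and a has-function-descriptor flag (kNumParametersMask / kHasFunctionDescriptorBitMask), matching the encode side added to the generic instruction selector earlier in this patch. A small self-contained C++ sketch of that packing scheme; the concrete bit position of kHasFunctionDescriptorBitShift is defined elsewhere in the backend, so the value used here is only an assumption:

    #include <cassert>

    // Assumed bit position; the real constant lives in the backend headers.
    constexpr int kHasFunctionDescriptorBitShift = 9;
    constexpr int kHasFunctionDescriptorBitMask = 1 << kHasFunctionDescriptorBitShift;
    constexpr int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;

    // Encode side, mirroring the misc_field computation in the selector.
    int EncodeMiscField(int num_parameters, bool has_function_descriptor) {
      assert(num_parameters <= kNumParametersMask);
      return num_parameters | (static_cast<int>(has_function_descriptor)
                               << kHasFunctionDescriptorBitShift);
    }

    // Decode side, mirroring the PPC code generator hunk above.
    void DecodeMiscField(int misc_field, int* num_parameters,
                         bool* has_function_descriptor) {
      *num_parameters = misc_field & kNumParametersMask;
      *has_function_descriptor = (misc_field & kHasFunctionDescriptorBitMask) != 0;
    }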
+ // offset is set to 36 (9 * kInstrSize) on pLinux and 44 on AIX, which is + // counted from where we are binding to the label and ends at this spot. + // If failed, replace it with the correct offset suggested. More info on + // f5ab7d3. if (isWasmCapiFunction) CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index ef8490a7265398..2ffd6495d72e1d 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -926,6 +926,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + // TODO(miladfar): Implement the ppc selector for reversing SIMD bytes. + // Check if the input node is a Load and do a Load Reverse at once. + UNIMPLEMENTED(); +} + void InstructionSelector::VisitInt32Add(Node* node) { VisitBinop(this, node, kPPC_Add32, kInt16Imm); } @@ -2283,6 +2289,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc index 53349c9c2b46df..17e0b8ca755fb3 100644 --- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc +++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc @@ -92,7 +92,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier( void RegisterAllocatorVerifier::VerifyInput( const OperandConstraint& constraint) { CHECK_NE(kSameAsFirst, constraint.type_); - if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) { + if (constraint.type_ != kImmediate) { CHECK_NE(InstructionOperand::kInvalidVirtualRegister, constraint.virtual_register_); } @@ -102,14 +102,12 @@ void RegisterAllocatorVerifier::VerifyTemp( const OperandConstraint& constraint) { CHECK_NE(kSameAsFirst, constraint.type_); CHECK_NE(kImmediate, constraint.type_); - CHECK_NE(kExplicit, constraint.type_); CHECK_NE(kConstant, constraint.type_); } void RegisterAllocatorVerifier::VerifyOutput( const OperandConstraint& constraint) { CHECK_NE(kImmediate, constraint.type_); - CHECK_NE(kExplicit, constraint.type_); CHECK_NE(InstructionOperand::kInvalidVirtualRegister, constraint.virtual_register_); } @@ -149,8 +147,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op, constraint->type_ = kConstant; constraint->value_ = ConstantOperand::cast(op)->virtual_register(); constraint->virtual_register_ = constraint->value_; - } else if (op->IsExplicit()) { - constraint->type_ = kExplicit; } else if (op->IsImmediate()) { const ImmediateOperand* imm = ImmediateOperand::cast(op); int value = imm->type() == ImmediateOperand::INLINE ? 
imm->inline_value() @@ -235,9 +231,6 @@ void RegisterAllocatorVerifier::CheckConstraint( case kFPRegister: CHECK_WITH_MSG(op->IsFPRegister(), caller_info_); return; - case kExplicit: - CHECK_WITH_MSG(op->IsExplicit(), caller_info_); - return; case kFixedRegister: case kRegisterAndSlot: CHECK_WITH_MSG(op->IsRegister(), caller_info_); @@ -503,8 +496,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves() { instr_constraint.operand_constraints_; size_t count = 0; for (size_t i = 0; i < instr->InputCount(); ++i, ++count) { - if (op_constraints[count].type_ == kImmediate || - op_constraints[count].type_ == kExplicit) { + if (op_constraints[count].type_ == kImmediate) { continue; } int virtual_register = op_constraints[count].virtual_register_; diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.h b/deps/v8/src/compiler/backend/register-allocator-verifier.h index 68e69c0d1648f6..7110c2eb42c6f4 100644 --- a/deps/v8/src/compiler/backend/register-allocator-verifier.h +++ b/deps/v8/src/compiler/backend/register-allocator-verifier.h @@ -188,7 +188,6 @@ class RegisterAllocatorVerifier final : public ZoneObject { kRegisterOrSlot, kRegisterOrSlotFP, kRegisterOrSlotOrConstant, - kExplicit, kSameAsFirst, kRegisterAndSlot }; diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index 21eef0485c5952..945554eb32361c 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -6,7 +6,7 @@ #include -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/small-vector.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/tick-counter.h" @@ -317,7 +317,6 @@ UsePositionHintType UsePosition::HintTypeForOperand( switch (op.kind()) { case InstructionOperand::CONSTANT: case InstructionOperand::IMMEDIATE: - case InstructionOperand::EXPLICIT: return UsePositionHintType::kNone; case InstructionOperand::UNALLOCATED: return UsePositionHintType::kUnresolved; @@ -797,12 +796,13 @@ LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const { return start_search->end(); } -LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) const { +LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) { UseInterval* start_search = FirstSearchIntervalForPosition(position); while (start_search->start() < position) { start_search = start_search->next(); } - return start_search->start(); + next_start_ = start_search->start(); + return next_start_; } LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { @@ -1940,8 +1940,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { // Handle fixed input operands of second instruction. for (size_t i = 0; i < second->InputCount(); i++) { InstructionOperand* input = second->InputAt(i); - if (input->IsImmediate() || input->IsExplicit()) { - continue; // Ignore immediates and explicitly reserved registers. + if (input->IsImmediate()) { + continue; // Ignore immediates. } UnallocatedOperand* cur_input = UnallocatedOperand::cast(input); if (cur_input->HasFixedPolicy()) { @@ -2323,8 +2323,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, for (size_t i = 0; i < instr->InputCount(); i++) { InstructionOperand* input = instr->InputAt(i); - if (input->IsImmediate() || input->IsExplicit()) { - continue; // Ignore immediates and explicitly reserved registers. + if (input->IsImmediate()) { + continue; // Ignore immediates. 
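A note on the LiveRange::NextStartAfter change above: the method loses its const qualifier because it now caches the computed position in next_start_, and the register-allocation hunks further down compare against range->NextStart() without re-walking the interval list. The accessor itself is declared in register-allocator.h, which is not part of this excerpt, so the following one-liner is only its assumed shape as it would appear in the LiveRange class declaration:

    // Assumed accessor, returning the value cached by NextStartAfter().
    LifetimePosition NextStart() const { return next_start_; }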
} LifetimePosition use_pos; if (input->IsUnallocated() && @@ -2504,10 +2504,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, predecessor_hint_preference |= kNotDeferredBlockPreference; } - // - Prefer hints from allocated (or explicit) operands. + // - Prefer hints from allocated operands. // - // Already-allocated or explicit operands are typically assigned using - // the parallel moves on the last instruction. For example: + // Already-allocated operands are typically assigned using the parallel + // moves on the last instruction. For example: // // gap (v101 = [x0|R|w32]) (v100 = v101) // ArchJmp @@ -2515,7 +2515,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, // phi: v100 = v101 v102 // // We have already found the END move, so look for a matching START move - // from an allocated (or explicit) operand. + // from an allocated operand. // // Note that we cannot simply look up data()->live_ranges()[vreg] here // because the live ranges are still being built when this function is @@ -2527,7 +2527,7 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, for (MoveOperands* move : *moves) { InstructionOperand& to = move->destination(); if (predecessor_hint->Equals(to)) { - if (move->source().IsAllocated() || move->source().IsExplicit()) { + if (move->source().IsAllocated()) { predecessor_hint_preference |= kMoveIsAllocatedPreference; } break; @@ -3095,11 +3095,11 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, : RegisterAllocator(data, kind), unhandled_live_ranges_(local_zone), active_live_ranges_(local_zone), - inactive_live_ranges_(local_zone), + inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone), + local_zone), next_active_ranges_change_(LifetimePosition::Invalid()), next_inactive_ranges_change_(LifetimePosition::Invalid()) { active_live_ranges().reserve(8); - inactive_live_ranges().reserve(8); } void LinearScanAllocator::MaybeSpillPreviousRanges(LiveRange* begin_range, @@ -3143,15 +3143,15 @@ void LinearScanAllocator::MaybeUndoPreviousSplit(LiveRange* range) { } } -void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, +void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet* to_be_live, LifetimePosition position, SpillMode spill_mode) { for (auto it = active_live_ranges().begin(); it != active_live_ranges().end();) { LiveRange* active_range = *it; TopLevelLiveRange* toplevel = (*it)->TopLevel(); - auto found = to_be_live.find({toplevel, kUnassignedRegister}); - if (found == to_be_live.end()) { + auto found = to_be_live->find({toplevel, kUnassignedRegister}); + if (found == to_be_live->end()) { // Is not contained in {to_be_live}, spill it. // Fixed registers are exempt from this. They might have been // added from inactive at the block boundary but we know that @@ -3207,7 +3207,7 @@ void LinearScanAllocator::SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, } else { // This range is contained in {to_be_live}, so we can keep it. int expected_register = (*found).expected_register; - to_be_live.erase(found); + to_be_live->erase(found); if (expected_register == active_range->assigned_register()) { // Was life and in correct register, simply pass through. TRACE("Keeping %d:%d in %s\n", toplevel->vreg(), @@ -3238,31 +3238,22 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range, // give reloading registers pecedence. That way we would compute the // intersection for the entire future. 
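The LinearScanAllocator constructor hunk above replaces the single flat list of inactive live ranges with one queue per physical register (inactive_live_ranges_(num_registers(), InactiveLiveRangeQueue(local_zone), local_zone)). A rough sketch of the resulting shape, with standard containers standing in for the Zone containers; the comparator is an assumption, chosen to match the early breaks on NextStart() that appear in the later per-register loops:

    #include <set>
    #include <vector>

    // Minimal stand-in for the backend class, just to show the shape.
    struct LiveRange {
      int next_start;  // models LiveRange::NextStart()
    };

    // Assumed ordering: by cached next start, so a per-register scan can stop
    // as soon as a range starts too late to matter.
    struct InactiveLiveRangeOrdering {
      bool operator()(const LiveRange* a, const LiveRange* b) const {
        return a->next_start < b->next_start;
      }
    };
    using InactiveLiveRangeQueue = std::multiset<LiveRange*, InactiveLiveRangeOrdering>;

    struct LinearScanStateSketch {
      // One queue per physical register: a range that is inactive in register r
      // can only conflict with candidates for r (or an FP alias of r), so the
      // other queues never have to be walked.
      std::vector<InactiveLiveRangeQueue> inactive_live_ranges;
      explicit LinearScanStateSketch(int num_registers)
          : inactive_live_ranges(num_registers) {}
    };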
LifetimePosition new_end = range->End(); - for (const auto inactive : inactive_live_ranges()) { - if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (inactive->assigned_register() != reg) continue; - } else { - bool conflict = inactive->assigned_register() == reg; - if (!conflict) { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases(range->representation(), reg, - inactive->representation(), - &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases-- && !conflict) { - int aliased_reg = alias_base_index + aliases; - if (aliased_reg == reg) { - conflict = true; - } - } - } - if (!conflict) continue; + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { + if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) { + continue; } - for (auto interval = inactive->first_interval(); interval != nullptr; - interval = interval->next()) { - if (interval->start() > new_end) break; - if (interval->end() <= range->Start()) continue; - if (new_end > interval->start()) new_end = interval->start(); + for (const auto cur_inactive : inactive_live_ranges(cur_reg)) { + if (!kSimpleFPAliasing && check_fp_aliasing() && + !data()->config()->AreAliases(cur_inactive->representation(), cur_reg, + range->representation(), reg)) { + continue; + } + for (auto interval = cur_inactive->first_interval(); interval != nullptr; + interval = interval->next()) { + if (interval->start() > new_end) break; + if (interval->end() <= range->Start()) continue; + if (new_end > interval->start()) new_end = interval->start(); + } } } if (new_end != range->End()) { @@ -3275,8 +3266,8 @@ LiveRange* LinearScanAllocator::AssignRegisterOnReload(LiveRange* range, return range; } -void LinearScanAllocator::ReloadLiveRanges(RangeWithRegisterSet& to_be_live, - LifetimePosition position) { +void LinearScanAllocator::ReloadLiveRanges( + RangeWithRegisterSet const& to_be_live, LifetimePosition position) { // Assumption: All ranges in {to_be_live} are currently spilled and there are // no conflicting registers in the active ranges. // The former is ensured by SpillNotLiveRanges, the latter is by construction @@ -3558,11 +3549,17 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, Min(updated->End(), next_active_ranges_change_); }); } - for (auto inactive : inactive_live_ranges()) { - split_conflicting(range, inactive, [this](LiveRange* updated) { - next_inactive_ranges_change_ = - Min(updated->End(), next_inactive_ranges_change_); - }); + for (int reg = 0; reg < num_registers(); ++reg) { + if ((kSimpleFPAliasing || !check_fp_aliasing()) && + reg != range->assigned_register()) { + continue; + } + for (auto inactive : inactive_live_ranges(reg)) { + split_conflicting(range, inactive, [this](LiveRange* updated) { + next_inactive_ranges_change_ = + Min(updated->End(), next_inactive_ranges_change_); + }); + } } }; if (mode() == GENERAL_REGISTERS) { @@ -3600,12 +3597,14 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, } } else { // Remove all ranges. 
- for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - if ((*it)->TopLevel()->IsDeferredFixed()) { - it = inactive_live_ranges().erase(it); - } else { - ++it; + for (int reg = 0; reg < num_registers(); ++reg) { + for (auto it = inactive_live_ranges(reg).begin(); + it != inactive_live_ranges(reg).end();) { + if ((*it)->TopLevel()->IsDeferredFixed()) { + it = inactive_live_ranges(reg).erase(it); + } else { + ++it; + } } } } @@ -3636,7 +3635,9 @@ bool LinearScanAllocator::HasNonDeferredPredecessor(InstructionBlock* block) { void LinearScanAllocator::AllocateRegisters() { DCHECK(unhandled_live_ranges().empty()); DCHECK(active_live_ranges().empty()); - DCHECK(inactive_live_ranges().empty()); + for (int reg = 0; reg < num_registers(); ++reg) { + DCHECK(inactive_live_ranges(reg).empty()); + } SplitAndSpillRangesDefinedByMemoryOperand(); data()->ResetSpillState(); @@ -3853,7 +3854,7 @@ void LinearScanAllocator::AllocateRegisters() { } if (!no_change_required) { - SpillNotLiveRanges(to_be_live, next_block_boundary, spill_mode); + SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode); ReloadLiveRanges(to_be_live, next_block_boundary); } @@ -3941,9 +3942,10 @@ void LinearScanAllocator::AddToActive(LiveRange* range) { void LinearScanAllocator::AddToInactive(LiveRange* range) { TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(), range->relative_id()); - inactive_live_ranges().push_back(range); next_inactive_ranges_change_ = std::min( next_inactive_ranges_change_, range->NextStartAfter(range->Start())); + DCHECK(range->HasRegisterAssigned()); + inactive_live_ranges(range->assigned_register()).insert(range); } void LinearScanAllocator::AddToUnhandled(LiveRange* range) { @@ -3966,30 +3968,36 @@ ZoneVector::iterator LinearScanAllocator::ActiveToHandled( ZoneVector::iterator LinearScanAllocator::ActiveToInactive( const ZoneVector::iterator it, LifetimePosition position) { LiveRange* range = *it; - inactive_live_ranges().push_back(range); TRACE("Moving live range %d:%d from active to inactive\n", (range)->TopLevel()->vreg(), range->relative_id()); + LifetimePosition next_active = range->NextStartAfter(position); next_inactive_ranges_change_ = - std::min(next_inactive_ranges_change_, range->NextStartAfter(position)); + std::min(next_inactive_ranges_change_, next_active); + DCHECK(range->HasRegisterAssigned()); + inactive_live_ranges(range->assigned_register()).insert(range); return active_live_ranges().erase(it); } -ZoneVector::iterator LinearScanAllocator::InactiveToHandled( - ZoneVector::iterator it) { +LinearScanAllocator::InactiveLiveRangeQueue::iterator +LinearScanAllocator::InactiveToHandled(InactiveLiveRangeQueue::iterator it) { + LiveRange* range = *it; TRACE("Moving live range %d:%d from inactive to handled\n", - (*it)->TopLevel()->vreg(), (*it)->relative_id()); - return inactive_live_ranges().erase(it); + range->TopLevel()->vreg(), range->relative_id()); + int reg = range->assigned_register(); + return inactive_live_ranges(reg).erase(it); } -ZoneVector::iterator LinearScanAllocator::InactiveToActive( - ZoneVector::iterator it, LifetimePosition position) { +LinearScanAllocator::InactiveLiveRangeQueue::iterator +LinearScanAllocator::InactiveToActive(InactiveLiveRangeQueue::iterator it, + LifetimePosition position) { LiveRange* range = *it; active_live_ranges().push_back(range); TRACE("Moving live range %d:%d from inactive to active\n", range->TopLevel()->vreg(), range->relative_id()); next_active_ranges_change_ = 
std::min(next_active_ranges_change_, range->NextEndAfter(position)); - return inactive_live_ranges().erase(it); + int reg = range->assigned_register(); + return inactive_live_ranges(reg).erase(it); } void LinearScanAllocator::ForwardStateTo(LifetimePosition position) { @@ -4012,18 +4020,25 @@ void LinearScanAllocator::ForwardStateTo(LifetimePosition position) { if (position >= next_inactive_ranges_change_) { next_inactive_ranges_change_ = LifetimePosition::MaxPosition(); - for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - LiveRange* cur_inactive = *it; - if (cur_inactive->End() <= position) { - it = InactiveToHandled(it); - } else if (cur_inactive->Covers(position)) { - it = InactiveToActive(it, position); - } else { - next_inactive_ranges_change_ = - std::min(next_inactive_ranges_change_, - cur_inactive->NextStartAfter(position)); - ++it; + for (int reg = 0; reg < num_registers(); ++reg) { + ZoneVector reorder(data()->allocation_zone()); + for (auto it = inactive_live_ranges(reg).begin(); + it != inactive_live_ranges(reg).end();) { + LiveRange* cur_inactive = *it; + if (cur_inactive->End() <= position) { + it = InactiveToHandled(it); + } else if (cur_inactive->Covers(position)) { + it = InactiveToActive(it, position); + } else { + next_inactive_ranges_change_ = + std::min(next_inactive_ranges_change_, + cur_inactive->NextStartAfter(position)); + it = inactive_live_ranges(reg).erase(it); + reorder.push_back(cur_inactive); + } + } + for (LiveRange* range : reorder) { + inactive_live_ranges(reg).insert(range); } } } @@ -4094,31 +4109,34 @@ void LinearScanAllocator::FindFreeRegistersForRange( } } - for (LiveRange* cur_inactive : inactive_live_ranges()) { - DCHECK(cur_inactive->End() > range->Start()); - int cur_reg = cur_inactive->assigned_register(); - // No need to carry out intersections, when this register won't be - // interesting to this range anyway. - // TODO(mtrofin): extend to aliased ranges, too. - if ((kSimpleFPAliasing || !check_fp_aliasing()) && - positions[cur_reg] < range->Start()) { - continue; - } - - LifetimePosition next_intersection = cur_inactive->FirstIntersection(range); - if (!next_intersection.IsValid()) continue; - if (kSimpleFPAliasing || !check_fp_aliasing()) { - positions[cur_reg] = Min(positions[cur_reg], next_intersection); - TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg), - Min(positions[cur_reg], next_intersection).value()); - } else { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases( - cur_inactive->representation(), cur_reg, rep, &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases--) { - int aliased_reg = alias_base_index + aliases; - positions[aliased_reg] = Min(positions[aliased_reg], next_intersection); + for (int cur_reg = 0; cur_reg < num_regs; ++cur_reg) { + for (LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) { + DCHECK_GT(cur_inactive->End(), range->Start()); + CHECK_EQ(cur_inactive->assigned_register(), cur_reg); + // No need to carry out intersections, when this register won't be + // interesting to this range anyway. + // TODO(mtrofin): extend to aliased ranges, too. 
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) && + positions[cur_reg] <= cur_inactive->NextStart()) { + break; + } + LifetimePosition next_intersection = + cur_inactive->FirstIntersection(range); + if (!next_intersection.IsValid()) continue; + if (kSimpleFPAliasing || !check_fp_aliasing()) { + positions[cur_reg] = std::min(positions[cur_reg], next_intersection); + TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg), + positions[cur_reg].value()); + } else { + int alias_base_index = -1; + int aliases = data()->config()->GetAliases( + cur_inactive->representation(), cur_reg, rep, &alias_base_index); + DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); + while (aliases--) { + int aliased_reg = alias_base_index + aliases; + positions[aliased_reg] = + std::min(positions[aliased_reg], next_intersection); + } } } } @@ -4337,46 +4355,46 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current, } } - for (LiveRange* range : inactive_live_ranges()) { - DCHECK(range->End() > current->Start()); - int cur_reg = range->assigned_register(); - bool is_fixed = range->TopLevel()->IsFixed(); - - // Don't perform costly intersections if they are guaranteed to not update - // block_pos or use_pos. - // TODO(mtrofin): extend to aliased ranges, too. - if ((kSimpleFPAliasing || !check_fp_aliasing())) { - if (is_fixed) { - if (block_pos[cur_reg] < range->Start()) continue; - } else { - if (use_pos[cur_reg] < range->Start()) continue; + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { + for (LiveRange* range : inactive_live_ranges(cur_reg)) { + DCHECK(range->End() > current->Start()); + DCHECK_EQ(range->assigned_register(), cur_reg); + bool is_fixed = range->TopLevel()->IsFixed(); + + // Don't perform costly intersections if they are guaranteed to not update + // block_pos or use_pos. + // TODO(mtrofin): extend to aliased ranges, too. 
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) { + DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]); + if (block_pos[cur_reg] <= range->NextStart()) break; + if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue; } - } - LifetimePosition next_intersection = range->FirstIntersection(current); - if (!next_intersection.IsValid()) continue; + LifetimePosition next_intersection = range->FirstIntersection(current); + if (!next_intersection.IsValid()) continue; - if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (is_fixed) { - block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); - use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); - } else { - use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); - } - } else { - int alias_base_index = -1; - int aliases = data()->config()->GetAliases( - range->representation(), cur_reg, rep, &alias_base_index); - DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); - while (aliases--) { - int aliased_reg = alias_base_index + aliases; + if (kSimpleFPAliasing || !check_fp_aliasing()) { if (is_fixed) { - block_pos[aliased_reg] = - Min(block_pos[aliased_reg], next_intersection); - use_pos[aliased_reg] = - Min(block_pos[aliased_reg], use_pos[aliased_reg]); + block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); + use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); } else { - use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection); + use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection); + } + } else { + int alias_base_index = -1; + int aliases = data()->config()->GetAliases( + range->representation(), cur_reg, rep, &alias_base_index); + DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); + while (aliases--) { + int aliased_reg = alias_base_index + aliases; + if (is_fixed) { + block_pos[aliased_reg] = + Min(block_pos[aliased_reg], next_intersection); + use_pos[aliased_reg] = + Min(block_pos[aliased_reg], use_pos[aliased_reg]); + } else { + use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection); + } } } } @@ -4490,40 +4508,38 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current, it = ActiveToHandled(it); } - for (auto it = inactive_live_ranges().begin(); - it != inactive_live_ranges().end();) { - LiveRange* range = *it; - DCHECK(range->End() > current->Start()); - if (range->TopLevel()->IsFixed()) { - ++it; - continue; - } + for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) { if (kSimpleFPAliasing || !check_fp_aliasing()) { - if (range->assigned_register() != reg) { + if (cur_reg != reg) continue; + } + for (auto it = inactive_live_ranges(cur_reg).begin(); + it != inactive_live_ranges(cur_reg).end();) { + LiveRange* range = *it; + if (!kSimpleFPAliasing && check_fp_aliasing() && + !data()->config()->AreAliases(current->representation(), reg, + range->representation(), cur_reg)) { ++it; continue; } - } else { - if (!data()->config()->AreAliases(current->representation(), reg, - range->representation(), - range->assigned_register())) { + DCHECK(range->End() > current->Start()); + if (range->TopLevel()->IsFixed()) { ++it; continue; } - } - LifetimePosition next_intersection = range->FirstIntersection(current); - if (next_intersection.IsValid()) { - UsePosition* next_pos = range->NextRegisterPosition(current->Start()); - if (next_pos == nullptr) { - SpillAfter(range, split_pos, spill_mode); + LifetimePosition next_intersection = range->FirstIntersection(current); + if (next_intersection.IsValid()) { + UsePosition* 
next_pos = range->NextRegisterPosition(current->Start()); + if (next_pos == nullptr) { + SpillAfter(range, split_pos, spill_mode); + } else { + next_intersection = Min(next_intersection, next_pos->pos()); + SpillBetween(range, split_pos, next_intersection, spill_mode); + } + it = InactiveToHandled(it); } else { - next_intersection = Min(next_intersection, next_pos->pos()); - SpillBetween(range, split_pos, next_intersection, spill_mode); + ++it; } - it = InactiveToHandled(it); - } else { - ++it; } } } diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h index bc7b09d147dd06..17d664e5077897 100644 --- a/deps/v8/src/compiler/backend/register-allocator.h +++ b/deps/v8/src/compiler/backend/register-allocator.h @@ -335,7 +335,11 @@ class RegisterAllocationData final : public ZoneObject { return result; } - void ResetSpillState() { spill_state_.clear(); } + void ResetSpillState() { + for (auto& state : spill_state_) { + state.clear(); + } + } TickCounter* tick_counter() { return tick_counter_; } @@ -626,9 +630,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) { bool ShouldBeAllocatedBefore(const LiveRange* other) const; bool CanCover(LifetimePosition position) const; bool Covers(LifetimePosition position) const; - LifetimePosition NextStartAfter(LifetimePosition position) const; + LifetimePosition NextStartAfter(LifetimePosition position); LifetimePosition NextEndAfter(LifetimePosition position) const; LifetimePosition FirstIntersection(LiveRange* other) const; + LifetimePosition NextStart() const { return next_start_; } void VerifyChildStructure() const { VerifyIntervals(); @@ -689,6 +694,8 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) { // Cache the last position splintering stopped at. mutable UsePosition* splitting_pointer_; LiveRangeBundle* bundle_ = nullptr; + // Next interval start, relative to the current linear scan position. 
+ LifetimePosition next_start_; DISALLOW_COPY_AND_ASSIGN(LiveRange); }; @@ -1298,29 +1305,39 @@ class LinearScanAllocator final : public RegisterAllocator { LifetimePosition begin_pos, LiveRange* end_range); void MaybeUndoPreviousSplit(LiveRange* range); - void SpillNotLiveRanges( - RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) - LifetimePosition position, SpillMode spill_mode); + void SpillNotLiveRanges(RangeWithRegisterSet* to_be_live, + LifetimePosition position, SpillMode spill_mode); LiveRange* AssignRegisterOnReload(LiveRange* range, int reg); - void ReloadLiveRanges( - RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) - LifetimePosition position); + void ReloadLiveRanges(RangeWithRegisterSet const& to_be_live, + LifetimePosition position); void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block); bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred( const InstructionBlock* block); bool HasNonDeferredPredecessor(InstructionBlock* block); - struct LiveRangeOrdering { + struct UnhandledLiveRangeOrdering { bool operator()(const LiveRange* a, const LiveRange* b) const { return a->ShouldBeAllocatedBefore(b); } }; - using LiveRangeQueue = ZoneMultiset; - LiveRangeQueue& unhandled_live_ranges() { return unhandled_live_ranges_; } + + struct InactiveLiveRangeOrdering { + bool operator()(const LiveRange* a, const LiveRange* b) const { + return a->NextStart() < b->NextStart(); + } + }; + + using UnhandledLiveRangeQueue = + ZoneMultiset; + using InactiveLiveRangeQueue = + ZoneMultiset; + UnhandledLiveRangeQueue& unhandled_live_ranges() { + return unhandled_live_ranges_; + } ZoneVector& active_live_ranges() { return active_live_ranges_; } - ZoneVector& inactive_live_ranges() { - return inactive_live_ranges_; + InactiveLiveRangeQueue& inactive_live_ranges(int reg) { + return inactive_live_ranges_[reg]; } void SetLiveRangeAssignedRegister(LiveRange* range, int reg); @@ -1333,10 +1350,10 @@ class LinearScanAllocator final : public RegisterAllocator { ZoneVector::iterator it); ZoneVector::iterator ActiveToInactive( ZoneVector::iterator it, LifetimePosition position); - ZoneVector::iterator InactiveToHandled( - ZoneVector::iterator it); - ZoneVector::iterator InactiveToActive( - ZoneVector::iterator it, LifetimePosition position); + InactiveLiveRangeQueue::iterator InactiveToHandled( + InactiveLiveRangeQueue::iterator it); + InactiveLiveRangeQueue::iterator InactiveToActive( + InactiveLiveRangeQueue::iterator it, LifetimePosition position); void ForwardStateTo(LifetimePosition position); @@ -1386,9 +1403,9 @@ class LinearScanAllocator final : public RegisterAllocator { void PrintRangeOverview(std::ostream& os); - LiveRangeQueue unhandled_live_ranges_; + UnhandledLiveRangeQueue unhandled_live_ranges_; ZoneVector active_live_ranges_; - ZoneVector inactive_live_ranges_; + ZoneVector inactive_live_ranges_; // Approximate at what position the set of ranges will change next. // Used to avoid scanning for updates even if none are present. 
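Reviewer note on the register-allocator hunks above: the single flat inactive_live_ranges_ vector becomes one ordered queue per register, keyed by each range's cached NextStart(), which is what lets the per-register scans in FindFreeRegistersForRange and AllocateBlockedReg break out early. A minimal standalone sketch of that shape, using std:: containers and an invented LiveRange stand-in rather than V8's zone types:

// Standalone sketch (simplified stand-ins, not the real V8 classes).
#include <set>
#include <vector>

struct LiveRange {
  int assigned_register;
  int next_start;  // stand-in for the cached LifetimePosition NextStart()
};

struct ByNextStart {
  bool operator()(const LiveRange* a, const LiveRange* b) const {
    return a->next_start < b->next_start;  // earliest upcoming start first
  }
};

using InactiveQueue = std::multiset<const LiveRange*, ByNextStart>;

int main() {
  const int num_registers = 16;
  // One ordered queue per register instead of a single flat vector, so a scan
  // over a given register can stop as soon as the remaining entries all start
  // too late to affect the current position.
  std::vector<InactiveQueue> inactive(num_registers);
  LiveRange r{3, 42};
  inactive[r.assigned_register].insert(&r);
  return 0;
}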
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 4c2d862fc44a1b..d0f97eca57b08c 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -1246,9 +1246,8 @@ void AdjustStackPointerForTailCall( } } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, Instruction* instr, - S390OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, + S390OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index 7f3277fc68d831..7b002fe6d3bdfa 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/base/adapters.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -436,68 +435,64 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode, #endif template -void GenerateRightOperands( - InstructionSelector* selector, Node* node, Node* right, - InstructionCode& opcode, // NOLINT(runtime/references) - OperandModes& operand_mode, // NOLINT(runtime/references) - InstructionOperand* inputs, - size_t& input_count, // NOLINT(runtime/references) - CanCombineWithLoad canCombineWithLoad) { +void GenerateRightOperands(InstructionSelector* selector, Node* node, + Node* right, InstructionCode* opcode, + OperandModes* operand_mode, + InstructionOperand* inputs, size_t* input_count, + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); - if ((operand_mode & OperandMode::kAllowImmediate) && - g.CanBeImmediate(right, operand_mode)) { - inputs[input_count++] = g.UseImmediate(right); + if ((*operand_mode & OperandMode::kAllowImmediate) && + g.CanBeImmediate(right, *operand_mode)) { + inputs[(*input_count)++] = g.UseImmediate(right); // Can only be RI or RRI - operand_mode &= OperandMode::kAllowImmediate; - } else if (operand_mode & OperandMode::kAllowMemoryOperand) { + *operand_mode &= OperandMode::kAllowImmediate; + } else if (*operand_mode & OperandMode::kAllowMemoryOperand) { NodeMatcher mright(right); if (mright.IsLoad() && selector->CanCover(node, right) && canCombineWithLoad(SelectLoadOpcode(right))) { AddressingMode mode = g.GetEffectiveAddressMemoryOperand( - right, inputs, &input_count, OpcodeImmMode(opcode)); - opcode |= AddressingModeField::encode(mode); - operand_mode &= ~OperandMode::kAllowImmediate; - if (operand_mode & OperandMode::kAllowRM) - operand_mode &= ~OperandMode::kAllowDistinctOps; - } else if (operand_mode & OperandMode::kAllowRM) { - DCHECK(!(operand_mode & OperandMode::kAllowRRM)); - inputs[input_count++] = g.UseAnyExceptImmediate(right); + right, inputs, input_count, OpcodeImmMode(*opcode)); + *opcode |= AddressingModeField::encode(mode); + *operand_mode &= ~OperandMode::kAllowImmediate; + if (*operand_mode & OperandMode::kAllowRM) + *operand_mode &= ~OperandMode::kAllowDistinctOps; + } else if (*operand_mode & OperandMode::kAllowRM) { + 
DCHECK(!(*operand_mode & OperandMode::kAllowRRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); // Can not be Immediate - operand_mode &= + *operand_mode &= ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps; - } else if (operand_mode & OperandMode::kAllowRRM) { - DCHECK(!(operand_mode & OperandMode::kAllowRM)); - inputs[input_count++] = g.UseAnyExceptImmediate(right); + } else if (*operand_mode & OperandMode::kAllowRRM) { + DCHECK(!(*operand_mode & OperandMode::kAllowRM)); + inputs[(*input_count)++] = g.UseAnyExceptImmediate(right); // Can not be Immediate - operand_mode &= ~OperandMode::kAllowImmediate; + *operand_mode &= ~OperandMode::kAllowImmediate; } else { UNREACHABLE(); } } else { - inputs[input_count++] = g.UseRegister(right); + inputs[(*input_count)++] = g.UseRegister(right); // Can only be RR or RRR - operand_mode &= OperandMode::kAllowRRR; + *operand_mode &= OperandMode::kAllowRRR; } } template -void GenerateBinOpOperands( - InstructionSelector* selector, Node* node, Node* left, Node* right, - InstructionCode& opcode, // NOLINT(runtime/references) - OperandModes& operand_mode, // NOLINT(runtime/references) - InstructionOperand* inputs, - size_t& input_count, // NOLINT(runtime/references) - CanCombineWithLoad canCombineWithLoad) { +void GenerateBinOpOperands(InstructionSelector* selector, Node* node, + Node* left, Node* right, InstructionCode* opcode, + OperandModes* operand_mode, + InstructionOperand* inputs, size_t* input_count, + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); // left is always register InstructionOperand const left_input = g.UseRegister(left); - inputs[input_count++] = left_input; + inputs[(*input_count)++] = left_input; if (left == right) { - inputs[input_count++] = left_input; + inputs[(*input_count)++] = left_input; // Can only be RR or RRR - operand_mode &= OperandMode::kAllowRRR; + *operand_mode &= OperandMode::kAllowRRR; } else { GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs, input_count, canCombineWithLoad); @@ -575,8 +570,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node, size_t output_count = 0; Node* input = node->InputAt(0); - GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs, - input_count, canCombineWithLoad); + GenerateRightOperands(selector, node, input, &opcode, &operand_mode, inputs, + &input_count, canCombineWithLoad); bool input_is_word32 = ProduceWord32Result(input); @@ -631,8 +626,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node, std::swap(left, right); } - GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode, - inputs, input_count, canCombineWithLoad); + GenerateBinOpOperands(selector, node, left, right, &opcode, &operand_mode, + inputs, &input_count, canCombineWithLoad); bool left_is_word32 = ProduceWord32Result(left); @@ -1175,6 +1170,12 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + // TODO(miladfar): Implement the s390 selector for reversing SIMD bytes. + // Check if the input node is a Load and do a Load Reverse at once. 
+ UNIMPLEMENTED(); +} + template static inline bool TryMatchNegFromSub(InstructionSelector* selector, Node* node) { @@ -2691,6 +2692,8 @@ void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); } diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index a4f82b153b6387..44da872f26d0a4 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -361,7 +361,6 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap { void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - X64OperandConverter& i, // NOLINT(runtime/references) int pc) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); @@ -370,9 +369,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, } } -void EmitWordLoadPoisoningIfNeeded( - CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - X64OperandConverter& i) { // NOLINT(runtime/references) +void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, + InstructionCode opcode, Instruction* instr, + X64OperandConverter const& i) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -1876,30 +1875,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg); break; case kX64Movsxbl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxbl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxbq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbq); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxbq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbq); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movb: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); size_t index = 0; Operand operand = i.MemoryOperand(&index); if (HasImmediateInput(instr, index)) { @@ -1911,29 +1910,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movsxwl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxwl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case 
kX64Movzxwl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwl); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxwq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxwq); break; case kX64Movzxwq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwq); __ AssertZeroExtended(i.OutputRegister()); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movw: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); size_t index = 0; Operand operand = i.MemoryOperand(&index); if (HasImmediateInput(instr, index)) { @@ -1945,7 +1944,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movl: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { if (HasAddressingMode(instr)) { __ movl(i.OutputRegister(), i.MemoryOperand()); @@ -1969,7 +1968,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxlq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxlq); EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; @@ -2021,7 +2020,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64Movq: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ movq(i.OutputRegister(), i.MemoryOperand()); } else { @@ -2036,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movss: - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ Movss(i.OutputDoubleRegister(), i.MemoryOperand()); } else { @@ -2046,7 +2045,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kX64Movsd: { - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); @@ -2069,7 +2068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64Movdqu: { CpuFeatureScope sse_scope(tasm(), SSSE3); - EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); + EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand()); } else { @@ -2293,6 +2292,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ movq(i.OutputDoubleRegister(), kScratchRegister); break; } + case kX64F64x2Sqrt: { + __ Sqrtpd(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kX64F64x2Add: { ASSEMBLE_SSE_BINOP(addpd); 
break; @@ -2350,22 +2353,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64F64x2Eq: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Ne: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Lt: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64F64x2Le: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64F64x2Qfma: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movapd(tmp, i.InputSimd128Register(2)); + __ mulpd(tmp, i.InputSimd128Register(1)); + __ addpd(i.OutputSimd128Register(), tmp); + } + break; + } + case kX64F64x2Qfms: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movapd(tmp, i.InputSimd128Register(2)); + __ mulpd(tmp, i.InputSimd128Register(1)); + __ subpd(i.OutputSimd128Register(), tmp); + } break; } // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below @@ -2445,6 +2474,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kX64F32x4Sqrt: { + __ sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0)); + break; + } case kX64F32x4RecipApprox: { __ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; @@ -2538,6 +2571,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } + case kX64F32x4Qfma: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movaps(tmp, i.InputSimd128Register(2)); + __ mulps(tmp, i.InputSimd128Register(1)); + __ addps(i.OutputSimd128Register(), tmp); + } + break; + } + case kX64F32x4Qfms: { + if (CpuFeatures::IsSupported(FMA3)) { + CpuFeatureScope fma3_scope(tasm(), FMA3); + __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1), + i.InputSimd128Register(2)); + } else { + XMMRegister tmp = i.TempSimd128Register(0); + __ movaps(tmp, i.InputSimd128Register(2)); + __ mulps(tmp, i.InputSimd128Register(1)); + __ subps(i.OutputSimd128Register(), tmp); + } + break; + } case kX64I64x2Splat: { CpuFeatureScope sse_scope(tasm(), SSE3); XMMRegister dst = i.OutputSimd128Register(); @@ -2577,7 +2636,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I64x2Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register 
shift = i.InputRegister(1); + // Take shift value modulo 8. + __ andq(shift, Immediate(63)); + __ movq(tmp, shift); __ psllq(i.OutputSimd128Register(), tmp); break; } @@ -2588,6 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); Register tmp = i.ToRegister(instr->TempAt(0)); + // Modulo 64 not required as sarq_cl will mask cl to 6 bits. // lower quadword __ pextrq(tmp, src, 0x0); @@ -2640,15 +2703,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (CpuFeatures::IsSupported(SSE4_2)) { CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(1); + XMMRegister src0 = i.InputSimd128Register(0); + XMMRegister src1 = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); - DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); - __ movaps(tmp, src); - __ pcmpgtq(src, dst); - __ blendvpd(tmp, dst); // implicit use of xmm0 as mask - __ movaps(dst, tmp); + __ movaps(tmp, src1); + __ pcmpgtq(tmp, src0); + __ movaps(dst, src1); + __ blendvpd(dst, src0); // implicit use of xmm0 as mask } else { CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); @@ -2689,11 +2752,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); __ movaps(tmp, src); - __ pcmpgtq(src, dst); - __ blendvpd(dst, tmp); // implicit use of xmm0 as mask + __ pcmpgtq(tmp, dst); + __ blendvpd(dst, src); // implicit use of xmm0 as mask break; } case kX64I64x2Eq: { @@ -2732,7 +2795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I64x2ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 64. 
+ __ andq(shift, Immediate(63)); + __ movq(tmp, shift); __ psrlq(i.OutputSimd128Register(), tmp); break; } @@ -2740,24 +2806,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope_4_2(tasm(), SSE4_2); CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); - XMMRegister src = i.InputSimd128Register(1); - XMMRegister src_tmp = i.TempSimd128Register(0); - XMMRegister dst_tmp = i.TempSimd128Register(1); - DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + XMMRegister src0 = i.InputSimd128Register(0); + XMMRegister src1 = i.InputSimd128Register(1); + XMMRegister tmp0 = i.TempSimd128Register(0); + XMMRegister tmp1 = i.TempSimd128Register(1); + DCHECK_EQ(tmp1, xmm0); - __ movaps(src_tmp, src); - __ movaps(dst_tmp, dst); + __ movaps(dst, src1); + __ movaps(tmp0, src0); - __ pcmpeqd(src, src); - __ psllq(src, 63); + __ pcmpeqd(tmp1, tmp1); + __ psllq(tmp1, 63); - __ pxor(dst_tmp, src); - __ pxor(src, src_tmp); + __ pxor(tmp0, tmp1); + __ pxor(tmp1, dst); - __ pcmpgtq(src, dst_tmp); - __ blendvpd(src_tmp, dst); // implicit use of xmm0 as mask - __ movaps(dst, src_tmp); + __ pcmpgtq(tmp1, tmp0); + __ blendvpd(dst, src0); // implicit use of xmm0 as mask break; } case kX64I64x2MaxU: { @@ -2765,22 +2830,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope_4_1(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - XMMRegister src_tmp = i.TempSimd128Register(0); - XMMRegister dst_tmp = i.TempSimd128Register(1); + XMMRegister dst_tmp = i.TempSimd128Register(0); + XMMRegister tmp = i.TempSimd128Register(1); DCHECK_EQ(dst, i.InputSimd128Register(0)); - DCHECK_EQ(src, xmm0); + DCHECK_EQ(tmp, xmm0); - __ movaps(src_tmp, src); __ movaps(dst_tmp, dst); - __ pcmpeqd(src, src); - __ psllq(src, 63); + __ pcmpeqd(tmp, tmp); + __ psllq(tmp, 63); - __ pxor(dst_tmp, src); - __ pxor(src, src_tmp); + __ pxor(dst_tmp, tmp); + __ pxor(tmp, src); - __ pcmpgtq(src, dst_tmp); - __ blendvpd(dst, src_tmp); // implicit use of xmm0 as mask + __ pcmpgtq(tmp, dst_tmp); + __ blendvpd(dst, src); // implicit use of xmm0 as mask break; } case kX64I64x2GtU: { @@ -2820,11 +2884,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I32x4Splat: { XMMRegister dst = i.OutputSimd128Register(); if (HasRegisterInput(instr, 0)) { - __ movd(dst, i.InputRegister(0)); + __ Movd(dst, i.InputRegister(0)); } else { - __ movd(dst, i.InputOperand(0)); + __ Movd(dst, i.InputOperand(0)); } - __ pshufd(dst, dst, 0x0); + __ Pshufd(dst, dst, 0x0); break; } case kX64I32x4ExtractLane: { @@ -2878,28 +2942,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(0); if (dst == src) { - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psignd(dst, kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psignd(dst, kScratchDoubleReg); } else { - __ pxor(dst, dst); - __ psubd(dst, src); + __ Pxor(dst, dst); + __ Psubd(dst, src); } break; } case kX64I32x4Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ pslld(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. 
+ __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Pslld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4ShrS: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ psrad(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. + __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Psrad(i.OutputSimd128Register(), tmp); break; } case kX64I32x4Add: { - __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4AddHoriz: { @@ -2908,45 +2978,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I32x4Sub: { - __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Mul: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MinS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MaxS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Eq: { - __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4Ne: { XMMRegister tmp = i.TempSimd128Register(0); - __ pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); - __ pcmpeqd(tmp, tmp); - __ pxor(i.OutputSimd128Register(), tmp); + __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpeqd(tmp, tmp); + __ Pxor(i.OutputSimd128Register(), tmp); break; } case kX64I32x4GtS: { - __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4GeS: { CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - __ pminsd(dst, src); - __ pcmpeqd(dst, src); + __ Pminsd(dst, src); + __ Pcmpeqd(dst, src); break; } case kX64I32x4UConvertF32x4: { @@ -2992,18 +3062,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I32x4ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); - __ psrld(i.OutputSimd128Register(), tmp); + Register shift = i.InputRegister(1); + // Take shift value modulo 32. 
+ __ andq(shift, Immediate(31)); + __ Movq(tmp, shift); + __ Psrld(i.OutputSimd128Register(), tmp); break; } case kX64I32x4MinU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4MaxU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); - __ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I32x4GtU: { @@ -3011,18 +3084,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); XMMRegister tmp = i.TempSimd128Register(0); - __ pmaxud(dst, src); - __ pcmpeqd(dst, src); - __ pcmpeqd(tmp, tmp); - __ pxor(dst, tmp); + __ Pmaxud(dst, src); + __ Pcmpeqd(dst, src); + __ Pcmpeqd(tmp, tmp); + __ Pxor(dst, tmp); break; } case kX64I32x4GeU: { CpuFeatureScope sse_scope(tasm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = i.InputSimd128Register(1); - __ pminud(dst, src); - __ pcmpeqd(dst, src); + __ Pminud(dst, src); + __ Pcmpeqd(dst, src); break; } case kX64S128Zero: { @@ -3044,17 +3117,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64I16x8ExtractLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); Register dst = i.OutputRegister(); - __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsxwl(dst, dst); + __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1)); break; } case kX64I16x8ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); if (HasRegisterInput(instr, 2)) { - __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2), + __ Pinsrw(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { - __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + __ Pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); } break; } @@ -3085,13 +3157,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8Shl: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psllw(i.OutputSimd128Register(), tmp); break; } case kX64I16x8ShrS: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. + __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psraw(i.OutputSimd128Register(), tmp); break; } @@ -3173,7 +3251,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kX64I16x8ShrU: { XMMRegister tmp = i.TempSimd128Register(0); - __ movq(tmp, i.InputRegister(1)); + Register shift = i.InputRegister(1); + // Take shift value modulo 16. 
+ __ andq(shift, Immediate(15)); + __ movq(tmp, shift); __ psrlw(i.OutputSimd128Register(), tmp); break; } @@ -3230,28 +3311,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSSE3); XMMRegister dst = i.OutputSimd128Register(); if (HasRegisterInput(instr, 0)) { - __ movd(dst, i.InputRegister(0)); + __ Movd(dst, i.InputRegister(0)); } else { - __ movd(dst, i.InputOperand(0)); + __ Movd(dst, i.InputOperand(0)); } - __ xorps(kScratchDoubleReg, kScratchDoubleReg); - __ pshufb(dst, kScratchDoubleReg); + __ Xorps(kScratchDoubleReg, kScratchDoubleReg); + __ Pshufb(dst, kScratchDoubleReg); break; } case kX64I8x16ExtractLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); Register dst = i.OutputRegister(); - __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); - __ movsxbl(dst, dst); + __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1)); break; } case kX64I8x16ReplaceLane: { CpuFeatureScope sse_scope(tasm(), SSE4_1); if (HasRegisterInput(instr, 2)) { - __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2), + __ Pinsrb(i.OutputSimd128Register(), i.InputRegister(2), i.InputInt8(1)); } else { - __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + __ Pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); } break; } @@ -3279,15 +3359,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // Temp registers for shift mask andadditional moves to XMM registers. Register tmp = i.ToRegister(instr->TempAt(0)); XMMRegister tmp_simd = i.TempSimd128Register(1); + Register shift = i.InputRegister(1); // Mask off the unwanted bits before word-shifting. __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg); - __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(shift, Immediate(7)); + __ movq(tmp, shift); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); __ packuswb(kScratchDoubleReg, kScratchDoubleReg); __ pand(dst, kScratchDoubleReg); - __ movq(tmp_simd, i.InputRegister(1)); + __ movq(tmp_simd, shift); __ psllw(dst, tmp_simd); break; } @@ -3302,6 +3385,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpcklbw(dst, dst); // Prepare shift value __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(tmp, Immediate(7)); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psraw(kScratchDoubleReg, tmp_simd); @@ -3414,6 +3499,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ punpcklbw(dst, dst); // Prepare shift value __ movq(tmp, i.InputRegister(1)); + // Take shift value modulo 8. + __ andq(tmp, Immediate(7)); __ addq(tmp, Immediate(8)); __ movq(tmp_simd, tmp); __ psrlw(kScratchDoubleReg, tmp_simd); @@ -3422,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kX64I8x16AddSaturateU: { - __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ Paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } case kX64I8x16SubSaturateU: { @@ -3487,10 +3574,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64S128Select: { // Mask used here is stored in dst. 
XMMRegister dst = i.OutputSimd128Register(); - __ movaps(kScratchDoubleReg, i.InputSimd128Register(1)); - __ xorps(kScratchDoubleReg, i.InputSimd128Register(2)); - __ andps(dst, kScratchDoubleReg); - __ xorps(dst, i.InputSimd128Register(2)); + __ Movaps(kScratchDoubleReg, i.InputSimd128Register(1)); + __ Xorps(kScratchDoubleReg, i.InputSimd128Register(2)); + __ Andps(dst, kScratchDoubleReg); + __ Xorps(dst, i.InputSimd128Register(2)); + break; + } + case kX64S8x16Swizzle: { + CpuFeatureScope sse_scope(tasm(), SSSE3); + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister mask = i.TempSimd128Register(0); + + // Out-of-range indices should return 0, add 112 so that any value > 15 + // saturates to 128 (top bit set), so pshufb will zero that lane. + __ Move(mask, static_cast(0x70707070)); + __ Pshufd(mask, mask, 0x0); + __ Paddusb(mask, i.InputSimd128Register(1)); + __ Pshufb(dst, mask); break; } case kX64S8x16Shuffle: { @@ -3507,10 +3608,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } SetupShuffleMaskOnStack(tasm(), mask); - __ pshufb(dst, Operand(rsp, 0)); + __ Pshufb(dst, Operand(rsp, 0)); } else { // two input operands DCHECK_EQ(6, instr->InputCount()); - ASSEMBLE_SIMD_INSTR(movups, kScratchDoubleReg, 0); + ASSEMBLE_SIMD_INSTR(Movups, kScratchDoubleReg, 0); uint32_t mask[4] = {}; for (int j = 5; j > 1; j--) { uint32_t lanes = i.InputUint32(j); @@ -3520,13 +3621,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } } SetupShuffleMaskOnStack(tasm(), mask); - __ pshufb(kScratchDoubleReg, Operand(rsp, 0)); + __ Pshufb(kScratchDoubleReg, Operand(rsp, 0)); uint32_t mask1[4] = {}; if (instr->InputAt(1)->IsSimd128Register()) { XMMRegister src1 = i.InputSimd128Register(1); if (src1 != dst) __ movups(dst, src1); } else { - __ movups(dst, i.InputOperand(1)); + __ Movups(dst, i.InputOperand(1)); } for (int j = 5; j > 1; j--) { uint32_t lanes = i.InputUint32(j); @@ -3536,8 +3637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } } SetupShuffleMaskOnStack(tasm(), mask1); - __ pshufb(dst, Operand(rsp, 0)); - __ por(dst, kScratchDoubleReg); + __ Pshufb(dst, Operand(rsp, 0)); + __ Por(dst, kScratchDoubleReg); } __ movq(rsp, tmp); break; diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h index 8a0a45a916afc6..e390c6922c8001 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h +++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h @@ -160,6 +160,7 @@ namespace compiler { V(X64F64x2ReplaceLane) \ V(X64F64x2Abs) \ V(X64F64x2Neg) \ + V(X64F64x2Sqrt) \ V(X64F64x2Add) \ V(X64F64x2Sub) \ V(X64F64x2Mul) \ @@ -170,6 +171,8 @@ namespace compiler { V(X64F64x2Ne) \ V(X64F64x2Lt) \ V(X64F64x2Le) \ + V(X64F64x2Qfma) \ + V(X64F64x2Qfms) \ V(X64F32x4Splat) \ V(X64F32x4ExtractLane) \ V(X64F32x4ReplaceLane) \ @@ -177,6 +180,7 @@ namespace compiler { V(X64F32x4UConvertI32x4) \ V(X64F32x4Abs) \ V(X64F32x4Neg) \ + V(X64F32x4Sqrt) \ V(X64F32x4RecipApprox) \ V(X64F32x4RecipSqrtApprox) \ V(X64F32x4Add) \ @@ -190,6 +194,8 @@ namespace compiler { V(X64F32x4Ne) \ V(X64F32x4Lt) \ V(X64F32x4Le) \ + V(X64F32x4Qfma) \ + V(X64F32x4Qfms) \ V(X64I64x2Splat) \ V(X64I64x2ExtractLane) \ V(X64I64x2ReplaceLane) \ @@ -300,6 +306,7 @@ namespace compiler { V(X64S128Or) \ V(X64S128Xor) \ V(X64S128Select) \ + V(X64S8x16Swizzle) \ V(X64S8x16Shuffle) \ V(X64S32x4Swizzle) \ V(X64S32x4Shuffle) \ diff 
--git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc index e9fa450c3820e7..28a935fd9164fe 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc @@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F64x2ReplaceLane: case kX64F64x2Abs: case kX64F64x2Neg: + case kX64F64x2Sqrt: case kX64F64x2Add: case kX64F64x2Sub: case kX64F64x2Mul: @@ -139,6 +140,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F64x2Ne: case kX64F64x2Lt: case kX64F64x2Le: + case kX64F64x2Qfma: + case kX64F64x2Qfms: case kX64F32x4Splat: case kX64F32x4ExtractLane: case kX64F32x4ReplaceLane: @@ -148,6 +151,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4RecipSqrtApprox: case kX64F32x4Abs: case kX64F32x4Neg: + case kX64F32x4Sqrt: case kX64F32x4Add: case kX64F32x4AddHoriz: case kX64F32x4Sub: @@ -159,6 +163,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4Ne: case kX64F32x4Lt: case kX64F32x4Le: + case kX64F32x4Qfma: + case kX64F32x4Qfms: case kX64I64x2Splat: case kX64I64x2ExtractLane: case kX64I64x2ReplaceLane: @@ -275,6 +281,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64S1x4AllTrue: case kX64S1x8AnyTrue: case kX64S1x8AllTrue: + case kX64S8x16Swizzle: case kX64S8x16Shuffle: case kX64S32x4Swizzle: case kX64S32x4Shuffle: diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index 5379074bac8666..f5d05fdd85a384 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -4,7 +4,7 @@ #include -#include "src/base/adapters.h" +#include "src/base/iterator.h" #include "src/base/overflowing-math.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" @@ -250,9 +250,21 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) { #else UNREACHABLE(); #endif +#ifdef V8_COMPRESS_POINTERS + case MachineRepresentation::kTaggedSigned: + opcode = kX64MovqDecompressTaggedSigned; + break; + case MachineRepresentation::kTaggedPointer: + opcode = kX64MovqDecompressTaggedPointer; + break; + case MachineRepresentation::kTagged: + opcode = kX64MovqDecompressAnyTagged; + break; +#else case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. case MachineRepresentation::kTagged: // Fall through. +#endif case MachineRepresentation::kWord64: opcode = kX64Movq; break; @@ -288,7 +300,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) { #endif case MachineRepresentation::kTaggedSigned: // Fall through. case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTagged: + return kX64MovqCompressTagged; case MachineRepresentation::kWord64: return kX64Movq; case MachineRepresentation::kSimd128: // Fall through. 
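Aside on the instruction-selector hunk just above: with V8_COMPRESS_POINTERS defined, tagged loads are routed to dedicated decompressing move opcodes and tagged stores to a compressing one, while raw 64-bit words keep plain movq. A hedged sketch of that dispatch, with invented enum names standing in for MachineRepresentation and the x64 opcode list:

// Sketch only; the names below are illustrative, not the real selector API.
#include <cstdio>

enum Rep { kTaggedSigned, kTaggedPointer, kTagged, kWord64 };
enum Opcode {
  kMovq,
  kMovqDecompressTaggedSigned,
  kMovqDecompressTaggedPointer,
  kMovqDecompressAnyTagged,
  kMovqCompressTagged
};

constexpr bool kCompressPointers = true;  // stands in for V8_COMPRESS_POINTERS

Opcode LoadOpcodeFor(Rep rep) {
  if (kCompressPointers) {
    switch (rep) {
      case kTaggedSigned:  return kMovqDecompressTaggedSigned;
      case kTaggedPointer: return kMovqDecompressTaggedPointer;
      case kTagged:        return kMovqDecompressAnyTagged;
      default:             break;
    }
  }
  return kMovq;  // uncompressed tagged values and kWord64 use a plain movq
}

Opcode StoreOpcodeFor(Rep rep) {
  // Tagged stores always go through the (possibly no-op) compressing move.
  return rep == kWord64 ? kMovq : kMovqCompressTagged;
}

int main() {
  std::printf("%d %d\n", LoadOpcodeFor(kTagged), StoreOpcodeFor(kTagged));
}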
@@ -875,6 +888,10 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { + UNREACHABLE(); +} + void InstructionSelector::VisitInt32Add(Node* node) { X64OperandGenerator g(this); @@ -1843,17 +1860,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, node->op()->HasProperty(Operator::kCommutative)); } -// Shared routine for 64-bit word comparison operations. -void VisitWord64Compare(InstructionSelector* selector, Node* node, - FlagsContinuation* cont) { - X64OperandGenerator g(selector); +void VisitWord64EqualImpl(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { if (selector->CanUseRootsRegister()) { + X64OperandGenerator g(selector); const RootsTable& roots_table = selector->isolate()->roots_table(); RootIndex root_index; HeapObjectBinopMatcher m(node); if (m.right().HasValue() && roots_table.IsRootHandle(m.right().Value(), &root_index)) { - if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute(); InstructionCode opcode = kX64Cmp | AddressingModeField::encode(kMode_Root); return VisitCompare( @@ -1861,18 +1876,30 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node, g.TempImmediate( TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(m.left().node()), cont); - } else if (m.left().HasValue() && - roots_table.IsRootHandle(m.left().Value(), &root_index)) { + } + } + VisitWordCompare(selector, node, kX64Cmp, cont); +} + +void VisitWord32EqualImpl(InstructionSelector* selector, Node* node, + FlagsContinuation* cont) { + if (COMPRESS_POINTERS_BOOL && selector->CanUseRootsRegister()) { + X64OperandGenerator g(selector); + const RootsTable& roots_table = selector->isolate()->roots_table(); + RootIndex root_index; + CompressedHeapObjectBinopMatcher m(node); + if (m.right().HasValue() && + roots_table.IsRootHandle(m.right().Value(), &root_index)) { InstructionCode opcode = - kX64Cmp | AddressingModeField::encode(kMode_Root); + kX64Cmp32 | AddressingModeField::encode(kMode_Root); return VisitCompare( selector, opcode, g.TempImmediate( TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), - g.UseRegister(m.right().node()), cont); + g.UseRegister(m.left().node()), cont); } } - VisitWordCompare(selector, node, kX64Cmp, cont); + VisitWordCompare(selector, node, kX64Cmp32, cont); } // Shared routine for comparison with zero. 
@@ -2048,7 +2075,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, switch (value->opcode()) { case IrOpcode::kWord32Equal: cont->OverwriteAndNegateIfEqual(kEqual); - return VisitWordCompare(this, value, kX64Cmp32, cont); + return VisitWord32EqualImpl(this, value, cont); case IrOpcode::kInt32LessThan: cont->OverwriteAndNegateIfEqual(kSignedLessThan); return VisitWordCompare(this, value, kX64Cmp32, cont); @@ -2071,7 +2098,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, if (CanCover(user, value)) { switch (value->opcode()) { case IrOpcode::kInt64Sub: - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kWord64And: return VisitWordCompare(this, value, kX64Test, cont); default: @@ -2080,20 +2107,20 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, } return VisitCompareZero(this, user, value, kX64Cmp, cont); } - return VisitWord64Compare(this, value, cont); + return VisitWord64EqualImpl(this, value, cont); } case IrOpcode::kInt64LessThan: cont->OverwriteAndNegateIfEqual(kSignedLessThan); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kInt64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kUint64LessThan: cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kUint64LessThanOrEqual: cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitWord64Compare(this, value, cont); + return VisitWordCompare(this, value, kX64Cmp, cont); case IrOpcode::kFloat32Equal: cont->OverwriteAndNegateIfEqual(kUnorderedEqual); return VisitFloat32Compare(this, value, cont); @@ -2221,7 +2248,7 @@ void InstructionSelector::VisitWord32Equal(Node* const node) { if (m.right().Is(0)) { return VisitWordCompareZero(m.node(), m.left().node(), &cont); } - VisitWordCompare(this, node, kX64Cmp32, &cont); + VisitWord32EqualImpl(this, node, &cont); } void InstructionSelector::VisitInt32LessThan(Node* node) { @@ -2246,7 +2273,7 @@ void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { VisitWordCompare(this, node, kX64Cmp32, &cont); } -void InstructionSelector::VisitWord64Equal(Node* const node) { +void InstructionSelector::VisitWord64Equal(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); Int64BinopMatcher m(node); if (m.right().Is(0)) { @@ -2256,7 +2283,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) { if (CanCover(user, value)) { switch (value->opcode()) { case IrOpcode::kInt64Sub: - return VisitWord64Compare(this, value, &cont); + return VisitWordCompare(this, value, kX64Cmp, &cont); case IrOpcode::kWord64And: return VisitWordCompare(this, value, kX64Test, &cont); default: @@ -2264,7 +2291,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) { } } } - VisitWord64Compare(this, node, &cont); + VisitWord64EqualImpl(this, node, &cont); } void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { @@ -2287,24 +2314,24 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { void InstructionSelector::VisitInt64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); - VisitWord64Compare(this, node, &cont); + 
VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitUint64LessThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitWord64Compare(this, node, &cont); + VisitWordCompare(this, node, kX64Cmp, &cont); } void InstructionSelector::VisitFloat32Equal(Node* node) { @@ -2685,9 +2712,11 @@ VISIT_ATOMIC_BINOP(Xor) V(I8x16GtU) #define SIMD_UNOP_LIST(V) \ + V(F64x2Sqrt) \ V(F32x4SConvertI32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ + V(F32x4Sqrt) \ V(F32x4RecipApprox) \ V(F32x4RecipSqrtApprox) \ V(I64x2Neg) \ @@ -2872,6 +2901,27 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { g.UseRegister(node->InputAt(0))); } +#define VISIT_SIMD_QFMOP(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + if (CpuFeatures::IsSupported(FMA3)) { \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2))); \ + } else { \ + InstructionOperand temps[] = {g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseUniqueRegister(node->InputAt(0)), \ + g.UseUniqueRegister(node->InputAt(1)), \ + g.UseRegister(node->InputAt(2)), arraysize(temps), temps); \ + } \ + } +VISIT_SIMD_QFMOP(F64x2Qfma) +VISIT_SIMD_QFMOP(F64x2Qfms) +VISIT_SIMD_QFMOP(F32x4Qfma) +VISIT_SIMD_QFMOP(F32x4Qfms) +#undef VISIT_SIMD_QFMOP + void InstructionSelector::VisitI64x2ShrS(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempRegister()}; @@ -2893,10 +2943,10 @@ void InstructionSelector::VisitI64x2Mul(Node* node) { void InstructionSelector::VisitI64x2MinS(Node* node) { X64OperandGenerator g(this); if (this->IsSupported(SSE4_2)) { - InstructionOperand temps[] = {g.TempSimd128Register()}; - Emit(kX64I64x2MinS, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), - arraysize(temps), temps); + InstructionOperand temps[] = {g.TempFpRegister(xmm0)}; + Emit(kX64I64x2MinS, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } else { InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister(), g.TempRegister()}; @@ -2908,27 +2958,27 @@ void InstructionSelector::VisitI64x2MinS(Node* node) { void InstructionSelector::VisitI64x2MaxS(Node* node) { X64OperandGenerator g(this); - InstructionOperand temps[] = {g.TempSimd128Register()}; + InstructionOperand temps[] = {g.TempFpRegister(xmm0)}; Emit(kX64I64x2MaxS, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } void InstructionSelector::VisitI64x2MinU(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempSimd128Register(), - g.TempSimd128Register()}; - Emit(kX64I64x2MinU, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), 
xmm0), - arraysize(temps), temps); + g.TempFpRegister(xmm0)}; + Emit(kX64I64x2MinU, g.DefineAsRegister(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } void InstructionSelector::VisitI64x2MaxU(Node* node) { X64OperandGenerator g(this); InstructionOperand temps[] = {g.TempSimd128Register(), - g.TempSimd128Register()}; + g.TempFpRegister(xmm0)}; Emit(kX64I64x2MaxU, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseFixed(node->InputAt(1), xmm0), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); } @@ -3256,6 +3306,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) { Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps); } +void InstructionSelector::VisitS8x16Swizzle(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register()}; + Emit(kX64S8x16Swizzle, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), + arraysize(temps), temps); +} + // static MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index b1051be5719e8b..17472a305dc1be 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -24,7 +24,7 @@ #include "src/objects/literal-objects-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/smi.h" -#include "src/objects/template-objects-inl.h" +#include "src/objects/template-objects.h" namespace v8 { namespace internal { @@ -215,6 +215,9 @@ class BytecodeGraphBuilder { FeedbackSlot slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct( const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot); + JSTypeHintLowering::LoweringResult TryBuildSimplifiedGetIterator( + const Operator* op, Node* receiver, FeedbackSlot load_slot, + FeedbackSlot call_slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed( const Operator* op, Node* receiver, FeedbackSlot slot); JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed( @@ -945,7 +948,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( bytecode_array().parameter_count(), bytecode_array().register_count(), shared_info.object())), bytecode_iterator_( - base::make_unique(bytecode_array())), + std::make_unique(bytecode_array())), bytecode_analysis_(broker_->GetBytecodeAnalysis( bytecode_array().object(), osr_offset, flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness, @@ -971,12 +974,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( if (FLAG_concurrent_inlining) { // With concurrent inlining on, the source position address doesn't change // because it's been copied from the heap. - source_position_iterator_ = base::make_unique( + source_position_iterator_ = std::make_unique( Vector(bytecode_array().source_positions_address(), bytecode_array().source_positions_size())); } else { // Otherwise, we need to access the table through a handle. 
- source_position_iterator_ = base::make_unique( + source_position_iterator_ = std::make_unique( handle(bytecode_array().object()->SourcePositionTableIfCollected(), isolate())); } @@ -2087,12 +2090,13 @@ void BytecodeGraphBuilder::VisitCloneObject() { void BytecodeGraphBuilder::VisitGetTemplateObject() { DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); - FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - ObjectRef description( + FeedbackSource source = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); + TemplateObjectDescriptionRef description( broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); - JSArrayRef template_object = - shared_info().GetTemplateObject(description, feedback_vector(), slot); - environment()->BindAccumulator(jsgraph()->Constant(template_object)); + Node* template_object = NewNode(javascript()->GetTemplateObject( + description.object(), shared_info().object(), source)); + environment()->BindAccumulator(template_object); } Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters( @@ -3297,19 +3301,21 @@ void BytecodeGraphBuilder::VisitForInStep() { void BytecodeGraphBuilder::VisitGetIterator() { PrepareEagerCheckpoint(); - Node* object = + Node* receiver = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - FeedbackSource feedback = + FeedbackSource load_feedback = CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1)); - const Operator* op = javascript()->GetIterator(feedback); + FeedbackSource call_feedback = + CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2)); + const Operator* op = javascript()->GetIterator(load_feedback, call_feedback); - JSTypeHintLowering::LoweringResult lowering = - TryBuildSimplifiedLoadNamed(op, object, feedback.slot); + JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedGetIterator( + op, receiver, load_feedback.slot, call_feedback.slot); if (lowering.IsExit()) return; DCHECK(!lowering.Changed()); - Node* node = NewNode(op, object); - environment()->BindAccumulator(node, Environment::kAttachFrameState); + Node* iterator = NewNode(op, receiver); + environment()->BindAccumulator(iterator, Environment::kAttachFrameState); } void BytecodeGraphBuilder::VisitSuspendGenerator() { @@ -3775,6 +3781,20 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op, return result; } +JSTypeHintLowering::LoweringResult +BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op, + Node* receiver, + FeedbackSlot load_slot, + FeedbackSlot call_slot) { + Node* effect = environment()->GetEffectDependency(); + Node* control = environment()->GetControlDependency(); + JSTypeHintLowering::LoweringResult early_reduction = + type_hint_lowering().ReduceGetIteratorOperation( + op, receiver, effect, control, load_slot, call_slot); + ApplyEarlyReduction(early_reduction); + return early_reduction; +} + JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver, diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc index 428ba058a7f904..4c576b771acdc0 100644 --- a/deps/v8/src/compiler/c-linkage.cc +++ b/deps/v8/src/compiler/c-linkage.cc @@ -27,7 +27,7 @@ namespace { // == x64 ==================================================================== // =========================================================================== -#ifdef _WIN64 +#ifdef V8_TARGET_OS_WIN // == x64 windows ============================================================ 
#define STACK_SHADOW_WORDS 4 #define PARAM_REGISTERS rcx, rdx, r8, r9 @@ -39,12 +39,12 @@ namespace { (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \ (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \ (1 << xmm15.code()) -#else +#else // V8_TARGET_OS_WIN // == x64 other ============================================================== #define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9 #define CALLEE_SAVE_REGISTERS \ rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit() -#endif +#endif // V8_TARGET_OS_WIN #elif V8_TARGET_ARCH_ARM // =========================================================================== diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index 4f1801146315ec..c618daf357ea53 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -29,6 +29,7 @@ namespace internal { constexpr MachineType MachineTypeOf::value; constexpr MachineType MachineTypeOf::value; +constexpr MachineType MachineTypeOf::value; namespace compiler { @@ -1349,8 +1350,8 @@ void CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor, Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, - size_t result_size, Node* target, - SloppyTNode context, + size_t result_size, TNode target, + TNode context, std::initializer_list args) { DCHECK(call_mode == StubCallMode::kCallCodeObject || call_mode == StubCallMode::kCallBuiltinPointer); @@ -1369,7 +1370,7 @@ Node* CodeAssembler::CallStubRImpl(StubCallMode call_mode, inputs.data()); } -Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl( +void CodeAssembler::TailCallStubThenBytecodeDispatchImpl( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, std::initializer_list args) { constexpr size_t kMaxNumArgs = 6; @@ -1389,33 +1390,33 @@ Node* CodeAssembler::TailCallStubThenBytecodeDispatchImpl( for (auto arg : args) inputs.Add(arg); inputs.Add(context); - return raw_assembler()->TailCallN(call_descriptor, inputs.size(), - inputs.data()); + raw_assembler()->TailCallN(call_descriptor, inputs.size(), inputs.data()); } template -Node* CodeAssembler::TailCallBytecodeDispatch( - const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) { +void CodeAssembler::TailCallBytecodeDispatch( + const CallInterfaceDescriptor& descriptor, TNode target, + TArgs... 
args) { DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args)); auto call_descriptor = Linkage::GetBytecodeDispatchCallDescriptor( zone(), descriptor, descriptor.GetStackParameterCount()); Node* nodes[] = {target, args...}; CHECK_EQ(descriptor.GetParameterCount() + 1, arraysize(nodes)); - return raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); + raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); } // Instantiate TailCallBytecodeDispatch() for argument counts used by // CSA-generated code -template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch( - const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*, - Node*, Node*); - -TNode CodeAssembler::TailCallJSCode(TNode code, - TNode context, - TNode function, - TNode new_target, - TNode arg_count) { +template V8_EXPORT_PRIVATE void CodeAssembler::TailCallBytecodeDispatch( + const CallInterfaceDescriptor& descriptor, TNode target, + TNode, TNode, TNode, + TNode); + +void CodeAssembler::TailCallJSCode(TNode code, TNode context, + TNode function, + TNode new_target, + TNode arg_count) { JSTrampolineDescriptor descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( zone(), descriptor, descriptor.GetStackParameterCount(), @@ -1423,8 +1424,7 @@ TNode CodeAssembler::TailCallJSCode(TNode code, Node* nodes[] = {code, function, new_target, arg_count, context}; CHECK_EQ(descriptor.GetParameterCount() + 2, arraysize(nodes)); - return UncheckedCast( - raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes)); + raw_assembler()->TailCallN(call_descriptor, arraysize(nodes), nodes); } Node* CodeAssembler::CallCFunctionN(Signature* signature, @@ -1439,6 +1439,13 @@ Node* CodeAssembler::CallCFunction( return raw_assembler()->CallCFunction(function, return_type, args); } +Node* CodeAssembler::CallCFunctionWithoutFunctionDescriptor( + Node* function, MachineType return_type, + std::initializer_list args) { + return raw_assembler()->CallCFunctionWithoutFunctionDescriptor( + function, return_type, args); +} + Node* CodeAssembler::CallCFunctionWithCallerSavedRegisters( Node* function, MachineType return_type, SaveFPRegsMode mode, std::initializer_list args) { @@ -1914,7 +1921,7 @@ CodeAssemblerScopedExceptionHandler::CodeAssemblerScopedExceptionHandler( compatibility_label_(label), exception_(exception) { if (has_handler_) { - label_ = base::make_unique( + label_ = std::make_unique( assembler, CodeAssemblerLabel::kDeferred); assembler_->state()->PushExceptionHandler(label_.get()); } diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index c9adb1601db1eb..8d5b8602854468 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -17,6 +17,7 @@ #include "src/codegen/code-factory.h" #include "src/codegen/machine-type.h" #include "src/codegen/source-position.h" +#include "src/codegen/tnode.h" #include "src/heap/heap.h" #include "src/objects/arguments.h" #include "src/objects/data-handler.h" @@ -79,210 +80,6 @@ TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED) template class Signature; -struct UntaggedT {}; - -struct IntegralT : UntaggedT {}; - -struct WordT : IntegralT { - static const MachineRepresentation kMachineRepresentation = - (kSystemPointerSize == 4) ? 
MachineRepresentation::kWord32 - : MachineRepresentation::kWord64; -}; - -struct RawPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::Pointer(); -}; - -template -struct RawPtr : RawPtrT {}; - -struct Word32T : IntegralT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kWord32; -}; -struct Int32T : Word32T { - static constexpr MachineType kMachineType = MachineType::Int32(); -}; -struct Uint32T : Word32T { - static constexpr MachineType kMachineType = MachineType::Uint32(); -}; -struct Int16T : Int32T { - static constexpr MachineType kMachineType = MachineType::Int16(); -}; -struct Uint16T : Uint32T, Int32T { - static constexpr MachineType kMachineType = MachineType::Uint16(); -}; -struct Int8T : Int16T { - static constexpr MachineType kMachineType = MachineType::Int8(); -}; -struct Uint8T : Uint16T, Int16T { - static constexpr MachineType kMachineType = MachineType::Uint8(); -}; - -struct Word64T : IntegralT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kWord64; -}; -struct Int64T : Word64T { - static constexpr MachineType kMachineType = MachineType::Int64(); -}; -struct Uint64T : Word64T { - static constexpr MachineType kMachineType = MachineType::Uint64(); -}; - -struct IntPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::IntPtr(); -}; -struct UintPtrT : WordT { - static constexpr MachineType kMachineType = MachineType::UintPtr(); -}; - -struct Float32T : UntaggedT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kFloat32; - static constexpr MachineType kMachineType = MachineType::Float32(); -}; - -struct Float64T : UntaggedT { - static const MachineRepresentation kMachineRepresentation = - MachineRepresentation::kFloat64; - static constexpr MachineType kMachineType = MachineType::Float64(); -}; - -#ifdef V8_COMPRESS_POINTERS -using TaggedT = Int32T; -#else -using TaggedT = IntPtrT; -#endif - -// Result of a comparison operation. -struct BoolT : Word32T {}; - -// Value type of a Turbofan node with two results. -template -struct PairT {}; - -inline constexpr MachineType CommonMachineType(MachineType type1, - MachineType type2) { - return (type1 == type2) ? type1 - : ((type1.IsTagged() && type2.IsTagged()) - ? 
MachineType::AnyTagged() - : MachineType::None()); -} - -template -struct MachineTypeOf { - static constexpr MachineType value = Type::kMachineType; -}; - -template -constexpr MachineType MachineTypeOf::value; - -template <> -struct MachineTypeOf { - static constexpr MachineType value = MachineType::AnyTagged(); -}; -template <> -struct MachineTypeOf { - static constexpr MachineType value = MachineType::AnyTagged(); -}; -template <> -struct MachineTypeOf { - static constexpr MachineType value = MachineType::TaggedSigned(); -}; -template -struct MachineTypeOf::value>::type> { - static constexpr MachineType value = MachineType::TaggedPointer(); -}; - -template -constexpr MachineType MachineTypeOf< - HeapObjectSubtype, typename std::enable_if::value>::type>::value; - -template -struct MachineRepresentationOf { - static const MachineRepresentation value = Type::kMachineRepresentation; -}; -template -struct MachineRepresentationOf< - T, typename std::enable_if::value>::type> { - static const MachineRepresentation value = - MachineTypeOf::value.representation(); -}; -template -struct MachineRepresentationOf< - T, typename std::enable_if::value>::type> { - static const MachineRepresentation value = - MachineTypeOf::value.representation(); -}; - -template -struct is_valid_type_tag { - static const bool value = std::is_base_of::value || - std::is_base_of::value || - std::is_base_of::value || - std::is_same::value; - static const bool is_tagged = std::is_base_of::value || - std::is_base_of::value; -}; - -template -struct is_valid_type_tag> { - static const bool value = - is_valid_type_tag::value && is_valid_type_tag::value; - static const bool is_tagged = false; -}; - -template -struct UnionT; - -template -struct is_valid_type_tag> { - static const bool is_tagged = - is_valid_type_tag::is_tagged && is_valid_type_tag::is_tagged; - static const bool value = is_tagged; -}; - -template -struct UnionT { - static constexpr MachineType kMachineType = - CommonMachineType(MachineTypeOf::value, MachineTypeOf::value); - static const MachineRepresentation kMachineRepresentation = - kMachineType.representation(); - static_assert(kMachineRepresentation != MachineRepresentation::kNone, - "no common representation"); - static_assert(is_valid_type_tag::is_tagged && - is_valid_type_tag::is_tagged, - "union types are only possible for tagged values"); -}; - -using Number = UnionT; -using Numeric = UnionT; - -// A pointer to a builtin function, used by Torque's function pointers. 
-using BuiltinPtr = Smi; - -class int31_t { - public: - int31_t() : value_(0) {} - int31_t(int value) : value_(value) { // NOLINT(runtime/explicit) - DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); - } - int31_t& operator=(int value) { - DCHECK_EQ((value & 0x80000000) != 0, (value & 0x40000000) != 0); - value_ = value; - return *this; - } - int32_t value() const { return value_; } - operator int32_t() const { return value_; } - - private: - int32_t value_; -}; - #define ENUM_ELEMENT(Name) k##Name, #define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name, enum class ObjectType { @@ -334,6 +131,7 @@ class Undetectable; class UniqueName; class WasmCapiFunctionData; class WasmExceptionObject; +class WasmExceptionPackage; class WasmExceptionTag; class WasmExportedFunctionData; class WasmGlobalObject; @@ -396,143 +194,6 @@ using CodeAssemblerVariableList = ZoneVector; using CodeAssemblerCallback = std::function; -template -struct is_subtype { - static const bool value = std::is_base_of::value; -}; -template -struct is_subtype, U> { - static const bool value = - is_subtype::value && is_subtype::value; -}; -template -struct is_subtype> { - static const bool value = - is_subtype::value || is_subtype::value; -}; -template -struct is_subtype, UnionT> { - static const bool value = - (is_subtype::value || is_subtype::value) && - (is_subtype::value || is_subtype::value); -}; - -template -struct types_have_common_values { - static const bool value = is_subtype::value || is_subtype::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; -template -struct types_have_common_values, U> { - static const bool value = types_have_common_values::value || - types_have_common_values::value; -}; - -template -struct types_have_common_values> { - static const bool value = types_have_common_values::value || - types_have_common_values::value; -}; -template -struct types_have_common_values, UnionT> { - static const bool value = types_have_common_values::value || - types_have_common_values::value || - types_have_common_values::value || - types_have_common_values::value; -}; - -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; - -template -struct types_have_common_values { - static const bool value = types_have_common_values::value; -}; - -// TNode is an SSA value with the static type tag T, which is one of the -// following: -// - a subclass of internal::Object represents a tagged type -// - a subclass of internal::UntaggedT represents an untagged type -// - ExternalReference -// - PairT for an operation returning two values, with types T1 -// and T2 -// - UnionT represents either a value of type T1 or of type T2. 
-template -class TNode { - public: - template ::value, int>::type = 0> - TNode(const TNode& other) : node_(other) { - LazyTemplateChecks(); - } - TNode() : TNode(nullptr) {} - - TNode operator=(TNode other) { - DCHECK_NOT_NULL(other.node_); - node_ = other.node_; - return *this; - } - - operator compiler::Node*() const { return node_; } - - static TNode UncheckedCast(compiler::Node* node) { return TNode(node); } - - protected: - explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); } - - private: - // These checks shouldn't be checked before TNode is actually used. - void LazyTemplateChecks() { - static_assert(is_valid_type_tag::value, "invalid type tag"); - } - - compiler::Node* node_; -}; - -// SloppyTNode is a variant of TNode and allows implicit casts from -// Node*. It is intended for function arguments as long as some call sites -// still use untyped Node* arguments. -// TODO(tebbi): Delete this class once transition is finished. -template -class SloppyTNode : public TNode { - public: - SloppyTNode(compiler::Node* node) // NOLINT(runtime/explicit) - : TNode(node) {} - template ::value, - int>::type = 0> - SloppyTNode(const TNode& other) // NOLINT(runtime/explicit) - : TNode(other) {} -}; - template class CodeAssemblerParameterizedLabel; @@ -627,7 +288,7 @@ TNode Float64Add(TNode a, TNode b); V(Float64ExtractLowWord32, Uint32T, Float64T) \ V(Float64ExtractHighWord32, Uint32T, Float64T) \ V(BitcastTaggedToWord, IntPtrT, Object) \ - V(BitcastTaggedSignedToWord, IntPtrT, Smi) \ + V(BitcastTaggedToWordForTagAndSmiBits, IntPtrT, AnyTaggedT) \ V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \ V(BitcastWordToTagged, Object, WordT) \ V(BitcastWordToTaggedSigned, Smi, WordT) \ @@ -641,6 +302,7 @@ TNode Float64Add(TNode a, TNode b); V(ChangeInt32ToInt64, Int64T, Int32T) \ V(ChangeUint32ToFloat64, Float64T, Word32T) \ V(ChangeUint32ToUint64, Uint64T, Word32T) \ + V(ChangeTaggedToCompressed, TaggedT, AnyTaggedT) \ V(BitcastInt32ToFloat32, Float32T, Word32T) \ V(BitcastFloat32ToInt32, Uint32T, Float32T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ @@ -1187,8 +849,12 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode RawPtrAdd(TNode left, TNode right) { return ReinterpretCast(IntPtrAdd(left, right)); } - TNode RawPtrAdd(TNode left, TNode right) { - return ReinterpretCast(IntPtrAdd(left, right)); + TNode RawPtrSub(TNode left, TNode right) { + return ReinterpretCast(IntPtrSub(left, right)); + } + TNode RawPtrSub(TNode left, TNode right) { + return Signed( + IntPtrSub(static_cast(left), static_cast(right))); } TNode WordShl(SloppyTNode value, int shift); @@ -1243,7 +909,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { template TNode BitcastTaggedToWord(TNode node) { static_assert(sizeof(Dummy) < 0, - "Should use BitcastTaggedSignedToWord instead."); + "Should use BitcastTaggedToWordForTagAndSmiBits instead."); } // Changes a double to an inptr_t for pointer arithmetic outside of Smi range. @@ -1363,26 +1029,26 @@ class V8_EXPORT_PRIVATE CodeAssembler { void TailCallStub(Callable const& callable, SloppyTNode context, TArgs... args) { TNode target = HeapConstant(callable.code()); - return TailCallStub(callable.descriptor(), target, context, args...); + TailCallStub(callable.descriptor(), target, context, args...); } template void TailCallStub(const CallInterfaceDescriptor& descriptor, SloppyTNode target, SloppyTNode context, TArgs... 
args) { - return TailCallStubImpl(descriptor, target, context, {args...}); + TailCallStubImpl(descriptor, target, context, {args...}); } template - Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, - Node* target, TArgs... args); + void TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor, + TNode target, TArgs... args); template - Node* TailCallStubThenBytecodeDispatch( + void TailCallStubThenBytecodeDispatch( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, TArgs... args) { - return TailCallStubThenBytecodeDispatchImpl(descriptor, target, context, - {args...}); + TailCallStubThenBytecodeDispatchImpl(descriptor, target, context, + {args...}); } // Tailcalls to the given code object with JSCall linkage. The JS arguments @@ -1392,14 +1058,13 @@ class V8_EXPORT_PRIVATE CodeAssembler { // Note that no arguments adaption is going on here - all the JavaScript // arguments are left on the stack unmodified. Therefore, this tail call can // only be used after arguments adaptation has been performed already. - TNode TailCallJSCode(TNode code, TNode context, - TNode function, - TNode new_target, - TNode arg_count); + void TailCallJSCode(TNode code, TNode context, + TNode function, TNode new_target, + TNode arg_count); template - Node* CallJS(Callable const& callable, Node* context, Node* function, - Node* receiver, TArgs... args) { + TNode CallJS(Callable const& callable, Node* context, Node* function, + Node* receiver, TArgs... args) { int argc = static_cast(sizeof...(args)); TNode arity = Int32Constant(argc); return CallStub(callable, context, function, arity, receiver, args...); @@ -1438,6 +1103,18 @@ class V8_EXPORT_PRIVATE CodeAssembler { return CallCFunction(function, return_type, {cargs...}); } + // Call to a C function without a function discriptor on AIX. + template + Node* CallCFunctionWithoutFunctionDescriptor(Node* function, + MachineType return_type, + CArgs... cargs) { + static_assert(v8::internal::conjunction< + std::is_convertible...>::value, + "invalid argument types"); + return CallCFunctionWithoutFunctionDescriptor(function, return_type, + {cargs...}); + } + // Call to a C function, while saving/restoring caller registers. template Node* CallCFunctionWithCallerSavedRegisters(Node* function, @@ -1486,6 +1163,10 @@ class V8_EXPORT_PRIVATE CodeAssembler { Node* CallCFunction(Node* function, MachineType return_type, std::initializer_list args); + Node* CallCFunctionWithoutFunctionDescriptor( + Node* function, MachineType return_type, + std::initializer_list args); + Node* CallCFunctionWithCallerSavedRegisters( Node* function, MachineType return_type, SaveFPRegsMode mode, std::initializer_list args); @@ -1511,15 +1192,14 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode target, TNode context, std::initializer_list args); - Node* TailCallStubThenBytecodeDispatchImpl( + void TailCallStubThenBytecodeDispatchImpl( const CallInterfaceDescriptor& descriptor, Node* target, Node* context, std::initializer_list args); Node* CallStubRImpl(StubCallMode call_mode, const CallInterfaceDescriptor& descriptor, - size_t result_size, Node* target, - SloppyTNode context, - std::initializer_list args); + size_t result_size, TNode target, + TNode context, std::initializer_list args); // These two don't have definitions and are here only for catching use cases // where the cast is not necessary. 
@@ -1810,7 +1490,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerScopedExceptionHandler { } // namespace compiler -#if defined(V8_HOST_ARCH_32_BIT) || defined(V8_COMPRESS_POINTERS) +#if defined(V8_HOST_ARCH_32_BIT) #define BINT_IS_SMI using BInt = Smi; #elif defined(V8_HOST_ARCH_64_BIT) diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index 592d85440cc23b..33990dfa480ab0 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -5,6 +5,7 @@ #include "src/compiler/compilation-dependencies.h" #include "src/compiler/compilation-dependency.h" +#include "src/execution/protectors.h" #include "src/handles/handles-inl.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/objects-inl.h" @@ -155,7 +156,7 @@ class FieldRepresentationDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the representation. - FieldRepresentationDependency(const MapRef& owner, int descriptor, + FieldRepresentationDependency(const MapRef& owner, InternalIndex descriptor, Representation representation) : owner_(owner), descriptor_(descriptor), @@ -180,7 +181,7 @@ class FieldRepresentationDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; Representation representation_; }; @@ -188,7 +189,7 @@ class FieldTypeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type. - FieldTypeDependency(const MapRef& owner, int descriptor, + FieldTypeDependency(const MapRef& owner, InternalIndex descriptor, const ObjectRef& type) : owner_(owner), descriptor_(descriptor), type_(type) { DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_))); @@ -210,13 +211,13 @@ class FieldTypeDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; ObjectRef type_; }; class FieldConstnessDependency final : public CompilationDependency { public: - FieldConstnessDependency(const MapRef& owner, int descriptor) + FieldConstnessDependency(const MapRef& owner, InternalIndex descriptor) : owner_(owner), descriptor_(descriptor) { DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_))); DCHECK_EQ(PropertyConstness::kConst, @@ -238,7 +239,7 @@ class FieldConstnessDependency final : public CompilationDependency { private: MapRef owner_; - int descriptor_; + InternalIndex descriptor_; }; class GlobalPropertyDependency final : public CompilationDependency { @@ -282,12 +283,12 @@ class GlobalPropertyDependency final : public CompilationDependency { class ProtectorDependency final : public CompilationDependency { public: explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) { - DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid); + DCHECK_EQ(cell_.value().AsSmi(), Protectors::kProtectorValid); } bool IsValid() const override { Handle cell = cell_.object(); - return cell->value() == Smi::FromInt(Isolate::kProtectorValid); + return cell->value() == Smi::FromInt(Protectors::kProtectorValid); } void Install(const MaybeObjectHandle& code) const override { @@ -404,7 +405,7 @@ AllocationType CompilationDependencies::DependOnPretenureMode( } PropertyConstness CompilationDependencies::DependOnFieldConstness( - const MapRef& map, int descriptor) { + const 
MapRef& map, InternalIndex descriptor) { MapRef owner = map.FindFieldOwner(descriptor); PropertyConstness constness = owner.GetPropertyDetails(descriptor).constness(); @@ -426,13 +427,13 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness( return PropertyConstness::kConst; } -void CompilationDependencies::DependOnFieldRepresentation(const MapRef& map, - int descriptor) { +void CompilationDependencies::DependOnFieldRepresentation( + const MapRef& map, InternalIndex descriptor) { RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor)); } void CompilationDependencies::DependOnFieldType(const MapRef& map, - int descriptor) { + InternalIndex descriptor) { RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor)); } @@ -444,7 +445,7 @@ void CompilationDependencies::DependOnGlobalProperty( } bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) { - if (cell.value().AsSmi() != Isolate::kProtectorValid) return false; + if (cell.value().AsSmi() != Protectors::kProtectorValid) return false; RecordDependency(new (zone_) ProtectorDependency(cell)); return true; } @@ -632,7 +633,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord( CompilationDependency const* CompilationDependencies::FieldRepresentationDependencyOffTheRecord( - const MapRef& map, int descriptor) const { + const MapRef& map, InternalIndex descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); PropertyDetails details = owner.GetPropertyDetails(descriptor); DCHECK(details.representation().Equals( @@ -642,8 +643,8 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord( } CompilationDependency const* -CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map, - int descriptor) const { +CompilationDependencies::FieldTypeDependencyOffTheRecord( + const MapRef& map, InternalIndex descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); ObjectRef type = owner.GetFieldType(descriptor); DCHECK(type.equals(map.GetFieldType(descriptor))); diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h index cb6cea0685f29e..0b1612487ed1dd 100644 --- a/deps/v8/src/compiler/compilation-dependencies.h +++ b/deps/v8/src/compiler/compilation-dependencies.h @@ -55,11 +55,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // Record the assumption that the field representation of a field does not // change. The field is identified by the arguments. - void DependOnFieldRepresentation(const MapRef& map, int descriptor); + void DependOnFieldRepresentation(const MapRef& map, InternalIndex descriptor); // Record the assumption that the field type of a field does not change. The // field is identified by the arguments. - void DependOnFieldType(const MapRef& map, int descriptor); + void DependOnFieldType(const MapRef& map, InternalIndex descriptor); // Return a field's constness and, if kConst, record the assumption that it // remains kConst. The field is identified by the arguments. @@ -68,7 +68,8 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // kConst if the map is stable (and register stability dependency in that // case). This is to ensure that fast elements kind transitions cannot be // used to mutate fields without deoptimization of the dependent code. 
- PropertyConstness DependOnFieldConstness(const MapRef& map, int descriptor); + PropertyConstness DependOnFieldConstness(const MapRef& map, + InternalIndex descriptor); // Record the assumption that neither {cell}'s {CellType} changes, nor the // {IsReadOnly()} flag of {cell}'s {PropertyDetails}. @@ -119,9 +120,9 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { CompilationDependency const* TransitionDependencyOffTheRecord( const MapRef& target_map) const; CompilationDependency const* FieldRepresentationDependencyOffTheRecord( - const MapRef& map, int descriptor) const; + const MapRef& map, InternalIndex descriptor) const; CompilationDependency const* FieldTypeDependencyOffTheRecord( - const MapRef& map, int descriptor) const; + const MapRef& map, InternalIndex descriptor) const; // Exposed only for testing purposes. bool AreValid() const; diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc index 537744652b9686..5c0f6b1cfaabf4 100644 --- a/deps/v8/src/compiler/decompression-elimination.cc +++ b/deps/v8/src/compiler/decompression-elimination.cc @@ -67,7 +67,6 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) { Node* input_node = node->InputAt(0); IrOpcode::Value input_opcode = input_node->opcode(); if (IrOpcode::IsDecompressOpcode(input_opcode)) { - DCHECK(IsValidDecompress(node->opcode(), input_opcode)); DCHECK_EQ(input_node->InputCount(), 1); return Replace(input_node->InputAt(0)); } else if (IsReducibleConstantOpcode(input_opcode)) { @@ -167,6 +166,42 @@ Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) { return any_change ? Changed(node) : NoChange(); } +Reduction DecompressionElimination::ReduceWord32Equal(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kWord32Equal); + + DCHECK_EQ(node->InputCount(), 2); + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + + if (!IrOpcode::IsCompressOpcode(lhs->opcode()) || + !IrOpcode::IsCompressOpcode(rhs->opcode())) { + return NoChange(); + } + // Input nodes for compress operation. + lhs = lhs->InputAt(0); + rhs = rhs->InputAt(0); + + bool changed = false; + + if (lhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) { + Node* input = lhs->InputAt(0); + if (IsReducibleConstantOpcode(input->opcode())) { + node->ReplaceInput(0, GetCompressedConstant(input)); + changed = true; + } + } + + if (rhs->opcode() == IrOpcode::kBitcastWordToTaggedSigned) { + Node* input = rhs->InputAt(0); + if (IsReducibleConstantOpcode(input->opcode())) { + node->ReplaceInput(1, GetCompressedConstant(input)); + changed = true; + } + } + + return changed ? Changed(node) : NoChange(); +} + Reduction DecompressionElimination::ReduceWord64Equal(Node* node) { DCHECK_EQ(node->opcode(), IrOpcode::kWord64Equal); @@ -220,6 +255,8 @@ Reduction DecompressionElimination::Reduce(Node* node) { return ReducePhi(node); case IrOpcode::kTypedStateValues: return ReduceTypedStateValues(node); + case IrOpcode::kWord32Equal: + return ReduceWord32Equal(node); case IrOpcode::kWord64Equal: return ReduceWord64Equal(node); default: diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h index 85a6c98aa0bbb5..6b2be009c6b06f 100644 --- a/deps/v8/src/compiler/decompression-elimination.h +++ b/deps/v8/src/compiler/decompression-elimination.h @@ -65,6 +65,11 @@ class V8_EXPORT_PRIVATE DecompressionElimination final // value of that constant. 
Reduction ReduceWord64Equal(Node* node); + // This is a workaround for load elimination test. + // Replaces Compress -> BitcastWordToTaggedSigned -> ReducibleConstant + // to CompressedConstant on both inputs of Word32Equal operation. + Reduction ReduceWord32Equal(Node* node); + Graph* graph() const { return graph_; } MachineOperatorBuilder* machine() const { return machine_; } CommonOperatorBuilder* common() const { return common_; } diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index 8dfe356c34d485..ceff453164bbfb 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -187,8 +187,11 @@ class EffectControlLinearizer { Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state); void LowerTransitionElementsKind(Node* node); Node* LowerLoadFieldByIndex(Node* node); + Node* LowerLoadMessage(Node* node); Node* LowerLoadTypedElement(Node* node); Node* LowerLoadDataViewElement(Node* node); + Node* LowerLoadStackArgument(Node* node); + void LowerStoreMessage(Node* node); void LowerStoreTypedElement(Node* node); void LowerStoreDataViewElement(Node* node); void LowerStoreSignedSmallElement(Node* node); @@ -227,6 +230,8 @@ class EffectControlLinearizer { Node* LowerStringComparison(Callable const& callable, Node* node); Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind); + Node* BuildTypedArrayDataPointer(Node* base, Node* external); + Node* ChangeInt32ToCompressedSmi(Node* value); Node* ChangeInt32ToSmi(Node* value); Node* ChangeInt32ToIntPtr(Node* value); @@ -247,6 +252,7 @@ class EffectControlLinearizer { Node* SmiShiftBitsConstant(); void TransitionElementsTo(Node* node, Node* array, ElementsKind from, ElementsKind to); + void ConnectUnreachableToEnd(Node* effect, Node* control); Factory* factory() const { return isolate()->factory(); } Isolate* isolate() const { return jsgraph()->isolate(); } @@ -308,19 +314,8 @@ struct PendingEffectPhi { : effect_phi(effect_phi), block(block) {} }; -void ConnectUnreachableToEnd(Node* effect, Node* control, JSGraph* jsgraph) { - Graph* graph = jsgraph->graph(); - CommonOperatorBuilder* common = jsgraph->common(); - if (effect->opcode() == IrOpcode::kDead) return; - if (effect->opcode() != IrOpcode::kUnreachable) { - effect = graph->NewNode(common->Unreachable(), effect, control); - } - Node* throw_node = graph->NewNode(common->Throw(), effect, control); - NodeProperties::MergeControlToEnd(graph, common, throw_node); -} - void UpdateEffectPhi(Node* node, BasicBlock* block, - BlockEffectControlMap* block_effects, JSGraph* jsgraph) { + BlockEffectControlMap* block_effects) { // Update all inputs to an effect phi with the effects from the given // block->effect map. DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode()); @@ -607,7 +602,7 @@ void EffectControlLinearizer::Run() { // record the effect phi for later processing. 
pending_effect_phis.push_back(PendingEffectPhi(effect_phi, block)); } else { - UpdateEffectPhi(effect_phi, block, &block_effects, jsgraph()); + UpdateEffectPhi(effect_phi, block, &block_effects); } } @@ -649,7 +644,7 @@ void EffectControlLinearizer::Run() { if (control->opcode() == IrOpcode::kLoop) { pending_effect_phis.push_back(PendingEffectPhi(effect, block)); } else { - UpdateEffectPhi(effect, block, &block_effects, jsgraph()); + UpdateEffectPhi(effect, block, &block_effects); } } else if (control->opcode() == IrOpcode::kIfException) { // The IfException is connected into the effect chain, so we need @@ -734,7 +729,7 @@ void EffectControlLinearizer::Run() { // during the first pass (because they could have incoming back edges). for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) { UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block, - &block_effects, jsgraph()); + &block_effects); } } @@ -828,7 +823,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state, // Break the effect chain on {Unreachable} and reconnect to the graph end. // Mark the following code for deletion by connecting to the {Dead} node. if (node->opcode() == IrOpcode::kUnreachable) { - ConnectUnreachableToEnd(*effect, *control, jsgraph()); + ConnectUnreachableToEnd(*effect, *control); *effect = *control = jsgraph()->Dead(); } } @@ -1243,6 +1238,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kTransitionElementsKind: LowerTransitionElementsKind(node); break; + case IrOpcode::kLoadMessage: + result = LowerLoadMessage(node); + break; + case IrOpcode::kStoreMessage: + LowerStoreMessage(node); + break; case IrOpcode::kLoadFieldByIndex: result = LowerLoadFieldByIndex(node); break; @@ -1252,6 +1253,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kLoadDataViewElement: result = LowerLoadDataViewElement(node); break; + case IrOpcode::kLoadStackArgument: + result = LowerLoadStackArgument(node); + break; case IrOpcode::kStoreTypedElement: LowerStoreTypedElement(node); break; @@ -1325,6 +1329,13 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, return true; } +void EffectControlLinearizer::ConnectUnreachableToEnd(Node* effect, + Node* control) { + DCHECK_EQ(effect->opcode(), IrOpcode::kUnreachable); + Node* throw_node = graph()->NewNode(common()->Throw(), effect, control); + NodeProperties::MergeControlToEnd(graph(), common(), throw_node); +} + #define __ gasm()-> Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) { @@ -1601,7 +1612,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) { __ Bind(&if_smi); { // If {value} is a Smi, then we only need to check that it's not zero. - __ Goto(&done, __ Word32Equal(__ IntPtrEqual(value, __ IntPtrConstant(0)), + __ Goto(&done, __ Word32Equal(__ TaggedEqual(value, __ SmiConstant(0)), __ Int32Constant(0))); } @@ -1952,7 +1963,7 @@ Node* EffectControlLinearizer::LowerCheckReceiverOrNullOrUndefined( __ LoadField(AccessBuilder::ForMapInstanceType(), value_map); // Rule out all primitives except oddballs (true, false, undefined, null). 
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE); + STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE); STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); Node* check0 = __ Uint32LessThanOrEqual(__ Uint32Constant(ODDBALL_TYPE), value_instance_type); @@ -2028,9 +2039,8 @@ Node* EffectControlLinearizer::LowerStringConcat(Node* node) { callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kNoDeopt | Operator::kNoWrite | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, - rhs, __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); return value; } @@ -2112,8 +2122,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node, // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have // to return -kMinInt, which is not representable as Word32. - Node* check_lhs_minint = graph()->NewNode(machine()->Word32Equal(), lhs, - __ Int32Constant(kMinInt)); + Node* check_lhs_minint = __ Word32Equal(lhs, __ Int32Constant(kMinInt)); __ Branch(check_lhs_minint, &if_lhs_minint, &if_lhs_notminint); __ Bind(&if_lhs_minint); @@ -2760,7 +2769,7 @@ Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) { DCHECK(machine()->Is64()); Node* value = node->InputAt(0); - Node* map = jsgraph()->HeapConstant(factory()->bigint_map()); + Node* map = __ HeapConstant(factory()->bigint_map()); // BigInts with value 0 must be of size 0 (canonical form). auto if_zerodigits = __ MakeLabel(); auto if_onedigit = __ MakeLabel(); @@ -2963,10 +2972,11 @@ Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) { Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); Node* value_instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), value_map); - STATIC_ASSERT(JS_TYPED_ARRAY_TYPE + 1 == JS_DATA_VIEW_TYPE); Node* vfalse = __ Uint32LessThan( - __ Int32Sub(value_instance_type, __ Int32Constant(JS_TYPED_ARRAY_TYPE)), - __ Int32Constant(2)); + __ Int32Sub(value_instance_type, + __ Int32Constant(FIRST_JS_ARRAY_BUFFER_VIEW_TYPE)), + __ Int32Constant(LAST_JS_ARRAY_BUFFER_VIEW_TYPE - + FIRST_JS_ARRAY_BUFFER_VIEW_TYPE + 1)); __ Goto(&done, vfalse); __ Bind(&if_smi); @@ -3521,7 +3531,7 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) { __ Load(MachineType::Pointer(), frame, __ IntPtrConstant(StandardFrameConstants::kCallerFPOffset)); Node* parent_frame_type = __ Load( - MachineType::TypeCompressedTagged(), parent_frame, + MachineType::IntPtr(), parent_frame, __ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset)); __ GotoIf(__ IntPtrEqual(parent_frame_type, @@ -3541,7 +3551,7 @@ Node* EffectControlLinearizer::LowerNewDoubleElements(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, - jsgraph()->HeapConstant(factory()->empty_fixed_array())); + __ HeapConstant(factory()->empty_fixed_array())); // Compute the effective size of the backing store. 
Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kDoubleSizeLog2)), @@ -3589,7 +3599,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) { auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer); Node* zero_length = __ IntPtrEqual(length, __ IntPtrConstant(0)); __ GotoIf(zero_length, &done, - jsgraph()->HeapConstant(factory()->empty_fixed_array())); + __ HeapConstant(factory()->empty_fixed_array())); // Compute the effective size of the backing store. Node* size = __ IntAdd(__ WordShl(length, __ IntPtrConstant(kTaggedSizeLog2)), @@ -3671,10 +3681,9 @@ Node* EffectControlLinearizer::LowerNewConsString(Node* node) { __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)), &if_twobyte, &if_onebyte); __ Bind(&if_onebyte); - __ Goto(&done, - jsgraph()->HeapConstant(factory()->cons_one_byte_string_map())); + __ Goto(&done, __ HeapConstant(factory()->cons_one_byte_string_map())); __ Bind(&if_twobyte); - __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map())); + __ Goto(&done, __ HeapConstant(factory()->cons_string_map())); __ Bind(&done); Node* result_map = done.PhiAt(0); @@ -4287,9 +4296,8 @@ Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) { graph()->zone(), callable.descriptor(), callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kFoldable | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, - rhs, __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); // Check for exception sentinel: Smi is returned to signal BigIntTooBig. __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, FeedbackSource{}, @@ -4305,9 +4313,8 @@ Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) { graph()->zone(), callable.descriptor(), callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kFoldable | Operator::kNoThrow); - Node* value = - __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), - node->InputAt(0), __ NoContextConstant()); + Node* value = __ Call(call_descriptor, __ HeapConstant(callable.code()), + node->InputAt(0), __ NoContextConstant()); return value; } @@ -4746,6 +4753,20 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) { __ Bind(&done); } +Node* EffectControlLinearizer::LowerLoadMessage(Node* node) { + Node* offset = node->InputAt(0); + Node* object_pattern = + __ LoadField(AccessBuilder::ForExternalIntPtr(), offset); + return __ BitcastWordToTagged(object_pattern); +} + +void EffectControlLinearizer::LowerStoreMessage(Node* node) { + Node* offset = node->InputAt(0); + Node* object = node->InputAt(1); + Node* object_pattern = __ BitcastTaggedToWord(object); + __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern); +} + Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { Node* object = node->InputAt(0); Node* index = node->InputAt(1); @@ -4801,6 +4822,7 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { // architectures, or a mutable HeapNumber. 
__ Bind(&if_double); { + auto loaded_field = __ MakeLabel(MachineRepresentation::kTagged); auto done_double = __ MakeLabel(MachineRepresentation::kFloat64); index = __ WordSar(index, one); @@ -4818,10 +4840,9 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { Node* result = __ Load(MachineType::Float64(), object, offset); __ Goto(&done_double, result); } else { - Node* result = + Node* field = __ Load(MachineType::TypeCompressedTagged(), object, offset); - result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result); - __ Goto(&done_double, result); + __ Goto(&loaded_field, field); } } @@ -4834,10 +4855,24 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) { __ IntPtrConstant(kTaggedSizeLog2)), __ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) - kHeapObjectTag)); - Node* result = + Node* field = __ Load(MachineType::TypeCompressedTagged(), properties, offset); - result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result); - __ Goto(&done_double, result); + __ Goto(&loaded_field, field); + } + + __ Bind(&loaded_field); + { + Node* field = loaded_field.PhiAt(0); + // We may have transitioned in-place away from double, so check that + // this is a HeapNumber -- otherwise the load is fine and we don't need + // to copy anything anyway. + __ GotoIf(ObjectIsSmi(field), &done, field); + Node* field_map = __ LoadField(AccessBuilder::ForMap(), field); + __ GotoIfNot(__ TaggedEqual(field_map, __ HeapNumberMapConstant()), &done, + field); + + Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), field); + __ Goto(&done_double, value); } __ Bind(&done_double); @@ -4988,6 +5023,35 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) { done.PhiAt(0)); } +// Compute the data pointer, handling the case where the {external} pointer +// is the effective data pointer (i.e. the {base} is Smi zero). +Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base, + Node* external) { + if (IntPtrMatcher(base).Is(0)) { + return external; + } else { + if (COMPRESS_POINTERS_BOOL) { + // TurboFan does not support loading of compressed fields without + // decompression so we add the following operations to workaround that. + // We can't load the base value as word32 because in that case the + // value will not be marked as tagged in the pointer map and will not + // survive GC. + // Compress base value back to in order to be able to decompress by + // doing an unsafe add below. Both decompression and compression + // will be removed by the decompression elimination pass. + base = __ ChangeTaggedToCompressed(base); + base = __ BitcastTaggedToWord(base); + // Zero-extend Tagged_t to UintPtr according to current compression + // scheme so that the addition with |external_pointer| (which already + // contains compensated offset value) will decompress the tagged value. + // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for + // details. + base = ChangeUint32ToUintPtr(base); + } + return __ UnsafePointerAdd(base, external); + } +} + Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) { ExternalArrayType array_type = ExternalArrayTypeOf(node->op()); Node* buffer = node->InputAt(0); @@ -4999,17 +5063,22 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) { // ArrayBuffer (if there's any) as long as we are still operating on it. __ Retain(buffer); - // Compute the effective storage pointer, handling the case where the - // {external} pointer is the effective storage pointer (i.e. 
the {base} - // is Smi zero). - Node* storage = IntPtrMatcher(base).Is(0) - ? external - : __ UnsafePointerAdd(base, external); + Node* data_ptr = BuildTypedArrayDataPointer(base, external); // Perform the actual typed element access. return __ LoadElement(AccessBuilder::ForTypedArrayElement( array_type, true, LoadSensitivity::kCritical), - storage, index); + data_ptr, index); +} + +Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) { + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + + Node* argument = + __ LoadElement(AccessBuilder::ForStackArgument(), base, index); + + return __ BitcastWordToTagged(argument); } void EffectControlLinearizer::LowerStoreTypedElement(Node* node) { @@ -5024,16 +5093,11 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) { // ArrayBuffer (if there's any) as long as we are still operating on it. __ Retain(buffer); - // Compute the effective storage pointer, handling the case where the - // {external} pointer is the effective storage pointer (i.e. the {base} - // is Smi zero). - Node* storage = IntPtrMatcher(base).Is(0) - ? external - : __ UnsafePointerAdd(base, external); + Node* data_ptr = BuildTypedArrayDataPointer(base, external); // Perform the actual typed element access. __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true), - storage, index, value); + data_ptr, index, value); } void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array, @@ -5402,7 +5466,7 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) { auto call_descriptor = Linkage::GetRuntimeCallDescriptor( graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags); __ Call(call_descriptor, __ CEntryStubConstant(1), - jsgraph()->SmiConstant(static_cast(reason)), + __ SmiConstant(static_cast(reason)), __ ExternalConstant(ExternalReference::Create(id)), __ Int32Constant(1), __ NoContextConstant()); } diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc index 18ae069b21ad09..b2fb8d10ceeb97 100644 --- a/deps/v8/src/compiler/escape-analysis-reducer.cc +++ b/deps/v8/src/compiler/escape-analysis-reducer.cc @@ -326,9 +326,8 @@ void EscapeAnalysisReducer::Finalize() { TypeCache::Get()->kArgumentsLengthType); NodeProperties::ReplaceValueInput(load, arguments_frame, 0); NodeProperties::ReplaceValueInput(load, offset, 1); - NodeProperties::ChangeOp(load, - jsgraph()->simplified()->LoadElement( - AccessBuilder::ForStackArgument())); + NodeProperties::ChangeOp( + load, jsgraph()->simplified()->LoadStackArgument()); break; } case IrOpcode::kLoadField: { diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc index 9478c08c6c13a3..576f6ce5427d62 100644 --- a/deps/v8/src/compiler/frame-states.cc +++ b/deps/v8/src/compiler/frame-states.cc @@ -137,13 +137,17 @@ Node* CreateStubBuiltinContinuationFrameState( // Stack parameters first. Depending on {mode}, final parameters are added // by the deoptimizer and aren't explicitly passed in the frame state. int stack_parameter_count = - descriptor.GetParameterCount() - DeoptimizerParameterCountFor(mode); - // Reserving space in the vector, except for the case where - // stack_parameter_count is -1. - actual_parameters.reserve(stack_parameter_count >= 0 - ? stack_parameter_count + - descriptor.GetRegisterParameterCount() - : 0); + descriptor.GetStackParameterCount() - DeoptimizerParameterCountFor(mode); + + // Ensure the parameters added by the deoptimizer are passed on the stack. 
+ // This check prevents using TFS builtins as continuations while doing the + // lazy deopt. Use TFC or TFJ builtin as a lazy deopt continuation which + // would pass the result parameter on the stack. + DCHECK_GE(stack_parameter_count, 0); + + // Reserving space in the vector. + actual_parameters.reserve(stack_parameter_count + + descriptor.GetRegisterParameterCount()); for (int i = 0; i < stack_parameter_count; ++i) { actual_parameters.push_back( parameters[descriptor.GetRegisterParameterCount() + i]); diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h index 2345f1d360539f..6af63030f83ad9 100644 --- a/deps/v8/src/compiler/functional-list.h +++ b/deps/v8/src/compiler/functional-list.h @@ -90,6 +90,8 @@ class FunctionalList { size_t Size() const { return elements_ ? elements_->size : 0; } + void Clear() { elements_ = nullptr; } + class iterator { public: explicit iterator(Cons* cur) : current_(cur) {} diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index b4ad81ecda0a1f..5c167db9805511 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -99,6 +99,10 @@ Node* GraphAssembler::IntPtrEqual(Node* left, Node* right) { } Node* GraphAssembler::TaggedEqual(Node* left, Node* right) { + if (COMPRESS_POINTERS_BOOL) { + return Word32Equal(ChangeTaggedToCompressed(left), + ChangeTaggedToCompressed(right)); + } return WordEqual(left, right); } @@ -232,10 +236,10 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) { current_effect_, current_control_); } -Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) { +Node* GraphAssembler::BitcastTaggedToWordForTagAndSmiBits(Node* value) { return current_effect_ = - graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value, - current_effect_, current_control_); + graph()->NewNode(machine()->BitcastTaggedToWordForTagAndSmiBits(), + value, current_effect_, current_control_); } Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 0088f867c54f72..d2df5a75f3a168 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -233,7 +233,7 @@ class GraphAssembler { Node* ToNumber(Node* value); Node* BitcastWordToTagged(Node* value); Node* BitcastTaggedToWord(Node* value); - Node* BitcastTaggedSignedToWord(Node* value); + Node* BitcastTaggedToWordForTagAndSmiBits(Node* value); Node* Allocate(AllocationType allocation, Node* size); Node* LoadField(FieldAccess const&, Node* object); Node* LoadElement(ElementAccess const&, Node* object, Node* index); diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc index 85123261dbda78..dddba7d36f62ff 100644 --- a/deps/v8/src/compiler/graph-visualizer.cc +++ b/deps/v8/src/compiler/graph-visualizer.cc @@ -163,7 +163,6 @@ void JsonPrintInlinedFunctionInfo( void JsonPrintAllSourceWithPositions(std::ostream& os, OptimizedCompilationInfo* info, Isolate* isolate) { - AllowDeferredHandleDereference allow_deference_for_print_code; os << "\"sources\" : {"; Handle