From caf46dca5b272459823c353d136510f29447cb53 Mon Sep 17 00:00:00 2001 From: Xiaoping Liu Date: Thu, 13 Apr 2023 11:31:39 +0800 Subject: [PATCH 1/3] update to nodejs-14.21.3-1.module+el8.7.0+18531+81d21ca6 Signed-off-by: Xiaoping Liu --- 0001-add-LoongArch-support.patch | 52473 ---------------- ...semantics-Don-t-use-regex-to-trim-wh.patch | 49 + ...ignore-__proto__-keys-CVE-2022-24999.patch | 98 - ...tr-len-check-in-config_sortlist-to-a.patch | 52 + download | 2 +- nodejs.spec | 43 +- 6 files changed, 123 insertions(+), 52594 deletions(-) delete mode 100644 0001-add-LoongArch-support.patch create mode 100644 0003-deps-http-cache-semantics-Don-t-use-regex-to-trim-wh.patch delete mode 100644 0003-deps-qs-parse-ignore-__proto__-keys-CVE-2022-24999.patch create mode 100644 0004-deps-cares-Add-str-len-check-in-config_sortlist-to-a.patch diff --git a/0001-add-LoongArch-support.patch b/0001-add-LoongArch-support.patch deleted file mode 100644 index df51ac8..0000000 --- a/0001-add-LoongArch-support.patch +++ /dev/null @@ -1,52473 +0,0 @@ -From 998f865384dc7d927f79ac8c68dd7fe12d5e8c5a Mon Sep 17 00:00:00 2001 -From: Shi Pujin -Date: Wed, 26 Oct 2022 15:07:55 +0800 -Subject: [PATCH] add LoongArch support - - -diff --git a/configure.py b/configure.py -index 892e1d42..34ca13c6 100755 ---- a/configure.py -+++ b/configure.py -@@ -56,7 +56,7 @@ parser = optparse.OptionParser() - valid_os = ('win', 'mac', 'solaris', 'freebsd', 'openbsd', 'linux', - 'android', 'aix', 'cloudabi') - valid_arch = ('arm', 'arm64', 'ia32', 'mips', 'mipsel', 'mips64el', 'ppc', -- 'ppc64', 'x32','x64', 'x86', 'x86_64', 's390x') -+ 'ppc64', 'x32','x64', 'x86', 'x86_64', 's390x', 'loong64') - valid_arm_float_abi = ('soft', 'softfp', 'hard') - valid_arm_fpu = ('vfp', 'vfpv3', 'vfpv3-d16', 'neon') - valid_mips_arch = ('loongson', 'r1', 'r2', 'r6', 'rx') -@@ -987,6 +987,7 @@ def host_arch_cc(): - '__PPC__' : 'ppc64', - '__x86_64__' : 'x64', - '__s390x__' : 's390x', -+ '__loongarch64' : 'loong64', - } - - rtn = 'ia32' # default -@@ -1013,6 +1014,7 @@ def host_arch_win(): - 'x86' : 'ia32', - 'arm' : 'arm', - 'mips' : 'mips', -+ 'loongarch' : 'loong64', - } - - return matchup.get(arch, 'ia32') -@@ -1061,6 +1063,14 @@ def clang_version_ge(version_checked): - return True - return False - -+def configure_loong64(o): -+ can_use_fpu_instructions = 'true' -+ o['variables']['v8_can_use_fpu_instructions'] = b(can_use_fpu_instructions) -+ o['variables']['loong64_fpu_mode'] = 'hard' -+ host_byteorder = 'little' -+ o['variables']['v8_host_byteorder'] = host_byteorder -+ -+ - def gcc_version_ge(version_checked): - for compiler in [(CC, 'c'), (CXX, 'c++')]: - ok, is_clang, clang_version, gcc_version = \ -@@ -1122,6 +1132,8 @@ def configure_node(o): - configure_arm(o) - elif target_arch in ('mips', 'mipsel', 'mips64el'): - configure_mips(o, target_arch) -+ elif target_arch == 'loong64': -+ configure_loong64(o) - - if flavor == 'aix': - o['variables']['node_target_type'] = 'static_library' -diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn -index a8100ad6..af723df4 100644 ---- a/deps/v8/BUILD.gn -+++ b/deps/v8/BUILD.gn -@@ -692,6 +692,16 @@ config("toolchain") { - cflags += [ "-march=z196" ] - } - } -+ -+ # loong64 simulators. 
-+ if (target_is_simulator && v8_current_cpu == "loong64") { -+ defines += [ "_LOONG64_TARGET_SIMULATOR" ] -+ } -+ -+ if (v8_current_cpu == "loong64") { -+ defines += [ "V8_TARGET_ARCH_LOONG64" ] -+ } -+ - if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { - if (v8_current_cpu == "ppc") { - defines += [ "V8_TARGET_ARCH_PPC" ] -@@ -1715,6 +1725,11 @@ v8_source_set("v8_initializers") { - ### gcmole(arch:mips64el) ### - "src/builtins/mips64/builtins-mips64.cc", - ] -+ } else if (v8_current_cpu == "loong64") { -+ sources += [ -+ ### gcmole(arch:loong64) ### -+ "src/builtins/loong64/builtins-loong64.cc", -+ ] - } else if (v8_current_cpu == "ppc") { - sources += [ - ### gcmole(arch:ppc) ### -@@ -3413,6 +3428,33 @@ v8_source_set("v8_base_without_compiler") { - "src/regexp/mips64/regexp-macro-assembler-mips64.h", - "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", - ] -+ } else if (v8_current_cpu == "loong64") { -+ sources += [ ### gcmole(arch:loong64) ### -+ "src/codegen/loong64/assembler-loong64-inl.h", -+ "src/codegen/loong64/assembler-loong64.cc", -+ "src/codegen/loong64/assembler-loong64.h", -+ "src/codegen/loong64/constants-loong64.cc", -+ "src/codegen/loong64/constants-loong64.h", -+ "src/codegen/loong64/cpu-loong64.cc", -+ "src/codegen/loong64/interface-descriptors-loong64.cc", -+ "src/codegen/loong64/macro-assembler-loong64.cc", -+ "src/codegen/loong64/macro-assembler-loong64.h", -+ "src/codegen/loong64/register-loong64.h", -+ "src/compiler/backend/loong64/code-generator-loong64.cc", -+ "src/compiler/backend/loong64/instruction-codes-loong64.h", -+ "src/compiler/backend/loong64/instruction-scheduler-loong64.cc", -+ "src/compiler/backend/loong64/instruction-selector-loong64.cc", -+ "src/debug/loong64/debug-loong64.cc", -+ "src/deoptimizer/loong64/deoptimizer-loong64.cc", -+ "src/diagnostics/loong64/disasm-loong64.cc", -+ "src/execution/loong64/frame-constants-loong64.cc", -+ "src/execution/loong64/frame-constants-loong64.h", -+ "src/execution/loong64/simulator-loong64.cc", -+ "src/execution/loong64/simulator-loong64.h", -+ "src/regexp/loong64/regexp-macro-assembler-loong64.cc", -+ "src/regexp/loong64/regexp-macro-assembler-loong64.h", -+ "src/wasm/baseline/loong64/liftoff-assembler-loong64.h", -+ ] - } else if (v8_current_cpu == "ppc") { - sources += [ ### gcmole(arch:ppc) ### - "src/codegen/ppc/assembler-ppc-inl.h", -@@ -4124,6 +4166,8 @@ v8_source_set("cppgc_base") { - sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ] - } else if (target_cpu == "mips64el") { - sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ] -+ } else if (target_cpu == "loong64") { -+ sources += [ "src/heap/cppgc/asm/loong64/push_registers_asm.cc" ] - } - } else if (is_win) { - if (target_cpu == "x64") { -diff --git a/deps/v8/BUILD.gn.orig b/deps/v8/BUILD.gn.orig -new file mode 100644 -index 00000000..a8100ad6 ---- /dev/null -+++ b/deps/v8/BUILD.gn.orig -@@ -0,0 +1,5075 @@ -+# Copyright 2014 The Chromium Authors. All rights reserved. -+# Use of this source code is governed by a BSD-style license that can be -+# found in the LICENSE file. 
-+ -+import("//build/config/android/config.gni") -+import("//build/config/arm.gni") -+import("//build/config/dcheck_always_on.gni") -+import("//build/config/host_byteorder.gni") -+import("//build/config/mips.gni") -+import("//build/config/sanitizers/sanitizers.gni") -+import("//build_overrides/build.gni") -+ -+if (is_android) { -+ import("//build/config/android/rules.gni") -+} -+ -+import("gni/snapshot_toolchain.gni") -+import("gni/v8.gni") -+ -+# Specifies if the target build is a simulator build. Comparing target cpu -+# with v8 target cpu to not affect simulator builds for making cross-compile -+# snapshots. -+target_is_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) || -+ (current_cpu != v8_current_cpu && v8_multi_arch_build) -+ -+# For faster Windows builds. See https://crbug.com/v8/8475. -+emit_builtins_as_inline_asm = is_win && is_clang -+ -+declare_args() { -+ # Print to stdout on Android. -+ v8_android_log_stdout = false -+ -+ # Dynamically set an additional dependency from v8/custom_deps. -+ v8_custom_deps = "" -+ -+ # Turns on all V8 debug features. Enables running V8 in a pseudo debug mode -+ # within a release Chrome. -+ v8_enable_debugging_features = is_debug -+ -+ # Sets -DV8_ENABLE_FUTURE. -+ v8_enable_future = false -+ -+ # Lite mode disables a number of performance optimizations to reduce memory -+ # at the cost of performance. -+ # Sets --DV8_LITE_MODE. -+ v8_enable_lite_mode = false -+ -+ # Sets -DVERIFY_HEAP. -+ v8_enable_verify_heap = "" -+ -+ # Sets -DVERIFY_PREDICTABLE -+ v8_enable_verify_predictable = false -+ -+ # Enable compiler warnings when using V8_DEPRECATED apis. -+ v8_deprecation_warnings = true -+ -+ # Enable compiler warnings when using V8_DEPRECATE_SOON apis. -+ v8_imminent_deprecation_warnings = true -+ -+ # Embeds the given script into the snapshot. -+ v8_embed_script = "" -+ -+ # Allows the embedder to add a custom suffix to the version string. -+ v8_embedder_string = "" -+ -+ # Sets -dENABLE_DISASSEMBLER. -+ v8_enable_disassembler = "" -+ -+ # Sets the number of internal fields on promise objects. -+ v8_promise_internal_field_count = 0 -+ -+ # Sets -dENABLE_GDB_JIT_INTERFACE. -+ v8_enable_gdbjit = "" -+ -+ # Sets -dENABLE_VTUNE_JIT_INTERFACE. -+ v8_enable_vtunejit = false -+ -+ # Sets -dENABLE_VTUNE_TRACEMARK. -+ v8_enable_vtunetracemark = false -+ -+ # Sets -dENABLE_HANDLE_ZAPPING. -+ v8_enable_handle_zapping = is_debug -+ -+ # Enable slow dchecks. -+ v8_enable_slow_dchecks = false -+ -+ # Enable fast mksnapshot runs. -+ v8_enable_fast_mksnapshot = false -+ -+ # Optimize code for Torque executable, even during a debug build. -+ v8_enable_fast_torque = "" -+ -+ # Enable the registration of unwinding info for Windows x64 and ARM64. -+ v8_win64_unwinding_info = true -+ -+ # Enable code comments for builtins in the snapshot (impacts performance). -+ v8_enable_snapshot_code_comments = false -+ -+ # Enable native counters from the snapshot (impacts performance, sets -+ # -dV8_SNAPSHOT_NATIVE_CODE_COUNTERS). -+ # This option will generate extra code in the snapshot to increment counters, -+ # as per the --native-code-counters flag. -+ v8_enable_snapshot_native_code_counters = "" -+ -+ # Enable code-generation-time checking of types in the CodeStubAssembler. -+ v8_enable_verify_csa = false -+ -+ # Enable pointer compression (sets -dV8_COMPRESS_POINTERS). -+ v8_enable_pointer_compression = "" -+ v8_enable_31bit_smis_on_64bit_arch = false -+ -+ # Sets -dOBJECT_PRINT. -+ v8_enable_object_print = "" -+ -+ # Sets -dV8_TRACE_MAPS. 
-+ v8_enable_trace_maps = "" -+ -+ # Sets -dV8_ENABLE_CHECKS. -+ v8_enable_v8_checks = "" -+ -+ # Sets -dV8_TRACE_IGNITION. -+ v8_enable_trace_ignition = false -+ -+ # Sets -dV8_TRACE_FEEDBACK_UPDATES. -+ v8_enable_trace_feedback_updates = false -+ -+ # Sets -dV8_CONCURRENT_MARKING -+ v8_enable_concurrent_marking = true -+ -+ # Sets -dV8_ARRAY_BUFFER_EXTENSION -+ v8_enable_array_buffer_extension = true -+ -+ # Enables various testing features. -+ v8_enable_test_features = "" -+ -+ # With post mortem support enabled, metadata is embedded into libv8 that -+ # describes various parameters of the VM for use by debuggers. See -+ # tools/gen-postmortem-metadata.py for details. -+ v8_postmortem_support = false -+ -+ # Use Siphash as added protection against hash flooding attacks. -+ v8_use_siphash = false -+ -+ # Switches off inlining in V8. -+ v8_no_inline = false -+ -+ # Override OS page size when generating snapshot -+ v8_os_page_size = "0" -+ -+ # Similar to vfp but on MIPS. -+ v8_can_use_fpu_instructions = true -+ -+ # Similar to the ARM hard float ABI but on MIPS. -+ v8_use_mips_abi_hardfloat = true -+ -+ # Controls the threshold for on-heap/off-heap Typed Arrays. -+ v8_typed_array_max_size_in_heap = 64 -+ -+ v8_enable_gdbjit = -+ ((v8_current_cpu == "x86" || v8_current_cpu == "x64") && -+ (is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux) -+ -+ # Temporary flag to allow embedders to update their microtasks scopes -+ # while rolling in a new version of V8. -+ v8_check_microtasks_scopes_consistency = "" -+ -+ # Enable mitigations for executing untrusted code. -+ # Disabled by default on ia32 due to conflicting requirements with embedded -+ # builtins. Enabled by default on Android since it doesn't support -+ # site-isolation in Chrome and on simulator builds which test code generation -+ # on these platforms. -+ v8_untrusted_code_mitigations = -+ v8_current_cpu != "x86" && (is_android || target_is_simulator) -+ -+ # Enable minor mark compact. -+ v8_enable_minor_mc = true -+ -+ # Check that each header can be included in isolation (requires also -+ # setting the "check_v8_header_includes" gclient variable to run a -+ # specific hook). -+ v8_check_header_includes = false -+ -+ # Enable sharing read-only space across isolates. -+ # Sets -DV8_SHARED_RO_HEAP. -+ v8_enable_shared_ro_heap = "" -+ -+ # Enable lazy source positions by default. -+ v8_enable_lazy_source_positions = true -+ -+ # Enable third party HEAP library -+ v8_enable_third_party_heap = false -+ -+ # Libaries used by third party heap -+ v8_third_party_heap_libs = [] -+ -+ # Source code used by third party heap -+ v8_third_party_heap_files = [] -+ -+ # Disable write barriers when GCs are non-incremental and -+ # heap has single generation. -+ v8_disable_write_barriers = false -+ -+ # Redirect allocation in young generation so that there will be -+ # only one single generation. -+ v8_enable_single_generation = "" -+ -+ # Use token threaded dispatch for the regular expression interpreter. -+ # Use switch-based dispatch if this is false -+ v8_enable_regexp_interpreter_threaded_dispatch = true -+ -+ # Enable additional targets necessary for verification of torque -+ # file generation -+ v8_verify_torque_generation_invariance = false -+ -+ # Disable all snapshot compression. -+ v8_enable_snapshot_compression = true -+ -+ # Enable control-flow integrity features, such as pointer authentication for -+ # ARM64. -+ v8_control_flow_integrity = false -+ -+ # Enable object names in cppgc for debug purposes. 
-+ cppgc_enable_object_names = false -+ -+ # Enable V8 heap sandbox experimental feature. -+ # Sets -DV8_HEAP_SANDBOX. -+ v8_enable_heap_sandbox = "" -+ -+ # Experimental support for native context independent code. -+ # https://crbug.com/v8/8888 -+ v8_enable_nci_code = false -+} -+ -+# Derived defaults. -+if (v8_enable_verify_heap == "") { -+ v8_enable_verify_heap = v8_enable_debugging_features -+} -+if (v8_enable_object_print == "") { -+ v8_enable_object_print = v8_enable_debugging_features -+} -+if (v8_enable_disassembler == "") { -+ v8_enable_disassembler = v8_enable_debugging_features -+} -+if (v8_enable_trace_maps == "") { -+ v8_enable_trace_maps = v8_enable_debugging_features -+} -+if (v8_enable_test_features == "") { -+ v8_enable_test_features = v8_enable_debugging_features || dcheck_always_on -+} -+if (v8_enable_v8_checks == "") { -+ v8_enable_v8_checks = v8_enable_debugging_features -+} -+if (v8_check_microtasks_scopes_consistency == "") { -+ v8_check_microtasks_scopes_consistency = -+ v8_enable_debugging_features || dcheck_always_on -+} -+if (v8_enable_snapshot_native_code_counters == "") { -+ v8_enable_snapshot_native_code_counters = v8_enable_debugging_features -+} -+if (v8_enable_pointer_compression == "") { -+ # TODO(v8:v7703): temporarily enable pointer compression on arm64 and on x64 -+ v8_enable_pointer_compression = -+ v8_current_cpu == "arm64" || v8_current_cpu == "x64" -+} -+if (v8_enable_fast_torque == "") { -+ v8_enable_fast_torque = v8_enable_fast_mksnapshot -+} -+if (v8_enable_heap_sandbox == "") { -+ v8_enable_heap_sandbox = false -+} -+if (v8_enable_single_generation == "") { -+ v8_enable_single_generation = v8_disable_write_barriers -+} -+ -+# Toggle pointer compression for correctness fuzzing when building the -+# clang_x64_pointer_compression toolchain. We'll correctness-compare the -+# default build with the clang_x64_pointer_compression build. -+if (v8_multi_arch_build && -+ rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) == -+ "clang_x64_pointer_compression") { -+ v8_enable_pointer_compression = !v8_enable_pointer_compression -+} -+if (v8_enable_shared_ro_heap == "") { -+ v8_enable_shared_ro_heap = !v8_enable_pointer_compression -+} -+ -+assert(!v8_disable_write_barriers || v8_enable_single_generation, -+ "Disabling write barriers works only with single generation") -+ -+assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, -+ "Untrusted code mitigations are unsupported on ia32") -+ -+assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, -+ "Control-flow integrity is only supported on arm64") -+ -+assert( -+ !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, -+ "Pointer compression is not supported with shared read-only heap enabled") -+ -+assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression, -+ "V8 Heap Sandbox requires pointer compression") -+ -+v8_random_seed = "314159265" -+v8_toolset_for_shell = "host" -+ -+############################################################################### -+# Configurations -+# -+ -+config("internal_config_base") { -+ # Only targets in this file and its subdirs can depend on this. -+ visibility = [ "./*" ] -+ -+ configs = [ ":v8_tracing_config" ] -+ -+ include_dirs = [ -+ ".", -+ "include", -+ "$target_gen_dir", -+ ] -+} -+ -+config("internal_config") { -+ defines = [] -+ # Only targets in this file and its subdirs can depend on this. 
-+ visibility = [ "./*" ] -+ -+ configs = [ -+ "//build/config/compiler:wexit_time_destructors", -+ ":internal_config_base", -+ ":v8_header_features", -+ ] -+ -+ if (is_component_build) { -+ defines += [ "BUILDING_V8_SHARED" ] -+ } -+} -+ -+# Should be applied to all targets that write trace events. -+config("v8_tracing_config") { -+ if (v8_use_perfetto) { -+ include_dirs = [ -+ "third_party/perfetto/include", -+ "$root_gen_dir/third_party/perfetto", -+ "$root_gen_dir/third_party/perfetto/build_config", -+ ] -+ } -+} -+ -+# This config should be applied to code using the libplatform. -+config("libplatform_config") { -+ include_dirs = [ "include" ] -+ if (is_component_build) { -+ defines = [ "USING_V8_PLATFORM_SHARED" ] -+ } -+} -+ -+# This config should be applied to code using the libbase. -+config("libbase_config") { -+ if (is_component_build) { -+ defines = [ "USING_V8_BASE_SHARED" ] -+ } -+ libs = [] -+ if (is_android && current_toolchain != host_toolchain) { -+ libs += [ "log" ] -+ } -+} -+ -+# This config should be applied to code using the cppgc_base. -+config("cppgc_base_config") { -+ defines = [] -+ if (cppgc_enable_object_names) { -+ defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ] -+ } -+} -+ -+# This config should be applied to code using the libsampler. -+config("libsampler_config") { -+ include_dirs = [ "include" ] -+} -+ -+# This config should only be applied to code using V8 and not any V8 code -+# itself. -+config("external_config") { -+ defines = [] -+ configs = [ ":v8_header_features" ] -+ if (is_component_build) { -+ defines += [ "USING_V8_SHARED" ] -+ } -+ include_dirs = [ -+ "include", -+ "$target_gen_dir/include", -+ ] -+} -+ -+# This config should only be applied to code that needs to be explicitly -+# aware of whether we are using startup data or not. -+config("external_startup_data") { -+ if (v8_use_external_startup_data) { -+ defines = [ "V8_USE_EXTERNAL_STARTUP_DATA" ] -+ } -+} -+ -+# Put defines that are used in public headers here; public headers are -+# defined in "v8_headers" and are included by embedders of V8. -+config("v8_header_features") { -+ visibility = [ ":*" ] -+ -+ defines = [] -+ -+ if (v8_enable_v8_checks) { -+ defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h". -+ } -+ if (v8_enable_pointer_compression) { -+ defines += [ "V8_COMPRESS_POINTERS" ] -+ } -+ if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) { -+ defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ] -+ } -+ if (v8_enable_heap_sandbox) { -+ defines += [ "V8_HEAP_SANDBOX" ] -+ } -+ if (v8_deprecation_warnings) { -+ defines += [ "V8_DEPRECATION_WARNINGS" ] -+ } -+ if (v8_imminent_deprecation_warnings) { -+ defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ] -+ } -+} -+ -+# Put defines here that are only used in our internal files and NEVER in -+# external headers that embedders (such as chromium and node) might include. -+config("features") { -+ # Only targets in this file and its subdirs can depend on this. 
-+ visibility = [ "./*" ] -+ -+ defines = [] -+ -+ configs = [ ":v8_header_features" ] -+ -+ if (v8_embedder_string != "") { -+ defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ] -+ } -+ if (v8_enable_disassembler) { -+ defines += [ "ENABLE_DISASSEMBLER" ] -+ } -+ if (v8_promise_internal_field_count != 0) { -+ defines += -+ [ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ] -+ } -+ defines += -+ [ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ] -+ -+ assert( -+ !v8_enable_raw_heap_snapshots, -+ "This flag is deprecated and is now available through the inspector interface as an argument to profiler's method `takeHeapSnapshot`. Consider using blink's flag `enable_additional_blink_object_names` to get better naming of internal objects.") -+ -+ if (v8_enable_future) { -+ defines += [ "V8_ENABLE_FUTURE" ] -+ } -+ if (v8_enable_lite_mode) { -+ defines += [ "V8_LITE_MODE" ] -+ } -+ if (v8_enable_gdbjit) { -+ defines += [ "ENABLE_GDB_JIT_INTERFACE" ] -+ } -+ if (v8_enable_vtunejit) { -+ defines += [ "ENABLE_VTUNE_JIT_INTERFACE" ] -+ } -+ if (v8_enable_vtunetracemark) { -+ defines += [ "ENABLE_VTUNE_TRACEMARK" ] -+ } -+ if (v8_enable_minor_mc) { -+ defines += [ "ENABLE_MINOR_MC" ] -+ } -+ if (v8_enable_object_print) { -+ defines += [ "OBJECT_PRINT" ] -+ } -+ if (v8_enable_verify_heap) { -+ defines += [ "VERIFY_HEAP" ] -+ } -+ if (v8_enable_verify_predictable) { -+ defines += [ "VERIFY_PREDICTABLE" ] -+ } -+ if (v8_enable_trace_maps) { -+ defines += [ "V8_TRACE_MAPS" ] -+ } -+ if (v8_enable_trace_ignition) { -+ defines += [ "V8_TRACE_IGNITION" ] -+ } -+ if (v8_enable_trace_feedback_updates) { -+ defines += [ "V8_TRACE_FEEDBACK_UPDATES" ] -+ } -+ if (v8_enable_test_features) { -+ defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ] -+ defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ] -+ defines += [ "V8_ENABLE_DOUBLE_CONST_STORE_CHECK" ] -+ } -+ if (v8_enable_i18n_support) { -+ defines += [ "V8_INTL_SUPPORT" ] -+ } -+ if (v8_enable_handle_zapping) { -+ defines += [ "ENABLE_HANDLE_ZAPPING" ] -+ } -+ if (v8_enable_snapshot_native_code_counters) { -+ defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] -+ } -+ if (v8_enable_single_generation) { -+ defines += [ "V8_ENABLE_SINGLE_GENERATION" ] -+ } -+ if (v8_disable_write_barriers) { -+ defines += [ "V8_DISABLE_WRITE_BARRIERS" ] -+ } -+ if (v8_enable_third_party_heap) { -+ defines += [ "V8_ENABLE_THIRD_PARTY_HEAP" ] -+ } -+ if (v8_use_external_startup_data) { -+ defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] -+ } -+ if (v8_enable_concurrent_marking) { -+ defines += [ "V8_CONCURRENT_MARKING" ] -+ } -+ if (v8_enable_array_buffer_extension) { -+ defines += [ "V8_ARRAY_BUFFER_EXTENSION" ] -+ } -+ if (v8_enable_lazy_source_positions) { -+ defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] -+ } -+ if (v8_check_microtasks_scopes_consistency) { -+ defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] -+ } -+ if (v8_use_multi_snapshots) { -+ defines += [ "V8_MULTI_SNAPSHOTS" ] -+ } -+ if (v8_use_siphash) { -+ defines += [ "V8_USE_SIPHASH" ] -+ } -+ if (v8_enable_shared_ro_heap) { -+ defines += [ "V8_SHARED_RO_HEAP" ] -+ } -+ if (v8_use_perfetto) { -+ defines += [ "V8_USE_PERFETTO" ] -+ } -+ if (v8_win64_unwinding_info) { -+ defines += [ "V8_WIN64_UNWINDING_INFO" ] -+ } -+ if (v8_enable_regexp_interpreter_threaded_dispatch) { -+ defines += [ "V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH" ] -+ } -+ if (v8_enable_snapshot_compression) { -+ defines += [ "V8_SNAPSHOT_COMPRESSION" ] -+ } -+ if (v8_control_flow_integrity) { -+ 
defines += [ "V8_ENABLE_CONTROL_FLOW_INTEGRITY" ] -+ } -+ if (v8_enable_wasm_gdb_remote_debugging) { -+ defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ] -+ } -+ if (v8_enable_nci_code) { -+ defines += [ "V8_ENABLE_NCI_CODE" ] -+ } -+} -+ -+config("toolchain") { -+ # Only targets in this file and its subdirs can depend on this. -+ visibility = [ "./*" ] -+ -+ defines = [] -+ cflags = [] -+ ldflags = [] -+ -+ if (v8_current_cpu == "arm") { -+ defines += [ "V8_TARGET_ARCH_ARM" ] -+ if (arm_version >= 7) { -+ defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ] -+ } -+ if (arm_fpu == "vfpv3-d16") { -+ defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ] -+ } else if (arm_fpu == "vfpv3") { -+ defines += [ -+ "CAN_USE_VFP3_INSTRUCTIONS", -+ "CAN_USE_VFP32DREGS", -+ ] -+ } else if (arm_fpu == "neon") { -+ defines += [ -+ "CAN_USE_VFP3_INSTRUCTIONS", -+ "CAN_USE_VFP32DREGS", -+ "CAN_USE_NEON", -+ ] -+ } -+ -+ # TODO(jochen): Add support for arm_test_noprobe. -+ -+ if (current_cpu != "arm") { -+ # These defines ares used for the ARM simulator. -+ if (arm_float_abi == "hard") { -+ defines += [ "USE_EABI_HARDFLOAT=1" ] -+ } else if (arm_float_abi == "softfp") { -+ defines += [ "USE_EABI_HARDFLOAT=0" ] -+ } -+ } -+ } -+ if (v8_current_cpu == "arm64") { -+ defines += [ "V8_TARGET_ARCH_ARM64" ] -+ if (v8_control_flow_integrity) { -+ # TODO(v8:10026): Enable this in src/build. -+ if (current_cpu == "arm64") { -+ cflags += [ "-mbranch-protection=standard" ] -+ } -+ } -+ } -+ -+ # Mips64el/mipsel simulators. -+ if (target_is_simulator && -+ (v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) { -+ defines += [ "_MIPS_TARGET_SIMULATOR" ] -+ } -+ -+ if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") { -+ defines += [ "V8_TARGET_ARCH_MIPS" ] -+ if (v8_can_use_fpu_instructions) { -+ defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] -+ } -+ if (v8_use_mips_abi_hardfloat) { -+ defines += [ -+ "__mips_hard_float=1", -+ "CAN_USE_FPU_INSTRUCTIONS", -+ ] -+ } else { -+ defines += [ "__mips_soft_float=1" ] -+ } -+ if (mips_arch_variant == "r6") { -+ defines += [ -+ "_MIPS_ARCH_MIPS32R6", -+ "FPU_MODE_FP64", -+ ] -+ if (mips_use_msa) { -+ defines += [ "_MIPS_MSA" ] -+ } -+ } else if (mips_arch_variant == "r2") { -+ defines += [ "_MIPS_ARCH_MIPS32R2" ] -+ if (mips_fpu_mode == "fp64") { -+ defines += [ "FPU_MODE_FP64" ] -+ } else if (mips_fpu_mode == "fpxx") { -+ defines += [ "FPU_MODE_FPXX" ] -+ } else if (mips_fpu_mode == "fp32") { -+ defines += [ "FPU_MODE_FP32" ] -+ } -+ } else if (mips_arch_variant == "r1") { -+ defines += [ "FPU_MODE_FP32" ] -+ } -+ -+ # TODO(jochen): Add support for mips_arch_variant rx and loongson. 
-+ } -+ -+ if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") { -+ defines += [ "V8_TARGET_ARCH_MIPS64" ] -+ if (v8_can_use_fpu_instructions) { -+ defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] -+ } -+ if (mips_use_msa) { -+ defines += [ "_MIPS_MSA" ] -+ } -+ if (host_byteorder == "little") { -+ defines += [ "V8_TARGET_ARCH_MIPS64_LE" ] -+ } else if (host_byteorder == "big") { -+ defines += [ "V8_TARGET_ARCH_MIPS64_BE" ] -+ } -+ if (v8_use_mips_abi_hardfloat) { -+ defines += [ -+ "__mips_hard_float=1", -+ "CAN_USE_FPU_INSTRUCTIONS", -+ ] -+ } else { -+ defines += [ "__mips_soft_float=1" ] -+ } -+ if (mips_arch_variant == "r6") { -+ defines += [ "_MIPS_ARCH_MIPS64R6" ] -+ } else if (mips_arch_variant == "r2") { -+ defines += [ "_MIPS_ARCH_MIPS64R2" ] -+ } -+ } -+ if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { -+ defines += [ "V8_TARGET_ARCH_S390" ] -+ cflags += [ "-ffp-contract=off" ] -+ if (v8_current_cpu == "s390x") { -+ defines += [ "V8_TARGET_ARCH_S390X" ] -+ } -+ if (host_byteorder == "little") { -+ defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ] -+ } else { -+ cflags += [ "-march=z196" ] -+ } -+ } -+ if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { -+ if (v8_current_cpu == "ppc") { -+ defines += [ "V8_TARGET_ARCH_PPC" ] -+ } else if (v8_current_cpu == "ppc64") { -+ defines += [ "V8_TARGET_ARCH_PPC64" ] -+ } -+ if (host_byteorder == "little") { -+ defines += [ "V8_TARGET_ARCH_PPC_LE" ] -+ } else if (host_byteorder == "big") { -+ defines += [ "V8_TARGET_ARCH_PPC_BE" ] -+ if (current_os == "aix") { -+ cflags += [ -+ # Work around AIX ceil, trunc and round oddities. -+ "-mcpu=power5+", -+ "-mfprnd", -+ -+ # Work around AIX assembler popcntb bug. -+ "-mno-popcntb", -+ ] -+ } -+ } -+ } -+ -+ if (v8_current_cpu == "x86") { -+ defines += [ "V8_TARGET_ARCH_IA32" ] -+ if (is_win) { -+ # Ensure no surprising artifacts from 80bit double math with x86. -+ cflags += [ "/arch:SSE2" ] -+ } -+ } -+ if (v8_current_cpu == "x64") { -+ defines += [ "V8_TARGET_ARCH_X64" ] -+ if (is_win) { -+ # Increase the initial stack size. The default is 1MB, this is 2MB. This -+ # applies only to executables and shared libraries produced by V8 since -+ # ldflags are not pushed to dependants. -+ ldflags += [ "/STACK:2097152" ] -+ } -+ } -+ if (is_android && v8_android_log_stdout) { -+ defines += [ "V8_ANDROID_LOG_STDOUT" ] -+ } -+ -+ # V8_TARGET_OS_ defines. The target OS may differ from host OS e.g. in -+ # mksnapshot. We additionally set V8_HAVE_TARGET_OS to determine that a -+ # target OS has in fact been set; otherwise we internally assume that target -+ # OS == host OS (see v8config.h). -+ if (target_os == "android") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_ANDROID" ] -+ } else if (target_os == "fuchsia") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_FUCHSIA" ] -+ } else if (target_os == "ios") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_IOS" ] -+ } else if (target_os == "linux") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_LINUX" ] -+ } else if (target_os == "mac") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_MACOSX" ] -+ } else if (target_os == "win") { -+ defines += [ "V8_HAVE_TARGET_OS" ] -+ defines += [ "V8_TARGET_OS_WIN" ] -+ } -+ -+ # TODO(jochen): Support v8_enable_prof on Windows. -+ # TODO(jochen): Add support for compiling with simulators. 
-+ -+ if (v8_enable_debugging_features) { -+ if (is_linux && v8_enable_backtrace) { -+ ldflags += [ "-rdynamic" ] -+ } -+ -+ defines += [ "DEBUG" ] -+ if (v8_enable_slow_dchecks) { -+ defines += [ "ENABLE_SLOW_DCHECKS" ] -+ } -+ } else if (dcheck_always_on) { -+ defines += [ "DEBUG" ] -+ } -+ -+ if (v8_enable_verify_csa) { -+ defines += [ "ENABLE_VERIFY_CSA" ] -+ } -+ -+ if (!v8_untrusted_code_mitigations) { -+ defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ] -+ } -+ -+ if (v8_no_inline) { -+ if (is_win) { -+ cflags += [ "/Ob0" ] -+ } else { -+ cflags += [ -+ "-fno-inline-functions", -+ "-fno-inline", -+ ] -+ } -+ } -+ -+ if (is_clang) { -+ cflags += [ "-Wmissing-field-initializers" ] -+ -+ if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") { -+ # We exclude MIPS because the IsMipsArchVariant macro causes trouble. -+ cflags += [ "-Wunreachable-code" ] -+ } -+ -+ if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || -+ v8_current_cpu == "mips64el") { -+ cflags += [ "-Wshorten-64-to-32" ] -+ } -+ } -+ -+ if (is_win) { -+ cflags += [ -+ "/wd4245", # Conversion with signed/unsigned mismatch. -+ "/wd4267", # Conversion with possible loss of data. -+ "/wd4324", # Padding structure due to alignment. -+ "/wd4701", # Potentially uninitialized local variable. -+ "/wd4702", # Unreachable code. -+ "/wd4703", # Potentially uninitialized local pointer variable. -+ "/wd4709", # Comma operator within array index expr (bugged). -+ "/wd4714", # Function marked forceinline not inlined. -+ -+ # MSVC assumes that control can get past an exhaustive switch and then -+ # warns if there's no return there (see https://crbug.com/v8/7658) -+ "/wd4715", # Not all control paths return a value. -+ -+ "/wd4718", # Recursive call has no side-effect. -+ "/wd4723", # https://crbug.com/v8/7771 -+ "/wd4724", # https://crbug.com/v8/7771 -+ "/wd4800", # Forcing value to bool. -+ ] -+ } -+ -+ if (!is_clang && is_win) { -+ cflags += [ "/wd4506" ] # Benign "no definition for inline function" -+ } -+ -+ if (!is_clang && !is_win) { -+ cflags += [ -+ # Disable gcc warnings for optimizations based on the assumption that -+ # signed overflow does not occur. Generates false positives (see -+ # http://crbug.com/v8/6341). -+ "-Wno-strict-overflow", -+ -+ # GCC assumes that control can get past an exhaustive switch and then -+ # warns if there's no return there (see https://crbug.com/v8/7658). -+ "-Wno-return-type", -+ ] -+ } -+ -+ # Chromium uses a hand-picked subset of UBSan coverage. We want everything. -+ if (is_ubsan) { -+ cflags += [ "-fsanitize=undefined" ] -+ } -+} -+ -+# For code that is hot during mksnapshot. In fast-mksnapshot builds, we -+# optimize some files even in debug builds to speed up mksnapshot times. -+config("always_optimize") { -+ configs = [ ":internal_config" ] -+ -+ # TODO(crbug.com/621335) Rework this so that we don't have the confusion -+ # between "optimize_speed" and "optimize_max". -+ if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) { -+ configs += [ "//build/config/compiler:optimize_speed" ] -+ } else { -+ configs += [ "//build/config/compiler:optimize_max" ] -+ } -+} -+ -+# Configs for code coverage with gcov. Separate configs for cflags and ldflags -+# to selectively influde cflags in non-test targets only. 
-+config("v8_gcov_coverage_cflags") { -+ cflags = [ -+ "-fprofile-arcs", -+ "-ftest-coverage", -+ ] -+} -+ -+config("v8_gcov_coverage_ldflags") { -+ ldflags = [ "-fprofile-arcs" ] -+} -+ -+############################################################################### -+# Actions -+# -+ -+# Only for Windows clang builds. Converts the embedded.S file produced by -+# mksnapshot into an embedded.cc file with corresponding inline assembly. -+template("asm_to_inline_asm") { -+ name = target_name -+ if (name == "default") { -+ suffix = "" -+ } else { -+ suffix = "_$name" -+ } -+ -+ action("asm_to_inline_asm_" + name) { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ assert(emit_builtins_as_inline_asm) -+ -+ script = "tools/snapshot/asm_to_inline_asm.py" -+ deps = [ ":run_mksnapshot_" + name ] -+ sources = [ "$target_gen_dir/embedded${suffix}.S" ] -+ outputs = [ "$target_gen_dir/embedded${suffix}.cc" ] -+ args = invoker.args -+ args += [ -+ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir), -+ rebase_path("$target_gen_dir/embedded${suffix}.cc", root_build_dir), -+ ] -+ } -+} -+ -+if (is_android && enable_java_templates) { -+ android_assets("v8_external_startup_data_assets") { -+ if (v8_use_external_startup_data) { -+ # We don't support side-by-side snapshots on Android within Chromium. -+ assert(!v8_use_multi_snapshots) -+ deps = [ "//v8" ] -+ renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] -+ if (current_cpu == "arm" || current_cpu == "x86" || -+ current_cpu == "mipsel") { -+ renaming_destinations = [ "snapshot_blob_32.bin" ] -+ } else { -+ renaming_destinations = [ "snapshot_blob_64.bin" ] -+ } -+ disable_compression = true -+ } -+ } -+} -+ -+action("postmortem-metadata") { -+ # Only targets in this file and the top-level visibility target can -+ # depend on this. 
-+ visibility = [ -+ ":*", -+ "//:gn_visibility", -+ ] -+ -+ script = "tools/gen-postmortem-metadata.py" -+ -+ # NOSORT -+ sources = [ -+ "src/objects/objects.h", -+ "src/objects/objects-inl.h", -+ "src/objects/allocation-site-inl.h", -+ "src/objects/allocation-site.h", -+ "src/objects/cell-inl.h", -+ "src/objects/cell.h", -+ "src/objects/code-inl.h", -+ "src/objects/code.h", -+ "src/objects/data-handler.h", -+ "src/objects/data-handler-inl.h", -+ "src/objects/descriptor-array.h", -+ "src/objects/descriptor-array-inl.h", -+ "src/objects/feedback-cell.h", -+ "src/objects/feedback-cell-inl.h", -+ "src/objects/fixed-array-inl.h", -+ "src/objects/fixed-array.h", -+ "src/objects/heap-number-inl.h", -+ "src/objects/heap-number.h", -+ "src/objects/heap-object-inl.h", -+ "src/objects/heap-object.h", -+ "src/objects/instance-type.h", -+ "src/objects/js-array-inl.h", -+ "src/objects/js-array.h", -+ "src/objects/js-array-buffer-inl.h", -+ "src/objects/js-array-buffer.h", -+ "src/objects/js-objects-inl.h", -+ "src/objects/js-objects.h", -+ "src/objects/js-promise-inl.h", -+ "src/objects/js-promise.h", -+ "src/objects/js-regexp-inl.h", -+ "src/objects/js-regexp.cc", -+ "src/objects/js-regexp.h", -+ "src/objects/js-regexp-string-iterator-inl.h", -+ "src/objects/js-regexp-string-iterator.h", -+ "src/objects/map.h", -+ "src/objects/map.cc", -+ "src/objects/map-inl.h", -+ "src/objects/js-objects.cc", -+ "src/objects/name.h", -+ "src/objects/name-inl.h", -+ "src/objects/oddball-inl.h", -+ "src/objects/oddball.h", -+ "src/objects/primitive-heap-object.h", -+ "src/objects/primitive-heap-object-inl.h", -+ "src/objects/scope-info.h", -+ "src/objects/script.h", -+ "src/objects/script-inl.h", -+ "src/objects/shared-function-info.h", -+ "src/objects/shared-function-info-inl.h", -+ "src/objects/string.cc", -+ "src/objects/string.h", -+ "src/objects/string-comparator.cc", -+ "src/objects/string-comparator.h", -+ "src/objects/string-inl.h", -+ "src/objects/struct.h", -+ "src/objects/struct-inl.h", -+ "$target_gen_dir/torque-generated/instance-types-tq.h", -+ ] -+ -+ outputs = [ "$target_gen_dir/debug-support.cc" ] -+ -+ args = rebase_path(outputs, root_build_dir) + -+ rebase_path(sources, root_build_dir) -+ -+ deps = [ ":run_torque" ] -+} -+ -+torque_files = [ -+ "src/builtins/array-copywithin.tq", -+ "src/builtins/array-every.tq", -+ "src/builtins/array-filter.tq", -+ "src/builtins/array-find.tq", -+ "src/builtins/array-findindex.tq", -+ "src/builtins/array-foreach.tq", -+ "src/builtins/array-from.tq", -+ "src/builtins/array-isarray.tq", -+ "src/builtins/array-join.tq", -+ "src/builtins/array-lastindexof.tq", -+ "src/builtins/array-map.tq", -+ "src/builtins/array-of.tq", -+ "src/builtins/array-reduce-right.tq", -+ "src/builtins/array-reduce.tq", -+ "src/builtins/array-reverse.tq", -+ "src/builtins/array-shift.tq", -+ "src/builtins/array-slice.tq", -+ "src/builtins/array-some.tq", -+ "src/builtins/array-splice.tq", -+ "src/builtins/array-unshift.tq", -+ "src/builtins/array.tq", -+ "src/builtins/base.tq", -+ "src/builtins/bigint.tq", -+ "src/builtins/boolean.tq", -+ "src/builtins/builtins-string.tq", -+ "src/builtins/collections.tq", -+ "src/builtins/cast.tq", -+ "src/builtins/convert.tq", -+ "src/builtins/console.tq", -+ "src/builtins/data-view.tq", -+ "src/builtins/finalization-registry.tq", -+ "src/builtins/frames.tq", -+ "src/builtins/frame-arguments.tq", -+ "src/builtins/growable-fixed-array.tq", -+ "src/builtins/ic-callable.tq", -+ "src/builtins/ic.tq", -+ "src/builtins/internal-coverage.tq", -+ 
"src/builtins/iterator.tq", -+ "src/builtins/math.tq", -+ "src/builtins/number.tq", -+ "src/builtins/object-fromentries.tq", -+ "src/builtins/object.tq", -+ "src/builtins/promise-abstract-operations.tq", -+ "src/builtins/promise-all.tq", -+ "src/builtins/promise-all-element-closure.tq", -+ "src/builtins/promise-any.tq", -+ "src/builtins/promise-constructor.tq", -+ "src/builtins/promise-finally.tq", -+ "src/builtins/promise-misc.tq", -+ "src/builtins/promise-race.tq", -+ "src/builtins/promise-reaction-job.tq", -+ "src/builtins/promise-resolve.tq", -+ "src/builtins/promise-then.tq", -+ "src/builtins/promise-jobs.tq", -+ "src/builtins/proxy-constructor.tq", -+ "src/builtins/proxy-delete-property.tq", -+ "src/builtins/proxy-get-property.tq", -+ "src/builtins/proxy-get-prototype-of.tq", -+ "src/builtins/proxy-has-property.tq", -+ "src/builtins/proxy-is-extensible.tq", -+ "src/builtins/proxy-prevent-extensions.tq", -+ "src/builtins/proxy-revocable.tq", -+ "src/builtins/proxy-revoke.tq", -+ "src/builtins/proxy-set-property.tq", -+ "src/builtins/proxy-set-prototype-of.tq", -+ "src/builtins/proxy.tq", -+ "src/builtins/reflect.tq", -+ "src/builtins/regexp-exec.tq", -+ "src/builtins/regexp-match-all.tq", -+ "src/builtins/regexp-match.tq", -+ "src/builtins/regexp-replace.tq", -+ "src/builtins/regexp-search.tq", -+ "src/builtins/regexp-source.tq", -+ "src/builtins/regexp-split.tq", -+ "src/builtins/regexp-test.tq", -+ "src/builtins/regexp.tq", -+ "src/builtins/string-endswith.tq", -+ "src/builtins/string-html.tq", -+ "src/builtins/string-iterator.tq", -+ "src/builtins/string-pad.tq", -+ "src/builtins/string-repeat.tq", -+ "src/builtins/string-replaceall.tq", -+ "src/builtins/string-slice.tq", -+ "src/builtins/string-startswith.tq", -+ "src/builtins/string-substring.tq", -+ "src/builtins/string-substr.tq", -+ "src/builtins/symbol.tq", -+ "src/builtins/torque-internal.tq", -+ "src/builtins/typed-array-createtypedarray.tq", -+ "src/builtins/typed-array-every.tq", -+ "src/builtins/typed-array-filter.tq", -+ "src/builtins/typed-array-find.tq", -+ "src/builtins/typed-array-findindex.tq", -+ "src/builtins/typed-array-foreach.tq", -+ "src/builtins/typed-array-from.tq", -+ "src/builtins/typed-array-of.tq", -+ "src/builtins/typed-array-reduce.tq", -+ "src/builtins/typed-array-reduceright.tq", -+ "src/builtins/typed-array-set.tq", -+ "src/builtins/typed-array-slice.tq", -+ "src/builtins/typed-array-some.tq", -+ "src/builtins/typed-array-sort.tq", -+ "src/builtins/typed-array-subarray.tq", -+ "src/builtins/typed-array.tq", -+ "src/builtins/wasm.tq", -+ "src/ic/handler-configuration.tq", -+ "src/objects/allocation-site.tq", -+ "src/objects/api-callbacks.tq", -+ "src/objects/arguments.tq", -+ "src/objects/cell.tq", -+ "src/objects/code.tq", -+ "src/objects/contexts.tq", -+ "src/objects/data-handler.tq", -+ "src/objects/debug-objects.tq", -+ "src/objects/descriptor-array.tq", -+ "src/objects/embedder-data-array.tq", -+ "src/objects/feedback-cell.tq", -+ "src/objects/feedback-vector.tq", -+ "src/objects/fixed-array.tq", -+ "src/objects/foreign.tq", -+ "src/objects/free-space.tq", -+ "src/objects/heap-number.tq", -+ "src/objects/heap-object.tq", -+ "src/objects/intl-objects.tq", -+ "src/objects/js-aggregate-error.tq", -+ "src/objects/js-array-buffer.tq", -+ "src/objects/js-array.tq", -+ "src/objects/js-collection-iterator.tq", -+ "src/objects/js-collection.tq", -+ "src/objects/js-generator.tq", -+ "src/objects/js-objects.tq", -+ "src/objects/js-promise.tq", -+ "src/objects/js-proxy.tq", -+ 
"src/objects/js-regexp-string-iterator.tq", -+ "src/objects/js-regexp.tq", -+ "src/objects/js-weak-refs.tq", -+ "src/objects/literal-objects.tq", -+ "src/objects/map.tq", -+ "src/objects/microtask.tq", -+ "src/objects/module.tq", -+ "src/objects/name.tq", -+ "src/objects/oddball.tq", -+ "src/objects/ordered-hash-table.tq", -+ "src/objects/primitive-heap-object.tq", -+ "src/objects/promise.tq", -+ "src/objects/property-array.tq", -+ "src/objects/property-cell.tq", -+ "src/objects/property-descriptor-object.tq", -+ "src/objects/prototype-info.tq", -+ "src/objects/regexp-match-info.tq", -+ "src/objects/scope-info.tq", -+ "src/objects/script.tq", -+ "src/objects/shared-function-info.tq", -+ "src/objects/source-text-module.tq", -+ "src/objects/stack-frame-info.tq", -+ "src/objects/string.tq", -+ "src/objects/struct.tq", -+ "src/objects/synthetic-module.tq", -+ "src/objects/template-objects.tq", -+ "src/objects/template.tq", -+ "src/wasm/wasm-objects.tq", -+ "test/torque/test-torque.tq", -+ "third_party/v8/builtins/array-sort.tq", -+] -+ -+if (!v8_enable_i18n_support) { -+ torque_files -= [ "src/objects/intl-objects.tq" ] -+} -+ -+# Template for running torque -+# When building with v8_verify_torque_generation_invariance=true we need -+# to be able to run torque for both 32 and 64 bits in the same build -+template("run_torque") { -+ if (target_name == "") { -+ suffix = "" -+ } else { -+ suffix = "_$target_name" -+ } -+ -+ toolchain = invoker.toolchain -+ -+ action("run_torque" + suffix) { -+ visibility = [ -+ ":*", -+ "tools/debug_helper/:*", -+ "tools/gcmole/:*", -+ "test/cctest/:*", -+ ] -+ -+ deps = [ ":torque($toolchain)" ] -+ -+ script = "tools/run.py" -+ -+ sources = torque_files -+ -+ destination_folder = "$target_gen_dir/torque-generated$suffix" -+ -+ files = [ -+ "bit-fields-tq.h", -+ "builtin-definitions-tq.h", -+ "interface-descriptors-tq.inc", -+ "factory-tq.cc", -+ "factory-tq.inc", -+ "field-offsets-tq.h", -+ "class-verifiers-tq.cc", -+ "class-verifiers-tq.h", -+ "enum-verifiers-tq.cc", -+ "objects-printer-tq.cc", -+ "objects-body-descriptors-tq-inl.inc", -+ "class-definitions-tq.cc", -+ "class-definitions-tq-inl.h", -+ "class-definitions-tq.h", -+ "class-debug-readers-tq.cc", -+ "class-debug-readers-tq.h", -+ "exported-macros-assembler-tq.cc", -+ "exported-macros-assembler-tq.h", -+ "csa-types-tq.h", -+ "instance-types-tq.h", -+ "internal-class-definitions-tq.h", -+ "internal-class-definitions-tq-inl.h", -+ "exported-class-definitions-tq.h", -+ "exported-class-definitions-tq-inl.h", -+ ] -+ -+ outputs = [] -+ foreach(file, files) { -+ outputs += [ "$destination_folder/$file" ] -+ } -+ -+ foreach(file, torque_files) { -+ filetq = string_replace(file, ".tq", "-tq-csa") -+ outputs += [ -+ "$destination_folder/$filetq.cc", -+ "$destination_folder/$filetq.h", -+ ] -+ } -+ -+ args = [ -+ "./" + rebase_path( -+ get_label_info(":torque($toolchain)", "root_out_dir") + "/torque", -+ root_build_dir), -+ "-o", -+ rebase_path("$destination_folder", root_build_dir), -+ "-v8-root", -+ rebase_path(".", root_build_dir), -+ ] -+ if (defined(invoker.args)) { -+ args += invoker.args -+ } -+ args += torque_files -+ } -+} -+ -+# Default run_torque action -+run_torque("") { -+ toolchain = v8_generator_toolchain -+} -+ -+if (v8_verify_torque_generation_invariance) { -+ run_torque("x86") { -+ toolchain = "//build/toolchain/linux:clang_x86" -+ } -+ -+ run_torque("x64") { -+ args = [ "-m32" ] -+ toolchain = "//build/toolchain/linux:clang_x64" -+ } -+ -+ action("compare_torque_runs") { -+ deps = [ -+ 
":run_torque_x64", -+ ":run_torque_x86", -+ ] -+ report_file = "$target_gen_dir/torque_comparison_results.txt" -+ script = "tools/compare_torque_output.py" -+ args = [ -+ rebase_path("$target_gen_dir/torque-generated_x64", root_build_dir), -+ rebase_path("$target_gen_dir/torque-generated_x86", root_build_dir), -+ rebase_path(report_file, root_build_dir), -+ ] -+ outputs = [ report_file ] -+ } -+} -+ -+group("v8_maybe_icu") { -+ if (v8_enable_i18n_support) { -+ public_deps = [ "//third_party/icu" ] -+ } -+} -+ -+v8_source_set("torque_generated_initializers") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_tracing", -+ ] -+ -+ public_deps = [ ":v8_maybe_icu" ] -+ -+ sources = [ -+ "$target_gen_dir/torque-generated/csa-types-tq.h", -+ "$target_gen_dir/torque-generated/enum-verifiers-tq.cc", -+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", -+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", -+ "src/torque/runtime-support.h", -+ ] -+ foreach(file, torque_files) { -+ filetq = string_replace(file, ".tq", "-tq-csa") -+ sources += [ -+ "$target_gen_dir/torque-generated/$filetq.cc", -+ "$target_gen_dir/torque-generated/$filetq.h", -+ ] -+ } -+ -+ configs = [ ":internal_config" ] -+} -+ -+v8_source_set("torque_generated_definitions") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_tracing", -+ ] -+ -+ public_deps = [ ":v8_maybe_icu" ] -+ -+ sources = [ -+ "$target_gen_dir/torque-generated/class-definitions-tq.cc", -+ "$target_gen_dir/torque-generated/class-verifiers-tq.cc", -+ "$target_gen_dir/torque-generated/class-verifiers-tq.h", -+ "$target_gen_dir/torque-generated/factory-tq.cc", -+ "$target_gen_dir/torque-generated/objects-printer-tq.cc", -+ ] -+ -+ configs = [ ":internal_config" ] -+} -+ -+action("generate_bytecode_builtins_list") { -+ script = "tools/run.py" -+ outputs = [ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h" ] -+ deps = [ ":bytecode_builtins_list_generator($v8_generator_toolchain)" ] -+ args = [ -+ "./" + rebase_path( -+ get_label_info( -+ ":bytecode_builtins_list_generator($v8_generator_toolchain)", -+ "root_out_dir") + "/bytecode_builtins_list_generator", -+ root_build_dir), -+ rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h", -+ root_build_dir), -+ ] -+} -+ -+# Template to generate different V8 snapshots based on different runtime flags. -+# Can be invoked with run_mksnapshot(). The target will resolve to -+# run_mksnapshot_. If is "default", no file suffixes will be used. -+# Otherwise files are suffixed, e.g. embedded_.S and -+# snapshot_blob_.bin. -+# -+# The template exposes the variables: -+# args: additional flags for mksnapshots -+# embedded_suffix: a camel case suffix for method names in the embedded -+# snapshot. -+template("run_mksnapshot") { -+ name = target_name -+ if (name == "default") { -+ suffix = "" -+ } else { -+ suffix = "_$name" -+ } -+ action("run_mksnapshot_" + name) { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. 
-+ -+ deps = [ ":mksnapshot($v8_snapshot_toolchain)" ] -+ -+ script = "tools/run.py" -+ -+ sources = [] -+ -+ outputs = [] -+ -+ data = [] -+ -+ args = [ -+ "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)", -+ "root_out_dir") + "/mksnapshot", -+ root_build_dir), -+ "--turbo_instruction_scheduling", -+ -+ # In cross builds, the snapshot may be generated for both the host and -+ # target toolchains. The same host binary is used to generate both, so -+ # mksnapshot needs to know which target OS to use at runtime. It's weird, -+ # but the target OS is really |current_os|. -+ "--target_os=$current_os", -+ "--target_arch=$current_cpu", -+ -+ "--embedded_src", -+ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir), -+ ] -+ -+ # This is needed to distinguish between generating code for the simulator -+ # and cross-compiling. The latter may need to run code on the host with the -+ # simulator but cannot use simulator-specific instructions. -+ if (target_is_simulator) { -+ args += [ "--target_is_simulator" ] -+ } -+ -+ args += invoker.args -+ -+ outputs += [ "$target_gen_dir/embedded${suffix}.S" ] -+ if (invoker.embedded_variant != "") { -+ args += [ -+ "--embedded_variant", -+ invoker.embedded_variant, -+ ] -+ } -+ -+ if (v8_random_seed != "0") { -+ args += [ -+ "--random-seed", -+ v8_random_seed, -+ ] -+ } -+ -+ if (v8_os_page_size != "0") { -+ args += [ -+ "--v8_os_page_size", -+ v8_os_page_size, -+ ] -+ } -+ -+ if (v8_use_external_startup_data) { -+ outputs += [ "$root_out_dir/snapshot_blob${suffix}.bin" ] -+ data += [ "$root_out_dir/snapshot_blob${suffix}.bin" ] -+ args += [ -+ "--startup_blob", -+ rebase_path("$root_out_dir/snapshot_blob${suffix}.bin", root_build_dir), -+ ] -+ } else { -+ outputs += [ "$target_gen_dir/snapshot${suffix}.cc" ] -+ args += [ -+ "--startup_src", -+ rebase_path("$target_gen_dir/snapshot${suffix}.cc", root_build_dir), -+ ] -+ } -+ -+ if (v8_embed_script != "") { -+ sources += [ v8_embed_script ] -+ args += [ rebase_path(v8_embed_script, root_build_dir) ] -+ } -+ -+ if (v8_enable_snapshot_code_comments) { -+ args += [ "--code-comments" ] -+ } -+ -+ if (v8_enable_snapshot_native_code_counters) { -+ args += [ "--native-code-counters" ] -+ } else { -+ # --native-code-counters is the default in debug mode so make sure we can -+ # unset it. -+ args += [ "--no-native-code-counters" ] -+ } -+ -+ if (v8_enable_fast_mksnapshot) { -+ args += [ -+ "--no-turbo-rewrite-far-jumps", -+ "--no-turbo-verify-allocation", -+ ] -+ -+ if (v8_enable_debugging_features && v8_enable_slow_dchecks) { -+ # mksnapshot only accepts this flag if ENABLE_SLOW_DCHECKS is defined. 
-+ args += [ "--no-enable-slow-asserts" ] -+ } -+ } -+ -+ if (v8_enable_verify_heap) { -+ args += [ "--verify-heap" ] -+ } -+ } -+} -+ -+run_mksnapshot("default") { -+ args = [] -+ embedded_variant = "Default" -+} -+if (emit_builtins_as_inline_asm) { -+ asm_to_inline_asm("default") { -+ args = [] -+ } -+} -+if (v8_use_multi_snapshots) { -+ run_mksnapshot("trusted") { -+ args = [ "--no-untrusted-code-mitigations" ] -+ embedded_variant = "Trusted" -+ } -+ if (emit_builtins_as_inline_asm) { -+ asm_to_inline_asm("trusted") { -+ args = [] -+ } -+ } -+} -+ -+action("v8_dump_build_config") { -+ script = "tools/testrunner/utils/dump_build_config.py" -+ outputs = [ "$root_out_dir/v8_build_config.json" ] -+ is_gcov_coverage = v8_code_coverage && !is_clang -+ is_full_debug = v8_enable_debugging_features && !v8_optimized_debug -+ args = [ -+ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), -+ "current_cpu=\"$current_cpu\"", -+ "dcheck_always_on=$dcheck_always_on", -+ "is_android=$is_android", -+ "is_asan=$is_asan", -+ "is_cfi=$is_cfi", -+ "is_clang=$is_clang", -+ "is_component_build=$is_component_build", -+ "is_debug=$v8_enable_debugging_features", -+ "is_full_debug=$is_full_debug", -+ "is_gcov_coverage=$is_gcov_coverage", -+ "is_msan=$is_msan", -+ "is_tsan=$is_tsan", -+ "is_ubsan_vptr=$is_ubsan_vptr", -+ "target_cpu=\"$target_cpu\"", -+ "v8_current_cpu=\"$v8_current_cpu\"", -+ "v8_enable_i18n_support=$v8_enable_i18n_support", -+ "v8_enable_verify_predictable=$v8_enable_verify_predictable", -+ "v8_target_cpu=\"$v8_target_cpu\"", -+ "v8_enable_verify_csa=$v8_enable_verify_csa", -+ "v8_enable_lite_mode=$v8_enable_lite_mode", -+ "v8_enable_pointer_compression=$v8_enable_pointer_compression", -+ ] -+ -+ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || -+ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { -+ args += [ -+ "mips_arch_variant=\"$mips_arch_variant\"", -+ "mips_use_msa=$mips_use_msa", -+ ] -+ } -+} -+ -+############################################################################### -+# Source Sets (aka static libraries) -+# -+ -+v8_source_set("v8_snapshot") { -+ visibility = [ ":*" ] # Targets in this file can depend on this. -+ -+ deps = [] -+ public_deps = [ -+ # This should be public so downstream targets can declare the snapshot -+ # output file as their inputs. -+ ":run_mksnapshot_default", -+ ] -+ -+ # Do not publicize any header to remove build dependency. -+ public = [] -+ -+ sources = [ "src/init/setup-isolate-deserialize.cc" ] -+ if (emit_builtins_as_inline_asm) { -+ deps += [ ":asm_to_inline_asm_default" ] -+ sources += [ "$target_gen_dir/embedded.cc" ] -+ } else { -+ sources += [ "$target_gen_dir/embedded.S" ] -+ } -+ -+ configs = [ ":internal_config" ] -+ -+ if (v8_use_external_startup_data) { -+ deps += [ ":v8_base" ] -+ -+ sources += [ "src/snapshot/snapshot-external.cc" ] -+ -+ if (v8_use_multi_snapshots) { -+ public_deps += [ ":run_mksnapshot_trusted" ] -+ if (emit_builtins_as_inline_asm) { -+ deps += [ ":asm_to_inline_asm_trusted" ] -+ sources += [ "$target_gen_dir/embedded_trusted.cc" ] -+ } else { -+ sources += [ "$target_gen_dir/embedded_trusted.S" ] -+ } -+ } -+ } else { -+ # Also top-level visibility targets can depend on this. 
-+ visibility += [ "//:gn_visibility" ] -+ -+ public_deps += [ ":v8_maybe_icu" ] -+ -+ sources += [ "$target_gen_dir/snapshot.cc" ] -+ } -+} -+ -+v8_source_set("v8_initializers") { -+ visibility = [ -+ ":*", -+ "test/cctest:*", -+ ] -+ -+ deps = [ -+ ":torque_generated_initializers", -+ ":v8_tracing", -+ ] -+ -+ sources = [ -+ ### gcmole(all) ### -+ "src/builtins/builtins-array-gen.cc", -+ "src/builtins/builtins-array-gen.h", -+ "src/builtins/builtins-async-function-gen.cc", -+ "src/builtins/builtins-async-gen.cc", -+ "src/builtins/builtins-async-gen.h", -+ "src/builtins/builtins-async-generator-gen.cc", -+ "src/builtins/builtins-async-iterator-gen.cc", -+ "src/builtins/builtins-bigint-gen.cc", -+ "src/builtins/builtins-bigint-gen.h", -+ "src/builtins/builtins-call-gen.cc", -+ "src/builtins/builtins-call-gen.h", -+ "src/builtins/builtins-collections-gen.cc", -+ "src/builtins/builtins-constructor-gen.cc", -+ "src/builtins/builtins-constructor-gen.h", -+ "src/builtins/builtins-constructor.h", -+ "src/builtins/builtins-conversion-gen.cc", -+ "src/builtins/builtins-data-view-gen.h", -+ "src/builtins/builtins-date-gen.cc", -+ "src/builtins/builtins-debug-gen.cc", -+ "src/builtins/builtins-function-gen.cc", -+ "src/builtins/builtins-generator-gen.cc", -+ "src/builtins/builtins-global-gen.cc", -+ "src/builtins/builtins-handler-gen.cc", -+ "src/builtins/builtins-ic-gen.cc", -+ "src/builtins/builtins-internal-gen.cc", -+ "src/builtins/builtins-interpreter-gen.cc", -+ "src/builtins/builtins-intl-gen.cc", -+ "src/builtins/builtins-iterator-gen.cc", -+ "src/builtins/builtins-iterator-gen.h", -+ "src/builtins/builtins-lazy-gen.cc", -+ "src/builtins/builtins-lazy-gen.h", -+ "src/builtins/builtins-microtask-queue-gen.cc", -+ "src/builtins/builtins-number-gen.cc", -+ "src/builtins/builtins-object-gen.cc", -+ "src/builtins/builtins-promise-gen.cc", -+ "src/builtins/builtins-promise-gen.h", -+ "src/builtins/builtins-proxy-gen.cc", -+ "src/builtins/builtins-proxy-gen.h", -+ "src/builtins/builtins-regexp-gen.cc", -+ "src/builtins/builtins-regexp-gen.h", -+ "src/builtins/builtins-sharedarraybuffer-gen.cc", -+ "src/builtins/builtins-string-gen.cc", -+ "src/builtins/builtins-string-gen.h", -+ "src/builtins/builtins-typed-array-gen.cc", -+ "src/builtins/builtins-typed-array-gen.h", -+ "src/builtins/builtins-utils-gen.h", -+ "src/builtins/builtins-wasm-gen.cc", -+ "src/builtins/builtins-wasm-gen.h", -+ "src/builtins/growable-fixed-array-gen.cc", -+ "src/builtins/growable-fixed-array-gen.h", -+ "src/builtins/setup-builtins-internal.cc", -+ "src/codegen/code-stub-assembler.cc", -+ "src/codegen/code-stub-assembler.h", -+ "src/heap/setup-heap-internal.cc", -+ "src/ic/accessor-assembler.cc", -+ "src/ic/accessor-assembler.h", -+ "src/ic/binary-op-assembler.cc", -+ "src/ic/binary-op-assembler.h", -+ "src/ic/keyed-store-generic.cc", -+ "src/ic/keyed-store-generic.h", -+ "src/interpreter/interpreter-assembler.cc", -+ "src/interpreter/interpreter-assembler.h", -+ "src/interpreter/interpreter-generator.cc", -+ "src/interpreter/interpreter-generator.h", -+ "src/interpreter/interpreter-intrinsics-generator.cc", -+ "src/interpreter/interpreter-intrinsics-generator.h", -+ ] -+ -+ if (v8_current_cpu == "x86") { -+ sources += [ -+ ### gcmole(arch:ia32) ### -+ "src/builtins/ia32/builtins-ia32.cc", -+ ] -+ } else if (v8_current_cpu == "x64") { -+ sources += [ -+ ### gcmole(arch:x64) ### -+ "src/builtins/x64/builtins-x64.cc", -+ ] -+ } else if (v8_current_cpu == "arm") { -+ sources += [ -+ ### gcmole(arch:arm) ### -+ 
"src/builtins/arm/builtins-arm.cc", -+ ] -+ } else if (v8_current_cpu == "arm64") { -+ sources += [ -+ ### gcmole(arch:arm64) ### -+ "src/builtins/arm64/builtins-arm64.cc", -+ ] -+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { -+ sources += [ -+ ### gcmole(arch:mipsel) ### -+ "src/builtins/mips/builtins-mips.cc", -+ ] -+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { -+ sources += [ -+ ### gcmole(arch:mips64el) ### -+ "src/builtins/mips64/builtins-mips64.cc", -+ ] -+ } else if (v8_current_cpu == "ppc") { -+ sources += [ -+ ### gcmole(arch:ppc) ### -+ "src/builtins/ppc/builtins-ppc.cc", -+ ] -+ } else if (v8_current_cpu == "ppc64") { -+ sources += [ -+ ### gcmole(arch:ppc64) ### -+ "src/builtins/ppc/builtins-ppc.cc", -+ ] -+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { -+ sources += [ -+ ### gcmole(arch:s390) ### -+ "src/builtins/s390/builtins-s390.cc", -+ ] -+ } -+ -+ if (!v8_enable_i18n_support) { -+ sources -= [ "src/builtins/builtins-intl-gen.cc" ] -+ } -+ -+ configs = [ ":internal_config" ] -+} -+ -+v8_source_set("v8_init") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ deps = [ -+ ":v8_initializers", -+ ":v8_tracing", -+ ] -+ -+ sources = [ -+ ### gcmole(all) ### -+ "src/init/setup-isolate-full.cc", -+ ] -+ -+ public_deps = [ ":v8_maybe_icu" ] -+ -+ configs = [ ":internal_config" ] -+} -+ -+# This is split out to be a non-code containing target that the Chromium browser -+# DLL can depend upon to get only a version string. -+v8_header_set("v8_version") { -+ configs = [ ":internal_config" ] -+ -+ sources = [ -+ "include/v8-value-serializer-version.h", -+ "include/v8-version-string.h", -+ "include/v8-version.h", -+ ] -+} -+ -+# This is split out to be a non-code containing target that the Chromium browser -+# can depend upon to get basic v8 types. -+v8_header_set("v8_headers") { -+ configs = [ ":internal_config" ] -+ public_configs = [ ":v8_header_features" ] -+ -+ sources = [ -+ "include/v8-fast-api-calls.h", -+ "include/v8-internal.h", -+ "include/v8.h", -+ "include/v8config.h", -+ ] -+ -+ sources += [ -+ # The following headers cannot be platform-specific. The include validation -+ # of `gn gen $dir --check` requires all header files to be available on all -+ # platforms. -+ "include/v8-wasm-trap-handler-posix.h", -+ "include/v8-wasm-trap-handler-win.h", -+ ] -+ -+ deps = [ ":v8_version" ] -+} -+ -+# This is split out to share basic headers with Torque. -+v8_header_set("v8_shared_internal_headers") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. 
-+ configs = [ ":internal_config" ] -+ -+ sources = [ "src/common/globals.h" ] -+ -+ deps = [ ":v8_headers" ] -+} -+ -+v8_compiler_sources = [ -+ ### gcmole(all) ### -+ "src/compiler/access-builder.cc", -+ "src/compiler/access-builder.h", -+ "src/compiler/access-info.cc", -+ "src/compiler/access-info.h", -+ "src/compiler/add-type-assertions-reducer.cc", -+ "src/compiler/add-type-assertions-reducer.h", -+ "src/compiler/all-nodes.cc", -+ "src/compiler/all-nodes.h", -+ "src/compiler/allocation-builder-inl.h", -+ "src/compiler/allocation-builder.h", -+ "src/compiler/backend/code-generator-impl.h", -+ "src/compiler/backend/code-generator.cc", -+ "src/compiler/backend/code-generator.h", -+ "src/compiler/backend/frame-elider.cc", -+ "src/compiler/backend/frame-elider.h", -+ "src/compiler/backend/gap-resolver.cc", -+ "src/compiler/backend/gap-resolver.h", -+ "src/compiler/backend/instruction-codes.h", -+ "src/compiler/backend/instruction-scheduler.cc", -+ "src/compiler/backend/instruction-scheduler.h", -+ "src/compiler/backend/instruction-selector-impl.h", -+ "src/compiler/backend/instruction-selector.cc", -+ "src/compiler/backend/instruction-selector.h", -+ "src/compiler/backend/instruction.cc", -+ "src/compiler/backend/instruction.h", -+ "src/compiler/backend/jump-threading.cc", -+ "src/compiler/backend/jump-threading.h", -+ "src/compiler/backend/live-range-separator.cc", -+ "src/compiler/backend/live-range-separator.h", -+ "src/compiler/backend/move-optimizer.cc", -+ "src/compiler/backend/move-optimizer.h", -+ "src/compiler/backend/register-allocator-verifier.cc", -+ "src/compiler/backend/register-allocator-verifier.h", -+ "src/compiler/backend/register-allocator.cc", -+ "src/compiler/backend/register-allocator.h", -+ "src/compiler/backend/unwinding-info-writer.h", -+ "src/compiler/basic-block-instrumentor.cc", -+ "src/compiler/basic-block-instrumentor.h", -+ "src/compiler/branch-elimination.cc", -+ "src/compiler/branch-elimination.h", -+ "src/compiler/bytecode-analysis.cc", -+ "src/compiler/bytecode-analysis.h", -+ "src/compiler/bytecode-graph-builder.cc", -+ "src/compiler/bytecode-graph-builder.h", -+ "src/compiler/bytecode-liveness-map.cc", -+ "src/compiler/bytecode-liveness-map.h", -+ "src/compiler/c-linkage.cc", -+ "src/compiler/checkpoint-elimination.cc", -+ "src/compiler/checkpoint-elimination.h", -+ "src/compiler/code-assembler.cc", -+ "src/compiler/code-assembler.h", -+ "src/compiler/common-node-cache.cc", -+ "src/compiler/common-node-cache.h", -+ "src/compiler/common-operator-reducer.cc", -+ "src/compiler/common-operator-reducer.h", -+ "src/compiler/common-operator.cc", -+ "src/compiler/common-operator.h", -+ "src/compiler/compilation-dependencies.cc", -+ "src/compiler/compilation-dependencies.h", -+ "src/compiler/compiler-source-position-table.cc", -+ "src/compiler/compiler-source-position-table.h", -+ "src/compiler/constant-folding-reducer.cc", -+ "src/compiler/constant-folding-reducer.h", -+ "src/compiler/control-equivalence.cc", -+ "src/compiler/control-equivalence.h", -+ "src/compiler/control-flow-optimizer.cc", -+ "src/compiler/control-flow-optimizer.h", -+ "src/compiler/csa-load-elimination.cc", -+ "src/compiler/csa-load-elimination.h", -+ "src/compiler/dead-code-elimination.cc", -+ "src/compiler/dead-code-elimination.h", -+ "src/compiler/decompression-optimizer.cc", -+ "src/compiler/decompression-optimizer.h", -+ "src/compiler/diamond.h", -+ "src/compiler/effect-control-linearizer.cc", -+ "src/compiler/effect-control-linearizer.h", -+ "src/compiler/escape-analysis-reducer.cc", 
-+ "src/compiler/escape-analysis-reducer.h", -+ "src/compiler/escape-analysis.cc", -+ "src/compiler/escape-analysis.h", -+ "src/compiler/feedback-source.cc", -+ "src/compiler/feedback-source.h", -+ "src/compiler/frame-states.cc", -+ "src/compiler/frame-states.h", -+ "src/compiler/frame.cc", -+ "src/compiler/frame.h", -+ "src/compiler/functional-list.h", -+ "src/compiler/globals.h", -+ "src/compiler/graph-assembler.cc", -+ "src/compiler/graph-assembler.h", -+ "src/compiler/graph-reducer.cc", -+ "src/compiler/graph-reducer.h", -+ "src/compiler/graph-trimmer.cc", -+ "src/compiler/graph-trimmer.h", -+ "src/compiler/graph-visualizer.cc", -+ "src/compiler/graph-visualizer.h", -+ "src/compiler/graph.cc", -+ "src/compiler/graph.h", -+ "src/compiler/int64-lowering.cc", -+ "src/compiler/int64-lowering.h", -+ "src/compiler/js-call-reducer.cc", -+ "src/compiler/js-call-reducer.h", -+ "src/compiler/js-context-specialization.cc", -+ "src/compiler/js-context-specialization.h", -+ "src/compiler/js-create-lowering.cc", -+ "src/compiler/js-create-lowering.h", -+ "src/compiler/js-generic-lowering.cc", -+ "src/compiler/js-generic-lowering.h", -+ "src/compiler/js-graph.cc", -+ "src/compiler/js-graph.h", -+ "src/compiler/js-heap-broker.cc", -+ "src/compiler/js-heap-broker.h", -+ "src/compiler/js-heap-copy-reducer.cc", -+ "src/compiler/js-heap-copy-reducer.h", -+ "src/compiler/js-inlining-heuristic.cc", -+ "src/compiler/js-inlining-heuristic.h", -+ "src/compiler/js-inlining.cc", -+ "src/compiler/js-inlining.h", -+ "src/compiler/js-intrinsic-lowering.cc", -+ "src/compiler/js-intrinsic-lowering.h", -+ "src/compiler/js-native-context-specialization.cc", -+ "src/compiler/js-native-context-specialization.h", -+ "src/compiler/js-operator.cc", -+ "src/compiler/js-operator.h", -+ "src/compiler/js-type-hint-lowering.cc", -+ "src/compiler/js-type-hint-lowering.h", -+ "src/compiler/js-typed-lowering.cc", -+ "src/compiler/js-typed-lowering.h", -+ "src/compiler/linkage.cc", -+ "src/compiler/linkage.h", -+ "src/compiler/load-elimination.cc", -+ "src/compiler/load-elimination.h", -+ "src/compiler/loop-analysis.cc", -+ "src/compiler/loop-analysis.h", -+ "src/compiler/loop-peeling.cc", -+ "src/compiler/loop-peeling.h", -+ "src/compiler/loop-variable-optimizer.cc", -+ "src/compiler/loop-variable-optimizer.h", -+ "src/compiler/machine-graph-verifier.cc", -+ "src/compiler/machine-graph-verifier.h", -+ "src/compiler/machine-graph.cc", -+ "src/compiler/machine-graph.h", -+ "src/compiler/machine-operator-reducer.cc", -+ "src/compiler/machine-operator-reducer.h", -+ "src/compiler/machine-operator.cc", -+ "src/compiler/machine-operator.h", -+ "src/compiler/map-inference.cc", -+ "src/compiler/map-inference.h", -+ "src/compiler/memory-lowering.cc", -+ "src/compiler/memory-lowering.h", -+ "src/compiler/memory-optimizer.cc", -+ "src/compiler/memory-optimizer.h", -+ "src/compiler/node-aux-data.h", -+ "src/compiler/node-cache.h", -+ "src/compiler/node-marker.cc", -+ "src/compiler/node-marker.h", -+ "src/compiler/node-matchers.cc", -+ "src/compiler/node-matchers.h", -+ "src/compiler/node-origin-table.cc", -+ "src/compiler/node-origin-table.h", -+ "src/compiler/node-properties.cc", -+ "src/compiler/node-properties.h", -+ "src/compiler/node.cc", -+ "src/compiler/node.h", -+ "src/compiler/opcodes.cc", -+ "src/compiler/opcodes.h", -+ "src/compiler/operation-typer.cc", -+ "src/compiler/operation-typer.h", -+ "src/compiler/operator-properties.cc", -+ "src/compiler/operator-properties.h", -+ "src/compiler/operator.cc", -+ "src/compiler/operator.h", 
-+ "src/compiler/osr.cc", -+ "src/compiler/osr.h", -+ "src/compiler/per-isolate-compiler-cache.h", -+ "src/compiler/persistent-map.h", -+ "src/compiler/pipeline-statistics.cc", -+ "src/compiler/pipeline-statistics.h", -+ "src/compiler/pipeline.cc", -+ "src/compiler/pipeline.h", -+ "src/compiler/property-access-builder.cc", -+ "src/compiler/property-access-builder.h", -+ "src/compiler/raw-machine-assembler.cc", -+ "src/compiler/raw-machine-assembler.h", -+ "src/compiler/redundancy-elimination.cc", -+ "src/compiler/redundancy-elimination.h", -+ "src/compiler/refs-map.cc", -+ "src/compiler/refs-map.h", -+ "src/compiler/representation-change.cc", -+ "src/compiler/representation-change.h", -+ "src/compiler/schedule.cc", -+ "src/compiler/schedule.h", -+ "src/compiler/scheduled-machine-lowering.cc", -+ "src/compiler/scheduled-machine-lowering.h", -+ "src/compiler/scheduler.cc", -+ "src/compiler/scheduler.h", -+ "src/compiler/select-lowering.cc", -+ "src/compiler/select-lowering.h", -+ "src/compiler/serializer-for-background-compilation.cc", -+ "src/compiler/serializer-for-background-compilation.h", -+ "src/compiler/serializer-hints.h", -+ "src/compiler/simd-scalar-lowering.cc", -+ "src/compiler/simd-scalar-lowering.h", -+ "src/compiler/simplified-lowering.cc", -+ "src/compiler/simplified-lowering.h", -+ "src/compiler/simplified-operator-reducer.cc", -+ "src/compiler/simplified-operator-reducer.h", -+ "src/compiler/simplified-operator.cc", -+ "src/compiler/simplified-operator.h", -+ "src/compiler/state-values-utils.cc", -+ "src/compiler/state-values-utils.h", -+ "src/compiler/store-store-elimination.cc", -+ "src/compiler/store-store-elimination.h", -+ "src/compiler/type-cache.cc", -+ "src/compiler/type-cache.h", -+ "src/compiler/type-narrowing-reducer.cc", -+ "src/compiler/type-narrowing-reducer.h", -+ "src/compiler/typed-optimization.cc", -+ "src/compiler/typed-optimization.h", -+ "src/compiler/typer.cc", -+ "src/compiler/typer.h", -+ "src/compiler/types.cc", -+ "src/compiler/types.h", -+ "src/compiler/value-numbering-reducer.cc", -+ "src/compiler/value-numbering-reducer.h", -+ "src/compiler/verifier.cc", -+ "src/compiler/verifier.h", -+ "src/compiler/wasm-compiler.cc", -+ "src/compiler/wasm-compiler.h", -+ "src/compiler/write-barrier-kind.h", -+ "src/compiler/zone-stats.cc", -+ "src/compiler/zone-stats.h", -+] -+ -+# The src/compiler files with optimizations. -+v8_source_set("v8_compiler_opt") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ sources = v8_compiler_sources -+ -+ public_deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_maybe_icu", -+ ":v8_tracing", -+ ] -+ -+ if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) { -+ # The :no_optimize config is added to v8_add_configs in v8.gni. -+ remove_configs = [ "//build/config/compiler:no_optimize" ] -+ configs = [ ":always_optimize" ] -+ } else { -+ # Without this else branch, gn fails to generate build files for non-debug -+ # builds (because we try to remove a config that is not present). -+ # So we include it, even if this config is not used outside of debug builds. -+ configs = [ ":internal_config" ] -+ } -+} -+ -+# The src/compiler files with default optimization behavior. -+v8_source_set("v8_compiler") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. 
-+ -+ sources = v8_compiler_sources -+ -+ public_deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_maybe_icu", -+ ":v8_tracing", -+ ] -+ -+ configs = [ ":internal_config" ] -+} -+ -+group("v8_compiler_for_mksnapshot") { -+ if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) { -+ deps = [ ":v8_compiler_opt" ] -+ } else { -+ deps = [ ":v8_compiler" ] -+ } -+} -+ -+# Any target using trace events must directly or indirectly depend on -+# v8_tracing. -+group("v8_tracing") { -+ if (v8_use_perfetto) { -+ if (build_with_chromium) { -+ public_deps = [ "//third_party/perfetto:libperfetto" ] -+ } else { -+ public_deps = [ ":v8_libperfetto" ] -+ } -+ } -+} -+ -+v8_source_set("v8_base_without_compiler") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ # Split static libraries on windows into two. -+ split_count = 2 -+ -+ sources = [ -+ "//base/trace_event/common/trace_event_common.h", -+ -+ ### gcmole(all) ### -+ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h", -+ "include/cppgc/common.h", -+ "include/v8-fast-api-calls.h", -+ "include/v8-inspector-protocol.h", -+ "include/v8-inspector.h", -+ "include/v8-internal.h", -+ "include/v8-platform.h", -+ "include/v8-profiler.h", -+ "include/v8-util.h", -+ "include/v8-wasm-trap-handler-posix.h", -+ "include/v8.h", -+ "include/v8config.h", -+ "src/api/api-arguments-inl.h", -+ "src/api/api-arguments.cc", -+ "src/api/api-arguments.h", -+ "src/api/api-natives.cc", -+ "src/api/api-natives.h", -+ "src/api/api.cc", -+ "src/api/api.h", -+ "src/asmjs/asm-js.cc", -+ "src/asmjs/asm-js.h", -+ "src/asmjs/asm-names.h", -+ "src/asmjs/asm-parser.cc", -+ "src/asmjs/asm-parser.h", -+ "src/asmjs/asm-scanner.cc", -+ "src/asmjs/asm-scanner.h", -+ "src/asmjs/asm-types.cc", -+ "src/asmjs/asm-types.h", -+ "src/ast/ast-function-literal-id-reindexer.cc", -+ "src/ast/ast-function-literal-id-reindexer.h", -+ "src/ast/ast-source-ranges.h", -+ "src/ast/ast-traversal-visitor.h", -+ "src/ast/ast-value-factory.cc", -+ "src/ast/ast-value-factory.h", -+ "src/ast/ast.cc", -+ "src/ast/ast.h", -+ "src/ast/modules.cc", -+ "src/ast/modules.h", -+ "src/ast/prettyprinter.cc", -+ "src/ast/prettyprinter.h", -+ "src/ast/scopes.cc", -+ "src/ast/scopes.h", -+ "src/ast/source-range-ast-visitor.cc", -+ "src/ast/source-range-ast-visitor.h", -+ "src/ast/variables.cc", -+ "src/ast/variables.h", -+ "src/builtins/accessors.cc", -+ "src/builtins/accessors.h", -+ "src/builtins/builtins-api.cc", -+ "src/builtins/builtins-array.cc", -+ "src/builtins/builtins-arraybuffer.cc", -+ "src/builtins/builtins-async-module.cc", -+ "src/builtins/builtins-bigint.cc", -+ "src/builtins/builtins-call.cc", -+ "src/builtins/builtins-callsite.cc", -+ "src/builtins/builtins-collections.cc", -+ "src/builtins/builtins-console.cc", -+ "src/builtins/builtins-constructor.h", -+ "src/builtins/builtins-dataview.cc", -+ "src/builtins/builtins-date.cc", -+ "src/builtins/builtins-definitions.h", -+ "src/builtins/builtins-descriptors.h", -+ "src/builtins/builtins-error.cc", -+ "src/builtins/builtins-function.cc", -+ "src/builtins/builtins-global.cc", -+ "src/builtins/builtins-internal.cc", -+ "src/builtins/builtins-intl.cc", -+ "src/builtins/builtins-json.cc", -+ "src/builtins/builtins-number.cc", -+ "src/builtins/builtins-object.cc", -+ "src/builtins/builtins-promise.h", -+ "src/builtins/builtins-reflect.cc", -+ "src/builtins/builtins-regexp.cc", -+ "src/builtins/builtins-sharedarraybuffer.cc", -+ "src/builtins/builtins-string.cc", -+ 
"src/builtins/builtins-symbol.cc", -+ "src/builtins/builtins-trace.cc", -+ "src/builtins/builtins-typed-array.cc", -+ "src/builtins/builtins-utils-inl.h", -+ "src/builtins/builtins-utils.h", -+ "src/builtins/builtins-weak-refs.cc", -+ "src/builtins/builtins.cc", -+ "src/builtins/builtins.h", -+ "src/builtins/constants-table-builder.cc", -+ "src/builtins/constants-table-builder.h", -+ "src/codegen/assembler-arch.h", -+ "src/codegen/assembler-inl.h", -+ "src/codegen/assembler.cc", -+ "src/codegen/assembler.h", -+ "src/codegen/bailout-reason.cc", -+ "src/codegen/bailout-reason.h", -+ "src/codegen/callable.h", -+ "src/codegen/code-comments.cc", -+ "src/codegen/code-comments.h", -+ "src/codegen/code-desc.cc", -+ "src/codegen/code-desc.h", -+ "src/codegen/code-factory.cc", -+ "src/codegen/code-factory.h", -+ "src/codegen/code-reference.cc", -+ "src/codegen/code-reference.h", -+ "src/codegen/compilation-cache.cc", -+ "src/codegen/compilation-cache.h", -+ "src/codegen/compiler.cc", -+ "src/codegen/compiler.h", -+ "src/codegen/constant-pool.cc", -+ "src/codegen/constant-pool.h", -+ "src/codegen/constants-arch.h", -+ "src/codegen/cpu-features.h", -+ "src/codegen/external-reference-encoder.cc", -+ "src/codegen/external-reference-encoder.h", -+ "src/codegen/external-reference-table.cc", -+ "src/codegen/external-reference-table.h", -+ "src/codegen/external-reference.cc", -+ "src/codegen/external-reference.h", -+ "src/codegen/flush-instruction-cache.cc", -+ "src/codegen/flush-instruction-cache.h", -+ "src/codegen/handler-table.cc", -+ "src/codegen/handler-table.h", -+ "src/codegen/interface-descriptors.cc", -+ "src/codegen/interface-descriptors.h", -+ "src/codegen/label.h", -+ "src/codegen/machine-type.cc", -+ "src/codegen/machine-type.h", -+ "src/codegen/macro-assembler-inl.h", -+ "src/codegen/macro-assembler.h", -+ "src/codegen/optimized-compilation-info.cc", -+ "src/codegen/optimized-compilation-info.h", -+ "src/codegen/pending-optimization-table.cc", -+ "src/codegen/pending-optimization-table.h", -+ "src/codegen/register-arch.h", -+ "src/codegen/register-configuration.cc", -+ "src/codegen/register-configuration.h", -+ "src/codegen/register.cc", -+ "src/codegen/register.h", -+ "src/codegen/reglist.h", -+ "src/codegen/reloc-info.cc", -+ "src/codegen/reloc-info.h", -+ "src/codegen/safepoint-table.cc", -+ "src/codegen/safepoint-table.h", -+ "src/codegen/signature.h", -+ "src/codegen/source-position-table.cc", -+ "src/codegen/source-position-table.h", -+ "src/codegen/source-position.cc", -+ "src/codegen/source-position.h", -+ "src/codegen/string-constants.cc", -+ "src/codegen/string-constants.h", -+ "src/codegen/tick-counter.cc", -+ "src/codegen/tick-counter.h", -+ "src/codegen/tnode.cc", -+ "src/codegen/tnode.h", -+ "src/codegen/turbo-assembler.cc", -+ "src/codegen/turbo-assembler.h", -+ "src/codegen/unoptimized-compilation-info.cc", -+ "src/codegen/unoptimized-compilation-info.h", -+ "src/common/assert-scope.cc", -+ "src/common/assert-scope.h", -+ "src/common/checks.h", -+ "src/common/external-pointer-inl.h", -+ "src/common/external-pointer.h", -+ "src/common/message-template.h", -+ "src/common/ptr-compr-inl.h", -+ "src/common/ptr-compr.h", -+ "src/compiler-dispatcher/compiler-dispatcher.cc", -+ "src/compiler-dispatcher/compiler-dispatcher.h", -+ "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", -+ "src/compiler-dispatcher/optimizing-compile-dispatcher.h", -+ "src/date/date.cc", -+ "src/date/date.h", -+ "src/date/dateparser-inl.h", -+ "src/date/dateparser.cc", -+ "src/date/dateparser.h", -+ 
"src/debug/debug-coverage.cc", -+ "src/debug/debug-coverage.h", -+ "src/debug/debug-evaluate.cc", -+ "src/debug/debug-evaluate.h", -+ "src/debug/debug-frames.cc", -+ "src/debug/debug-frames.h", -+ "src/debug/debug-interface.h", -+ "src/debug/debug-property-iterator.cc", -+ "src/debug/debug-property-iterator.h", -+ "src/debug/debug-scope-iterator.cc", -+ "src/debug/debug-scope-iterator.h", -+ "src/debug/debug-scopes.cc", -+ "src/debug/debug-scopes.h", -+ "src/debug/debug-stack-trace-iterator.cc", -+ "src/debug/debug-stack-trace-iterator.h", -+ "src/debug/debug-type-profile.cc", -+ "src/debug/debug-type-profile.h", -+ "src/debug/debug.cc", -+ "src/debug/debug.h", -+ "src/debug/interface-types.h", -+ "src/debug/liveedit.cc", -+ "src/debug/liveedit.h", -+ "src/deoptimizer/deoptimize-reason.cc", -+ "src/deoptimizer/deoptimize-reason.h", -+ "src/deoptimizer/deoptimizer.cc", -+ "src/deoptimizer/deoptimizer.h", -+ "src/diagnostics/basic-block-profiler.cc", -+ "src/diagnostics/basic-block-profiler.h", -+ "src/diagnostics/code-tracer.h", -+ "src/diagnostics/compilation-statistics.cc", -+ "src/diagnostics/compilation-statistics.h", -+ "src/diagnostics/disasm.h", -+ "src/diagnostics/disassembler.cc", -+ "src/diagnostics/disassembler.h", -+ "src/diagnostics/eh-frame.cc", -+ "src/diagnostics/eh-frame.h", -+ "src/diagnostics/gdb-jit.cc", -+ "src/diagnostics/gdb-jit.h", -+ "src/diagnostics/objects-debug.cc", -+ "src/diagnostics/objects-printer.cc", -+ "src/diagnostics/perf-jit.cc", -+ "src/diagnostics/perf-jit.h", -+ "src/diagnostics/unwinder.cc", -+ "src/execution/arguments-inl.h", -+ "src/execution/arguments.cc", -+ "src/execution/arguments.h", -+ "src/execution/execution.cc", -+ "src/execution/execution.h", -+ "src/execution/frame-constants.h", -+ "src/execution/frames-inl.h", -+ "src/execution/frames.cc", -+ "src/execution/frames.h", -+ "src/execution/futex-emulation.cc", -+ "src/execution/futex-emulation.h", -+ "src/execution/interrupts-scope.cc", -+ "src/execution/interrupts-scope.h", -+ "src/execution/isolate-data.h", -+ "src/execution/isolate-inl.h", -+ "src/execution/isolate-utils.h", -+ "src/execution/isolate.cc", -+ "src/execution/isolate.h", -+ "src/execution/messages.cc", -+ "src/execution/messages.h", -+ "src/execution/microtask-queue.cc", -+ "src/execution/microtask-queue.h", -+ "src/execution/off-thread-isolate-inl.h", -+ "src/execution/off-thread-isolate.cc", -+ "src/execution/off-thread-isolate.h", -+ "src/execution/pointer-authentication.h", -+ "src/execution/protectors-inl.h", -+ "src/execution/protectors.cc", -+ "src/execution/protectors.h", -+ "src/execution/runtime-profiler.cc", -+ "src/execution/runtime-profiler.h", -+ "src/execution/simulator-base.cc", -+ "src/execution/simulator-base.h", -+ "src/execution/simulator.h", -+ "src/execution/stack-guard.cc", -+ "src/execution/stack-guard.h", -+ "src/execution/thread-id.cc", -+ "src/execution/thread-id.h", -+ "src/execution/thread-local-top.cc", -+ "src/execution/thread-local-top.h", -+ "src/execution/v8threads.cc", -+ "src/execution/v8threads.h", -+ "src/execution/vm-state-inl.h", -+ "src/execution/vm-state.h", -+ "src/extensions/cputracemark-extension.cc", -+ "src/extensions/cputracemark-extension.h", -+ "src/extensions/externalize-string-extension.cc", -+ "src/extensions/externalize-string-extension.h", -+ "src/extensions/gc-extension.cc", -+ "src/extensions/gc-extension.h", -+ "src/extensions/ignition-statistics-extension.cc", -+ "src/extensions/ignition-statistics-extension.h", -+ "src/extensions/statistics-extension.cc", -+ 
"src/extensions/statistics-extension.h", -+ "src/extensions/trigger-failure-extension.cc", -+ "src/extensions/trigger-failure-extension.h", -+ "src/flags/flag-definitions.h", -+ "src/flags/flags.cc", -+ "src/flags/flags.h", -+ "src/handles/global-handles.cc", -+ "src/handles/global-handles.h", -+ "src/handles/handles-inl.h", -+ "src/handles/handles.cc", -+ "src/handles/handles.h", -+ "src/handles/local-handles-inl.h", -+ "src/handles/local-handles.cc", -+ "src/handles/local-handles.h", -+ "src/handles/maybe-handles-inl.h", -+ "src/handles/maybe-handles.h", -+ "src/handles/persistent-handles.cc", -+ "src/handles/persistent-handles.h", -+ "src/heap/array-buffer-collector.cc", -+ "src/heap/array-buffer-collector.h", -+ "src/heap/array-buffer-sweeper.cc", -+ "src/heap/array-buffer-sweeper.h", -+ "src/heap/array-buffer-tracker-inl.h", -+ "src/heap/array-buffer-tracker.cc", -+ "src/heap/array-buffer-tracker.h", -+ "src/heap/barrier.h", -+ "src/heap/basic-memory-chunk.cc", -+ "src/heap/basic-memory-chunk.h", -+ "src/heap/code-stats.cc", -+ "src/heap/code-stats.h", -+ "src/heap/combined-heap.cc", -+ "src/heap/combined-heap.h", -+ "src/heap/concurrent-allocator-inl.h", -+ "src/heap/concurrent-allocator.cc", -+ "src/heap/concurrent-allocator.h", -+ "src/heap/concurrent-marking.cc", -+ "src/heap/concurrent-marking.h", -+ "src/heap/embedder-tracing.cc", -+ "src/heap/embedder-tracing.h", -+ "src/heap/factory-base.cc", -+ "src/heap/factory-base.h", -+ "src/heap/factory-inl.h", -+ "src/heap/factory.cc", -+ "src/heap/factory.h", -+ "src/heap/finalization-registry-cleanup-task.cc", -+ "src/heap/finalization-registry-cleanup-task.h", -+ "src/heap/gc-idle-time-handler.cc", -+ "src/heap/gc-idle-time-handler.h", -+ "src/heap/gc-tracer.cc", -+ "src/heap/gc-tracer.h", -+ "src/heap/heap-controller.cc", -+ "src/heap/heap-controller.h", -+ "src/heap/heap-inl.h", -+ "src/heap/heap-write-barrier-inl.h", -+ "src/heap/heap-write-barrier.h", -+ "src/heap/heap.cc", -+ "src/heap/heap.h", -+ "src/heap/incremental-marking-inl.h", -+ "src/heap/incremental-marking-job.cc", -+ "src/heap/incremental-marking-job.h", -+ "src/heap/incremental-marking.cc", -+ "src/heap/incremental-marking.h", -+ "src/heap/invalidated-slots-inl.h", -+ "src/heap/invalidated-slots.cc", -+ "src/heap/invalidated-slots.h", -+ "src/heap/item-parallel-job.cc", -+ "src/heap/item-parallel-job.h", -+ "src/heap/large-spaces.cc", -+ "src/heap/large-spaces.h", -+ "src/heap/list.h", -+ "src/heap/local-allocator-inl.h", -+ "src/heap/local-allocator.h", -+ "src/heap/local-heap.cc", -+ "src/heap/local-heap.h", -+ "src/heap/mark-compact-inl.h", -+ "src/heap/mark-compact.cc", -+ "src/heap/mark-compact.h", -+ "src/heap/marking-visitor-inl.h", -+ "src/heap/marking-visitor.h", -+ "src/heap/marking-worklist.cc", -+ "src/heap/marking-worklist.h", -+ "src/heap/marking.cc", -+ "src/heap/marking.h", -+ "src/heap/memory-chunk-inl.h", -+ "src/heap/memory-chunk.cc", -+ "src/heap/memory-chunk.h", -+ "src/heap/memory-measurement-inl.h", -+ "src/heap/memory-measurement.cc", -+ "src/heap/memory-measurement.h", -+ "src/heap/memory-reducer.cc", -+ "src/heap/memory-reducer.h", -+ "src/heap/object-stats.cc", -+ "src/heap/object-stats.h", -+ "src/heap/objects-visiting-inl.h", -+ "src/heap/objects-visiting.cc", -+ "src/heap/objects-visiting.h", -+ "src/heap/off-thread-factory.cc", -+ "src/heap/off-thread-factory.h", -+ "src/heap/off-thread-heap.cc", -+ "src/heap/off-thread-heap.h", -+ "src/heap/read-only-heap-inl.h", -+ "src/heap/read-only-heap.cc", -+ "src/heap/read-only-heap.h", -+ 
"src/heap/read-only-spaces.cc", -+ "src/heap/read-only-spaces.h", -+ "src/heap/remembered-set.h", -+ "src/heap/safepoint.cc", -+ "src/heap/safepoint.h", -+ "src/heap/scavenge-job.cc", -+ "src/heap/scavenge-job.h", -+ "src/heap/scavenger-inl.h", -+ "src/heap/scavenger.cc", -+ "src/heap/scavenger.h", -+ "src/heap/slot-set.cc", -+ "src/heap/slot-set.h", -+ "src/heap/spaces-inl.h", -+ "src/heap/spaces.cc", -+ "src/heap/spaces.h", -+ "src/heap/stress-marking-observer.cc", -+ "src/heap/stress-marking-observer.h", -+ "src/heap/stress-scavenge-observer.cc", -+ "src/heap/stress-scavenge-observer.h", -+ "src/heap/sweeper.cc", -+ "src/heap/sweeper.h", -+ "src/heap/worklist.h", -+ "src/ic/call-optimization.cc", -+ "src/ic/call-optimization.h", -+ "src/ic/handler-configuration-inl.h", -+ "src/ic/handler-configuration.cc", -+ "src/ic/handler-configuration.h", -+ "src/ic/ic-inl.h", -+ "src/ic/ic-stats.cc", -+ "src/ic/ic-stats.h", -+ "src/ic/ic.cc", -+ "src/ic/ic.h", -+ "src/ic/stub-cache.cc", -+ "src/ic/stub-cache.h", -+ "src/init/bootstrapper.cc", -+ "src/init/bootstrapper.h", -+ "src/init/heap-symbols.h", -+ "src/init/icu_util.cc", -+ "src/init/icu_util.h", -+ "src/init/isolate-allocator.cc", -+ "src/init/isolate-allocator.h", -+ "src/init/setup-isolate.h", -+ "src/init/startup-data-util.cc", -+ "src/init/startup-data-util.h", -+ "src/init/v8.cc", -+ "src/init/v8.h", -+ "src/interpreter/block-coverage-builder.h", -+ "src/interpreter/bytecode-array-accessor.cc", -+ "src/interpreter/bytecode-array-accessor.h", -+ "src/interpreter/bytecode-array-builder.cc", -+ "src/interpreter/bytecode-array-builder.h", -+ "src/interpreter/bytecode-array-iterator.cc", -+ "src/interpreter/bytecode-array-iterator.h", -+ "src/interpreter/bytecode-array-random-iterator.cc", -+ "src/interpreter/bytecode-array-random-iterator.h", -+ "src/interpreter/bytecode-array-writer.cc", -+ "src/interpreter/bytecode-array-writer.h", -+ "src/interpreter/bytecode-decoder.cc", -+ "src/interpreter/bytecode-decoder.h", -+ "src/interpreter/bytecode-flags.cc", -+ "src/interpreter/bytecode-flags.h", -+ "src/interpreter/bytecode-generator.cc", -+ "src/interpreter/bytecode-generator.h", -+ "src/interpreter/bytecode-jump-table.h", -+ "src/interpreter/bytecode-label.cc", -+ "src/interpreter/bytecode-label.h", -+ "src/interpreter/bytecode-node.cc", -+ "src/interpreter/bytecode-node.h", -+ "src/interpreter/bytecode-operands.cc", -+ "src/interpreter/bytecode-operands.h", -+ "src/interpreter/bytecode-register-allocator.h", -+ "src/interpreter/bytecode-register-optimizer.cc", -+ "src/interpreter/bytecode-register-optimizer.h", -+ "src/interpreter/bytecode-register.cc", -+ "src/interpreter/bytecode-register.h", -+ "src/interpreter/bytecode-source-info.cc", -+ "src/interpreter/bytecode-source-info.h", -+ "src/interpreter/bytecode-traits.h", -+ "src/interpreter/bytecodes.cc", -+ "src/interpreter/bytecodes.h", -+ "src/interpreter/constant-array-builder.cc", -+ "src/interpreter/constant-array-builder.h", -+ "src/interpreter/control-flow-builders.cc", -+ "src/interpreter/control-flow-builders.h", -+ "src/interpreter/handler-table-builder.cc", -+ "src/interpreter/handler-table-builder.h", -+ "src/interpreter/interpreter-generator.h", -+ "src/interpreter/interpreter-intrinsics.cc", -+ "src/interpreter/interpreter-intrinsics.h", -+ "src/interpreter/interpreter.cc", -+ "src/interpreter/interpreter.h", -+ "src/json/json-parser.cc", -+ "src/json/json-parser.h", -+ "src/json/json-stringifier.cc", -+ "src/json/json-stringifier.h", -+ "src/logging/code-events.h", -+ 
"src/logging/counters-definitions.h", -+ "src/logging/counters-inl.h", -+ "src/logging/counters.cc", -+ "src/logging/counters.h", -+ "src/logging/log-inl.h", -+ "src/logging/log-utils.cc", -+ "src/logging/log-utils.h", -+ "src/logging/log.cc", -+ "src/logging/log.h", -+ "src/logging/off-thread-logger.h", -+ "src/numbers/bignum-dtoa.cc", -+ "src/numbers/bignum-dtoa.h", -+ "src/numbers/bignum.cc", -+ "src/numbers/bignum.h", -+ "src/numbers/cached-powers.cc", -+ "src/numbers/cached-powers.h", -+ "src/numbers/conversions-inl.h", -+ "src/numbers/conversions.cc", -+ "src/numbers/conversions.h", -+ "src/numbers/diy-fp.cc", -+ "src/numbers/diy-fp.h", -+ "src/numbers/double.h", -+ "src/numbers/dtoa.cc", -+ "src/numbers/dtoa.h", -+ "src/numbers/fast-dtoa.cc", -+ "src/numbers/fast-dtoa.h", -+ "src/numbers/fixed-dtoa.cc", -+ "src/numbers/fixed-dtoa.h", -+ "src/numbers/hash-seed-inl.h", -+ "src/numbers/math-random.cc", -+ "src/numbers/math-random.h", -+ "src/numbers/strtod.cc", -+ "src/numbers/strtod.h", -+ "src/objects/allocation-site-inl.h", -+ "src/objects/allocation-site-scopes-inl.h", -+ "src/objects/allocation-site-scopes.h", -+ "src/objects/allocation-site.h", -+ "src/objects/api-callbacks-inl.h", -+ "src/objects/api-callbacks.h", -+ "src/objects/arguments-inl.h", -+ "src/objects/arguments.h", -+ "src/objects/backing-store.cc", -+ "src/objects/backing-store.h", -+ "src/objects/bigint.cc", -+ "src/objects/bigint.h", -+ "src/objects/cell-inl.h", -+ "src/objects/cell.h", -+ "src/objects/code-inl.h", -+ "src/objects/code.cc", -+ "src/objects/code.h", -+ "src/objects/compilation-cache-inl.h", -+ "src/objects/compilation-cache.h", -+ "src/objects/compressed-slots-inl.h", -+ "src/objects/compressed-slots.h", -+ "src/objects/contexts-inl.h", -+ "src/objects/contexts.cc", -+ "src/objects/contexts.h", -+ "src/objects/data-handler.h", -+ "src/objects/debug-objects-inl.h", -+ "src/objects/debug-objects.cc", -+ "src/objects/debug-objects.h", -+ "src/objects/descriptor-array-inl.h", -+ "src/objects/descriptor-array.h", -+ "src/objects/dictionary-inl.h", -+ "src/objects/dictionary.h", -+ "src/objects/elements-inl.h", -+ "src/objects/elements-kind.cc", -+ "src/objects/elements-kind.h", -+ "src/objects/elements.cc", -+ "src/objects/elements.h", -+ "src/objects/embedder-data-array-inl.h", -+ "src/objects/embedder-data-array.cc", -+ "src/objects/embedder-data-array.h", -+ "src/objects/embedder-data-slot-inl.h", -+ "src/objects/embedder-data-slot.h", -+ "src/objects/feedback-cell-inl.h", -+ "src/objects/feedback-cell.h", -+ "src/objects/feedback-vector-inl.h", -+ "src/objects/feedback-vector.cc", -+ "src/objects/feedback-vector.h", -+ "src/objects/field-index-inl.h", -+ "src/objects/field-index.h", -+ "src/objects/field-type.cc", -+ "src/objects/field-type.h", -+ "src/objects/fixed-array-inl.h", -+ "src/objects/fixed-array.h", -+ "src/objects/frame-array-inl.h", -+ "src/objects/frame-array.h", -+ "src/objects/function-kind.h", -+ "src/objects/hash-table-inl.h", -+ "src/objects/hash-table.h", -+ "src/objects/heap-number-inl.h", -+ "src/objects/heap-number.h", -+ "src/objects/heap-object-inl.h", -+ "src/objects/heap-object.h", -+ "src/objects/instance-type-inl.h", -+ "src/objects/instance-type.h", -+ "src/objects/internal-index.h", -+ "src/objects/intl-objects.cc", -+ "src/objects/intl-objects.h", -+ "src/objects/js-aggregate-error-inl.h", -+ "src/objects/js-aggregate-error.h", -+ "src/objects/js-array-buffer-inl.h", -+ "src/objects/js-array-buffer.cc", -+ "src/objects/js-array-buffer.h", -+ 
"src/objects/js-array-inl.h", -+ "src/objects/js-array.h", -+ "src/objects/js-break-iterator-inl.h", -+ "src/objects/js-break-iterator.cc", -+ "src/objects/js-break-iterator.h", -+ "src/objects/js-collator-inl.h", -+ "src/objects/js-collator.cc", -+ "src/objects/js-collator.h", -+ "src/objects/js-collection-inl.h", -+ "src/objects/js-collection-iterator.h", -+ "src/objects/js-collection.h", -+ "src/objects/js-date-time-format-inl.h", -+ "src/objects/js-date-time-format.cc", -+ "src/objects/js-date-time-format.h", -+ "src/objects/js-display-names-inl.h", -+ "src/objects/js-display-names.cc", -+ "src/objects/js-display-names.h", -+ "src/objects/js-generator-inl.h", -+ "src/objects/js-generator.h", -+ "src/objects/js-list-format-inl.h", -+ "src/objects/js-list-format.cc", -+ "src/objects/js-list-format.h", -+ "src/objects/js-locale-inl.h", -+ "src/objects/js-locale.cc", -+ "src/objects/js-locale.h", -+ "src/objects/js-number-format-inl.h", -+ "src/objects/js-number-format.cc", -+ "src/objects/js-number-format.h", -+ "src/objects/js-objects-inl.h", -+ "src/objects/js-objects.cc", -+ "src/objects/js-objects.h", -+ "src/objects/js-plural-rules-inl.h", -+ "src/objects/js-plural-rules.cc", -+ "src/objects/js-plural-rules.h", -+ "src/objects/js-promise-inl.h", -+ "src/objects/js-promise.h", -+ "src/objects/js-proxy-inl.h", -+ "src/objects/js-proxy.h", -+ "src/objects/js-regexp-inl.h", -+ "src/objects/js-regexp-string-iterator-inl.h", -+ "src/objects/js-regexp-string-iterator.h", -+ "src/objects/js-regexp.cc", -+ "src/objects/js-regexp.h", -+ "src/objects/js-relative-time-format-inl.h", -+ "src/objects/js-relative-time-format.cc", -+ "src/objects/js-relative-time-format.h", -+ "src/objects/js-segment-iterator-inl.h", -+ "src/objects/js-segment-iterator.cc", -+ "src/objects/js-segment-iterator.h", -+ "src/objects/js-segmenter-inl.h", -+ "src/objects/js-segmenter.cc", -+ "src/objects/js-segmenter.h", -+ "src/objects/js-weak-refs-inl.h", -+ "src/objects/js-weak-refs.h", -+ "src/objects/keys.cc", -+ "src/objects/keys.h", -+ "src/objects/layout-descriptor-inl.h", -+ "src/objects/layout-descriptor.cc", -+ "src/objects/layout-descriptor.h", -+ "src/objects/literal-objects-inl.h", -+ "src/objects/literal-objects.cc", -+ "src/objects/literal-objects.h", -+ "src/objects/lookup-cache-inl.h", -+ "src/objects/lookup-cache.cc", -+ "src/objects/lookup-cache.h", -+ "src/objects/lookup-inl.h", -+ "src/objects/lookup.cc", -+ "src/objects/lookup.h", -+ "src/objects/managed.cc", -+ "src/objects/managed.h", -+ "src/objects/map-inl.h", -+ "src/objects/map-updater.cc", -+ "src/objects/map-updater.h", -+ "src/objects/map.cc", -+ "src/objects/map.h", -+ "src/objects/maybe-object-inl.h", -+ "src/objects/maybe-object.h", -+ "src/objects/microtask-inl.h", -+ "src/objects/microtask.h", -+ "src/objects/module-inl.h", -+ "src/objects/module.cc", -+ "src/objects/module.h", -+ "src/objects/name-inl.h", -+ "src/objects/name.h", -+ "src/objects/object-list-macros.h", -+ "src/objects/object-macros-undef.h", -+ "src/objects/object-macros.h", -+ "src/objects/objects-body-descriptors-inl.h", -+ "src/objects/objects-body-descriptors.h", -+ "src/objects/objects-inl.h", -+ "src/objects/objects.cc", -+ "src/objects/objects.h", -+ "src/objects/oddball-inl.h", -+ "src/objects/oddball.h", -+ "src/objects/ordered-hash-table-inl.h", -+ "src/objects/ordered-hash-table.cc", -+ "src/objects/ordered-hash-table.h", -+ "src/objects/osr-optimized-code-cache-inl.h", -+ "src/objects/osr-optimized-code-cache.cc", -+ 
"src/objects/osr-optimized-code-cache.h", -+ "src/objects/primitive-heap-object-inl.h", -+ "src/objects/primitive-heap-object.h", -+ "src/objects/promise-inl.h", -+ "src/objects/promise.h", -+ "src/objects/property-array-inl.h", -+ "src/objects/property-array.h", -+ "src/objects/property-cell-inl.h", -+ "src/objects/property-cell.h", -+ "src/objects/property-descriptor-object-inl.h", -+ "src/objects/property-descriptor-object.h", -+ "src/objects/property-descriptor.cc", -+ "src/objects/property-descriptor.h", -+ "src/objects/property-details.h", -+ "src/objects/property.cc", -+ "src/objects/property.h", -+ "src/objects/prototype-info-inl.h", -+ "src/objects/prototype-info.h", -+ "src/objects/prototype.h", -+ "src/objects/regexp-match-info.h", -+ "src/objects/scope-info.cc", -+ "src/objects/scope-info.h", -+ "src/objects/script-inl.h", -+ "src/objects/script.h", -+ "src/objects/shared-function-info-inl.h", -+ "src/objects/shared-function-info.h", -+ "src/objects/slots-atomic-inl.h", -+ "src/objects/slots-inl.h", -+ "src/objects/slots.h", -+ "src/objects/source-text-module.cc", -+ "src/objects/source-text-module.h", -+ "src/objects/stack-frame-info-inl.h", -+ "src/objects/stack-frame-info.cc", -+ "src/objects/stack-frame-info.h", -+ "src/objects/string-comparator.cc", -+ "src/objects/string-comparator.h", -+ "src/objects/string-inl.h", -+ "src/objects/string-table-inl.h", -+ "src/objects/string-table.h", -+ "src/objects/string.cc", -+ "src/objects/string.h", -+ "src/objects/struct-inl.h", -+ "src/objects/struct.h", -+ "src/objects/synthetic-module.cc", -+ "src/objects/synthetic-module.h", -+ "src/objects/tagged-field-inl.h", -+ "src/objects/tagged-field.h", -+ "src/objects/tagged-impl-inl.h", -+ "src/objects/tagged-impl.cc", -+ "src/objects/tagged-impl.h", -+ "src/objects/tagged-index.h", -+ "src/objects/tagged-value-inl.h", -+ "src/objects/tagged-value.h", -+ "src/objects/template-objects-inl.h", -+ "src/objects/template-objects.cc", -+ "src/objects/template-objects.h", -+ "src/objects/templates-inl.h", -+ "src/objects/templates.h", -+ "src/objects/transitions-inl.h", -+ "src/objects/transitions.cc", -+ "src/objects/transitions.h", -+ "src/objects/type-hints.cc", -+ "src/objects/type-hints.h", -+ "src/objects/value-serializer.cc", -+ "src/objects/value-serializer.h", -+ "src/objects/visitors.cc", -+ "src/objects/visitors.h", -+ "src/parsing/expression-scope.h", -+ "src/parsing/func-name-inferrer.cc", -+ "src/parsing/func-name-inferrer.h", -+ "src/parsing/literal-buffer.cc", -+ "src/parsing/literal-buffer.h", -+ "src/parsing/parse-info.cc", -+ "src/parsing/parse-info.h", -+ "src/parsing/parser-base.h", -+ "src/parsing/parser.cc", -+ "src/parsing/parser.h", -+ "src/parsing/parsing.cc", -+ "src/parsing/parsing.h", -+ "src/parsing/pending-compilation-error-handler.cc", -+ "src/parsing/pending-compilation-error-handler.h", -+ "src/parsing/preparse-data-impl.h", -+ "src/parsing/preparse-data.cc", -+ "src/parsing/preparse-data.h", -+ "src/parsing/preparser-logger.h", -+ "src/parsing/preparser.cc", -+ "src/parsing/preparser.h", -+ "src/parsing/rewriter.cc", -+ "src/parsing/rewriter.h", -+ "src/parsing/scanner-character-streams.cc", -+ "src/parsing/scanner-character-streams.h", -+ "src/parsing/scanner.cc", -+ "src/parsing/scanner.h", -+ "src/parsing/token.cc", -+ "src/parsing/token.h", -+ "src/profiler/allocation-tracker.cc", -+ "src/profiler/allocation-tracker.h", -+ "src/profiler/circular-queue-inl.h", -+ "src/profiler/circular-queue.h", -+ "src/profiler/cpu-profiler-inl.h", -+ 
"src/profiler/cpu-profiler.cc", -+ "src/profiler/cpu-profiler.h", -+ "src/profiler/heap-profiler.cc", -+ "src/profiler/heap-profiler.h", -+ "src/profiler/heap-snapshot-generator-inl.h", -+ "src/profiler/heap-snapshot-generator.cc", -+ "src/profiler/heap-snapshot-generator.h", -+ "src/profiler/profile-generator-inl.h", -+ "src/profiler/profile-generator.cc", -+ "src/profiler/profile-generator.h", -+ "src/profiler/profiler-listener.cc", -+ "src/profiler/profiler-listener.h", -+ "src/profiler/sampling-heap-profiler.cc", -+ "src/profiler/sampling-heap-profiler.h", -+ "src/profiler/strings-storage.cc", -+ "src/profiler/strings-storage.h", -+ "src/profiler/tick-sample.cc", -+ "src/profiler/tick-sample.h", -+ "src/profiler/tracing-cpu-profiler.cc", -+ "src/profiler/tracing-cpu-profiler.h", -+ "src/regexp/property-sequences.cc", -+ "src/regexp/property-sequences.h", -+ "src/regexp/regexp-ast.cc", -+ "src/regexp/regexp-ast.h", -+ "src/regexp/regexp-bytecode-generator-inl.h", -+ "src/regexp/regexp-bytecode-generator.cc", -+ "src/regexp/regexp-bytecode-generator.h", -+ "src/regexp/regexp-bytecode-peephole.cc", -+ "src/regexp/regexp-bytecode-peephole.h", -+ "src/regexp/regexp-bytecodes.cc", -+ "src/regexp/regexp-bytecodes.h", -+ "src/regexp/regexp-compiler-tonode.cc", -+ "src/regexp/regexp-compiler.cc", -+ "src/regexp/regexp-compiler.h", -+ "src/regexp/regexp-dotprinter.cc", -+ "src/regexp/regexp-dotprinter.h", -+ "src/regexp/regexp-error.cc", -+ "src/regexp/regexp-error.h", -+ "src/regexp/regexp-interpreter.cc", -+ "src/regexp/regexp-interpreter.h", -+ "src/regexp/regexp-macro-assembler-arch.h", -+ "src/regexp/regexp-macro-assembler-tracer.cc", -+ "src/regexp/regexp-macro-assembler-tracer.h", -+ "src/regexp/regexp-macro-assembler.cc", -+ "src/regexp/regexp-macro-assembler.h", -+ "src/regexp/regexp-nodes.h", -+ "src/regexp/regexp-parser.cc", -+ "src/regexp/regexp-parser.h", -+ "src/regexp/regexp-stack.cc", -+ "src/regexp/regexp-stack.h", -+ "src/regexp/regexp-utils.cc", -+ "src/regexp/regexp-utils.h", -+ "src/regexp/regexp.cc", -+ "src/regexp/regexp.h", -+ "src/regexp/special-case.h", -+ "src/roots/roots-inl.h", -+ "src/roots/roots.cc", -+ "src/roots/roots.h", -+ "src/runtime/runtime-array.cc", -+ "src/runtime/runtime-atomics.cc", -+ "src/runtime/runtime-bigint.cc", -+ "src/runtime/runtime-classes.cc", -+ "src/runtime/runtime-collections.cc", -+ "src/runtime/runtime-compiler.cc", -+ "src/runtime/runtime-date.cc", -+ "src/runtime/runtime-debug.cc", -+ "src/runtime/runtime-forin.cc", -+ "src/runtime/runtime-function.cc", -+ "src/runtime/runtime-futex.cc", -+ "src/runtime/runtime-generator.cc", -+ "src/runtime/runtime-internal.cc", -+ "src/runtime/runtime-interpreter.cc", -+ "src/runtime/runtime-intl.cc", -+ "src/runtime/runtime-literals.cc", -+ "src/runtime/runtime-module.cc", -+ "src/runtime/runtime-numbers.cc", -+ "src/runtime/runtime-object.cc", -+ "src/runtime/runtime-operators.cc", -+ "src/runtime/runtime-promise.cc", -+ "src/runtime/runtime-proxy.cc", -+ "src/runtime/runtime-regexp.cc", -+ "src/runtime/runtime-scopes.cc", -+ "src/runtime/runtime-strings.cc", -+ "src/runtime/runtime-symbol.cc", -+ "src/runtime/runtime-test.cc", -+ "src/runtime/runtime-typedarray.cc", -+ "src/runtime/runtime-utils.h", -+ "src/runtime/runtime-wasm.cc", -+ "src/runtime/runtime-weak-refs.cc", -+ "src/runtime/runtime.cc", -+ "src/runtime/runtime.h", -+ "src/sanitizer/asan.h", -+ "src/sanitizer/lsan-page-allocator.cc", -+ "src/sanitizer/lsan-page-allocator.h", -+ "src/sanitizer/msan.h", -+ "src/sanitizer/tsan.h", -+ 
"src/snapshot/code-serializer.cc", -+ "src/snapshot/code-serializer.h", -+ "src/snapshot/context-deserializer.cc", -+ "src/snapshot/context-deserializer.h", -+ "src/snapshot/context-serializer.cc", -+ "src/snapshot/context-serializer.h", -+ "src/snapshot/deserializer-allocator.cc", -+ "src/snapshot/deserializer-allocator.h", -+ "src/snapshot/deserializer.cc", -+ "src/snapshot/deserializer.h", -+ "src/snapshot/embedded/embedded-data.cc", -+ "src/snapshot/embedded/embedded-data.h", -+ "src/snapshot/object-deserializer.cc", -+ "src/snapshot/object-deserializer.h", -+ "src/snapshot/read-only-deserializer.cc", -+ "src/snapshot/read-only-deserializer.h", -+ "src/snapshot/read-only-serializer.cc", -+ "src/snapshot/read-only-serializer.h", -+ "src/snapshot/references.h", -+ "src/snapshot/roots-serializer.cc", -+ "src/snapshot/roots-serializer.h", -+ "src/snapshot/serializer-allocator.cc", -+ "src/snapshot/serializer-allocator.h", -+ "src/snapshot/serializer-deserializer.cc", -+ "src/snapshot/serializer-deserializer.h", -+ "src/snapshot/serializer.cc", -+ "src/snapshot/serializer.h", -+ "src/snapshot/snapshot-compression.cc", -+ "src/snapshot/snapshot-compression.h", -+ "src/snapshot/snapshot-data.cc", -+ "src/snapshot/snapshot-data.h", -+ "src/snapshot/snapshot-source-sink.cc", -+ "src/snapshot/snapshot-source-sink.h", -+ "src/snapshot/snapshot-utils.cc", -+ "src/snapshot/snapshot-utils.h", -+ "src/snapshot/snapshot.cc", -+ "src/snapshot/snapshot.h", -+ "src/snapshot/startup-deserializer.cc", -+ "src/snapshot/startup-deserializer.h", -+ "src/snapshot/startup-serializer.cc", -+ "src/snapshot/startup-serializer.h", -+ "src/strings/char-predicates-inl.h", -+ "src/strings/char-predicates.cc", -+ "src/strings/char-predicates.h", -+ "src/strings/string-builder-inl.h", -+ "src/strings/string-builder.cc", -+ "src/strings/string-case.cc", -+ "src/strings/string-case.h", -+ "src/strings/string-hasher-inl.h", -+ "src/strings/string-hasher.h", -+ "src/strings/string-search.h", -+ "src/strings/string-stream.cc", -+ "src/strings/string-stream.h", -+ "src/strings/unicode-decoder.cc", -+ "src/strings/unicode-decoder.h", -+ "src/strings/unicode-inl.h", -+ "src/strings/unicode.cc", -+ "src/strings/unicode.h", -+ "src/strings/uri.cc", -+ "src/strings/uri.h", -+ "src/tasks/cancelable-task.cc", -+ "src/tasks/cancelable-task.h", -+ "src/tasks/task-utils.cc", -+ "src/tasks/task-utils.h", -+ "src/third_party/siphash/halfsiphash.cc", -+ "src/third_party/siphash/halfsiphash.h", -+ "src/third_party/utf8-decoder/utf8-decoder.h", -+ "src/tracing/trace-event.cc", -+ "src/tracing/trace-event.h", -+ "src/tracing/traced-value.cc", -+ "src/tracing/traced-value.h", -+ "src/tracing/tracing-category-observer.cc", -+ "src/tracing/tracing-category-observer.h", -+ "src/trap-handler/handler-inside.cc", -+ "src/trap-handler/handler-outside.cc", -+ "src/trap-handler/handler-shared.cc", -+ "src/trap-handler/trap-handler-internal.h", -+ "src/trap-handler/trap-handler.h", -+ "src/utils/address-map.cc", -+ "src/utils/address-map.h", -+ "src/utils/allocation.cc", -+ "src/utils/allocation.h", -+ "src/utils/bit-vector.cc", -+ "src/utils/bit-vector.h", -+ "src/utils/boxed-float.h", -+ "src/utils/detachable-vector.cc", -+ "src/utils/detachable-vector.h", -+ "src/utils/identity-map.cc", -+ "src/utils/identity-map.h", -+ "src/utils/locked-queue-inl.h", -+ "src/utils/locked-queue.h", -+ "src/utils/memcopy.cc", -+ "src/utils/memcopy.h", -+ "src/utils/ostreams.cc", -+ "src/utils/ostreams.h", -+ "src/utils/pointer-with-payload.h", -+ 
"src/utils/utils-inl.h", -+ "src/utils/utils.cc", -+ "src/utils/utils.h", -+ "src/utils/vector.h", -+ "src/utils/version.cc", -+ "src/utils/version.h", -+ "src/wasm/baseline/liftoff-assembler-defs.h", -+ "src/wasm/baseline/liftoff-assembler.cc", -+ "src/wasm/baseline/liftoff-assembler.h", -+ "src/wasm/baseline/liftoff-compiler.cc", -+ "src/wasm/baseline/liftoff-compiler.h", -+ "src/wasm/baseline/liftoff-register.h", -+ "src/wasm/code-space-access.h", -+ "src/wasm/compilation-environment.h", -+ "src/wasm/decoder.h", -+ "src/wasm/function-body-decoder-impl.h", -+ "src/wasm/function-body-decoder.cc", -+ "src/wasm/function-body-decoder.h", -+ "src/wasm/function-compiler.cc", -+ "src/wasm/function-compiler.h", -+ "src/wasm/graph-builder-interface.cc", -+ "src/wasm/graph-builder-interface.h", -+ "src/wasm/jump-table-assembler.cc", -+ "src/wasm/jump-table-assembler.h", -+ "src/wasm/leb-helper.h", -+ "src/wasm/local-decl-encoder.cc", -+ "src/wasm/local-decl-encoder.h", -+ "src/wasm/memory-tracing.cc", -+ "src/wasm/memory-tracing.h", -+ "src/wasm/module-compiler.cc", -+ "src/wasm/module-compiler.h", -+ "src/wasm/module-decoder.cc", -+ "src/wasm/module-decoder.h", -+ "src/wasm/module-instantiate.cc", -+ "src/wasm/module-instantiate.h", -+ "src/wasm/object-access.h", -+ "src/wasm/signature-map.cc", -+ "src/wasm/signature-map.h", -+ "src/wasm/streaming-decoder.cc", -+ "src/wasm/streaming-decoder.h", -+ "src/wasm/struct-types.h", -+ "src/wasm/value-type.h", -+ "src/wasm/wasm-arguments.h", -+ "src/wasm/wasm-code-manager.cc", -+ "src/wasm/wasm-code-manager.h", -+ "src/wasm/wasm-constants.h", -+ "src/wasm/wasm-debug-evaluate.cc", -+ "src/wasm/wasm-debug-evaluate.h", -+ "src/wasm/wasm-debug.cc", -+ "src/wasm/wasm-engine.cc", -+ "src/wasm/wasm-engine.h", -+ "src/wasm/wasm-external-refs.cc", -+ "src/wasm/wasm-external-refs.h", -+ "src/wasm/wasm-feature-flags.h", -+ "src/wasm/wasm-features.cc", -+ "src/wasm/wasm-features.h", -+ "src/wasm/wasm-import-wrapper-cache.cc", -+ "src/wasm/wasm-import-wrapper-cache.h", -+ "src/wasm/wasm-interpreter.cc", -+ "src/wasm/wasm-interpreter.h", -+ "src/wasm/wasm-js.cc", -+ "src/wasm/wasm-js.h", -+ "src/wasm/wasm-limits.h", -+ "src/wasm/wasm-linkage.h", -+ "src/wasm/wasm-module-builder.cc", -+ "src/wasm/wasm-module-builder.h", -+ "src/wasm/wasm-module-sourcemap.cc", -+ "src/wasm/wasm-module-sourcemap.h", -+ "src/wasm/wasm-module.cc", -+ "src/wasm/wasm-module.h", -+ "src/wasm/wasm-objects-inl.h", -+ "src/wasm/wasm-objects.cc", -+ "src/wasm/wasm-objects.h", -+ "src/wasm/wasm-opcodes.cc", -+ "src/wasm/wasm-opcodes.h", -+ "src/wasm/wasm-result.cc", -+ "src/wasm/wasm-result.h", -+ "src/wasm/wasm-serialization.cc", -+ "src/wasm/wasm-serialization.h", -+ "src/wasm/wasm-tier.h", -+ "src/wasm/wasm-value.h", -+ "src/zone/accounting-allocator.cc", -+ "src/zone/accounting-allocator.h", -+ "src/zone/zone-allocator.h", -+ "src/zone/zone-chunk-list.h", -+ "src/zone/zone-containers.h", -+ "src/zone/zone-handle-set.h", -+ "src/zone/zone-list-inl.h", -+ "src/zone/zone-segment.cc", -+ "src/zone/zone-segment.h", -+ "src/zone/zone.cc", -+ "src/zone/zone.h", -+ ] -+ -+ if (!v8_control_flow_integrity) { -+ sources += [ "src/execution/pointer-authentication-dummy.h" ] -+ } -+ -+ if (v8_enable_third_party_heap) { -+ sources += v8_third_party_heap_files -+ } else { -+ sources += [ "src/heap/third-party/heap-api-stub.cc" ] -+ } -+ -+ if (v8_enable_wasm_gdb_remote_debugging) { -+ sources += [ -+ "src/debug/wasm/gdb-server/gdb-remote-util.cc", -+ "src/debug/wasm/gdb-server/gdb-remote-util.h", -+ 
"src/debug/wasm/gdb-server/gdb-server-thread.cc", -+ "src/debug/wasm/gdb-server/gdb-server-thread.h", -+ "src/debug/wasm/gdb-server/gdb-server.cc", -+ "src/debug/wasm/gdb-server/gdb-server.h", -+ "src/debug/wasm/gdb-server/packet.cc", -+ "src/debug/wasm/gdb-server/packet.h", -+ "src/debug/wasm/gdb-server/session.cc", -+ "src/debug/wasm/gdb-server/session.h", -+ "src/debug/wasm/gdb-server/target.cc", -+ "src/debug/wasm/gdb-server/target.h", -+ "src/debug/wasm/gdb-server/transport.cc", -+ "src/debug/wasm/gdb-server/transport.h", -+ "src/debug/wasm/gdb-server/wasm-module-debug.cc", -+ "src/debug/wasm/gdb-server/wasm-module-debug.h", -+ ] -+ } -+ -+ if (v8_check_header_includes) { -+ # This file will be generated by tools/generate-header-include-checks.py -+ # if the "check_v8_header_includes" gclient variable is set. -+ import("check-header-includes/sources.gni") -+ sources += check_header_includes_sources -+ } -+ -+ if (v8_current_cpu == "x86") { -+ sources += [ ### gcmole(arch:ia32) ### -+ "src/codegen/ia32/assembler-ia32-inl.h", -+ "src/codegen/ia32/assembler-ia32.cc", -+ "src/codegen/ia32/assembler-ia32.h", -+ "src/codegen/ia32/constants-ia32.h", -+ "src/codegen/ia32/cpu-ia32.cc", -+ "src/codegen/ia32/interface-descriptors-ia32.cc", -+ "src/codegen/ia32/macro-assembler-ia32.cc", -+ "src/codegen/ia32/macro-assembler-ia32.h", -+ "src/codegen/ia32/register-ia32.h", -+ "src/codegen/ia32/sse-instr.h", -+ "src/compiler/backend/ia32/code-generator-ia32.cc", -+ "src/compiler/backend/ia32/instruction-codes-ia32.h", -+ "src/compiler/backend/ia32/instruction-scheduler-ia32.cc", -+ "src/compiler/backend/ia32/instruction-selector-ia32.cc", -+ "src/debug/ia32/debug-ia32.cc", -+ "src/deoptimizer/ia32/deoptimizer-ia32.cc", -+ "src/diagnostics/ia32/disasm-ia32.cc", -+ "src/execution/ia32/frame-constants-ia32.cc", -+ "src/execution/ia32/frame-constants-ia32.h", -+ "src/regexp/ia32/regexp-macro-assembler-ia32.cc", -+ "src/regexp/ia32/regexp-macro-assembler-ia32.h", -+ "src/wasm/baseline/ia32/liftoff-assembler-ia32.h", -+ ] -+ } else if (v8_current_cpu == "x64") { -+ sources += [ ### gcmole(arch:x64) ### -+ "src/codegen/x64/assembler-x64-inl.h", -+ "src/codegen/x64/assembler-x64.cc", -+ "src/codegen/x64/assembler-x64.h", -+ "src/codegen/x64/constants-x64.h", -+ "src/codegen/x64/cpu-x64.cc", -+ "src/codegen/x64/fma-instr.h", -+ "src/codegen/x64/interface-descriptors-x64.cc", -+ "src/codegen/x64/macro-assembler-x64.cc", -+ "src/codegen/x64/macro-assembler-x64.h", -+ "src/codegen/x64/register-x64.h", -+ "src/codegen/x64/sse-instr.h", -+ "src/compiler/backend/x64/code-generator-x64.cc", -+ "src/compiler/backend/x64/instruction-codes-x64.h", -+ "src/compiler/backend/x64/instruction-scheduler-x64.cc", -+ "src/compiler/backend/x64/instruction-selector-x64.cc", -+ "src/compiler/backend/x64/unwinding-info-writer-x64.cc", -+ "src/compiler/backend/x64/unwinding-info-writer-x64.h", -+ "src/debug/x64/debug-x64.cc", -+ "src/deoptimizer/x64/deoptimizer-x64.cc", -+ "src/diagnostics/x64/disasm-x64.cc", -+ "src/diagnostics/x64/eh-frame-x64.cc", -+ "src/execution/x64/frame-constants-x64.cc", -+ "src/execution/x64/frame-constants-x64.h", -+ "src/regexp/x64/regexp-macro-assembler-x64.cc", -+ "src/regexp/x64/regexp-macro-assembler-x64.h", -+ "src/third_party/valgrind/valgrind.h", -+ "src/wasm/baseline/x64/liftoff-assembler-x64.h", -+ ] -+ -+ # iOS Xcode simulator builds run on an x64 target. iOS and macOS are both -+ # based on Darwin and thus POSIX-compliant to a similar degree. 
-+ if (is_linux || is_mac || is_ios || target_os == "freebsd") { -+ sources += [ -+ "src/trap-handler/handler-inside-posix.cc", -+ "src/trap-handler/handler-inside-posix.h", -+ "src/trap-handler/handler-outside-posix.cc", -+ ] -+ } -+ if (is_win) { -+ sources += [ -+ "src/diagnostics/unwinding-info-win64.cc", -+ "src/diagnostics/unwinding-info-win64.h", -+ "src/trap-handler/handler-inside-win.cc", -+ "src/trap-handler/handler-inside-win.h", -+ "src/trap-handler/handler-outside-win.cc", -+ ] -+ } -+ } else if (v8_current_cpu == "arm") { -+ sources += [ ### gcmole(arch:arm) ### -+ "src/codegen/arm/assembler-arm-inl.h", -+ "src/codegen/arm/assembler-arm.cc", -+ "src/codegen/arm/assembler-arm.h", -+ "src/codegen/arm/constants-arm.cc", -+ "src/codegen/arm/constants-arm.h", -+ "src/codegen/arm/cpu-arm.cc", -+ "src/codegen/arm/interface-descriptors-arm.cc", -+ "src/codegen/arm/macro-assembler-arm.cc", -+ "src/codegen/arm/macro-assembler-arm.h", -+ "src/codegen/arm/register-arm.h", -+ "src/compiler/backend/arm/code-generator-arm.cc", -+ "src/compiler/backend/arm/instruction-codes-arm.h", -+ "src/compiler/backend/arm/instruction-scheduler-arm.cc", -+ "src/compiler/backend/arm/instruction-selector-arm.cc", -+ "src/compiler/backend/arm/unwinding-info-writer-arm.cc", -+ "src/compiler/backend/arm/unwinding-info-writer-arm.h", -+ "src/debug/arm/debug-arm.cc", -+ "src/deoptimizer/arm/deoptimizer-arm.cc", -+ "src/diagnostics/arm/disasm-arm.cc", -+ "src/diagnostics/arm/eh-frame-arm.cc", -+ "src/execution/arm/frame-constants-arm.cc", -+ "src/execution/arm/frame-constants-arm.h", -+ "src/execution/arm/simulator-arm.cc", -+ "src/execution/arm/simulator-arm.h", -+ "src/regexp/arm/regexp-macro-assembler-arm.cc", -+ "src/regexp/arm/regexp-macro-assembler-arm.h", -+ "src/wasm/baseline/arm/liftoff-assembler-arm.h", -+ ] -+ } else if (v8_current_cpu == "arm64") { -+ sources += [ ### gcmole(arch:arm64) ### -+ "src/codegen/arm64/assembler-arm64-inl.h", -+ "src/codegen/arm64/assembler-arm64.cc", -+ "src/codegen/arm64/assembler-arm64.h", -+ "src/codegen/arm64/constants-arm64.h", -+ "src/codegen/arm64/cpu-arm64.cc", -+ "src/codegen/arm64/decoder-arm64-inl.h", -+ "src/codegen/arm64/decoder-arm64.cc", -+ "src/codegen/arm64/decoder-arm64.h", -+ "src/codegen/arm64/instructions-arm64-constants.cc", -+ "src/codegen/arm64/instructions-arm64.cc", -+ "src/codegen/arm64/instructions-arm64.h", -+ "src/codegen/arm64/interface-descriptors-arm64.cc", -+ "src/codegen/arm64/macro-assembler-arm64-inl.h", -+ "src/codegen/arm64/macro-assembler-arm64.cc", -+ "src/codegen/arm64/macro-assembler-arm64.h", -+ "src/codegen/arm64/register-arm64.cc", -+ "src/codegen/arm64/register-arm64.h", -+ "src/codegen/arm64/utils-arm64.cc", -+ "src/codegen/arm64/utils-arm64.h", -+ "src/compiler/backend/arm64/code-generator-arm64.cc", -+ "src/compiler/backend/arm64/instruction-codes-arm64.h", -+ "src/compiler/backend/arm64/instruction-scheduler-arm64.cc", -+ "src/compiler/backend/arm64/instruction-selector-arm64.cc", -+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc", -+ "src/compiler/backend/arm64/unwinding-info-writer-arm64.h", -+ "src/debug/arm64/debug-arm64.cc", -+ "src/deoptimizer/arm64/deoptimizer-arm64.cc", -+ "src/diagnostics/arm64/disasm-arm64.cc", -+ "src/diagnostics/arm64/disasm-arm64.h", -+ "src/diagnostics/arm64/eh-frame-arm64.cc", -+ "src/execution/arm64/frame-constants-arm64.cc", -+ "src/execution/arm64/frame-constants-arm64.h", -+ "src/execution/arm64/pointer-auth-arm64.cc", -+ "src/execution/arm64/simulator-arm64.cc", -+ 
"src/execution/arm64/simulator-arm64.h", -+ "src/execution/arm64/simulator-logic-arm64.cc", -+ "src/regexp/arm64/regexp-macro-assembler-arm64.cc", -+ "src/regexp/arm64/regexp-macro-assembler-arm64.h", -+ "src/wasm/baseline/arm64/liftoff-assembler-arm64.h", -+ ] -+ if (v8_control_flow_integrity) { -+ sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ] -+ } -+ if (is_win) { -+ sources += [ -+ "src/diagnostics/unwinding-info-win64.cc", -+ "src/diagnostics/unwinding-info-win64.h", -+ ] -+ } -+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { -+ sources += [ ### gcmole(arch:mipsel) ### -+ "src/codegen/mips/assembler-mips-inl.h", -+ "src/codegen/mips/assembler-mips.cc", -+ "src/codegen/mips/assembler-mips.h", -+ "src/codegen/mips/constants-mips.cc", -+ "src/codegen/mips/constants-mips.h", -+ "src/codegen/mips/cpu-mips.cc", -+ "src/codegen/mips/interface-descriptors-mips.cc", -+ "src/codegen/mips/macro-assembler-mips.cc", -+ "src/codegen/mips/macro-assembler-mips.h", -+ "src/codegen/mips/register-mips.h", -+ "src/compiler/backend/mips/code-generator-mips.cc", -+ "src/compiler/backend/mips/instruction-codes-mips.h", -+ "src/compiler/backend/mips/instruction-scheduler-mips.cc", -+ "src/compiler/backend/mips/instruction-selector-mips.cc", -+ "src/debug/mips/debug-mips.cc", -+ "src/deoptimizer/mips/deoptimizer-mips.cc", -+ "src/diagnostics/mips/disasm-mips.cc", -+ "src/execution/mips/frame-constants-mips.cc", -+ "src/execution/mips/frame-constants-mips.h", -+ "src/execution/mips/simulator-mips.cc", -+ "src/execution/mips/simulator-mips.h", -+ "src/regexp/mips/regexp-macro-assembler-mips.cc", -+ "src/regexp/mips/regexp-macro-assembler-mips.h", -+ "src/wasm/baseline/mips/liftoff-assembler-mips.h", -+ ] -+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { -+ sources += [ ### gcmole(arch:mips64el) ### -+ "src/codegen/mips64/assembler-mips64-inl.h", -+ "src/codegen/mips64/assembler-mips64.cc", -+ "src/codegen/mips64/assembler-mips64.h", -+ "src/codegen/mips64/constants-mips64.cc", -+ "src/codegen/mips64/constants-mips64.h", -+ "src/codegen/mips64/cpu-mips64.cc", -+ "src/codegen/mips64/interface-descriptors-mips64.cc", -+ "src/codegen/mips64/macro-assembler-mips64.cc", -+ "src/codegen/mips64/macro-assembler-mips64.h", -+ "src/codegen/mips64/register-mips64.h", -+ "src/compiler/backend/mips64/code-generator-mips64.cc", -+ "src/compiler/backend/mips64/instruction-codes-mips64.h", -+ "src/compiler/backend/mips64/instruction-scheduler-mips64.cc", -+ "src/compiler/backend/mips64/instruction-selector-mips64.cc", -+ "src/debug/mips64/debug-mips64.cc", -+ "src/deoptimizer/mips64/deoptimizer-mips64.cc", -+ "src/diagnostics/mips64/disasm-mips64.cc", -+ "src/execution/mips64/frame-constants-mips64.cc", -+ "src/execution/mips64/frame-constants-mips64.h", -+ "src/execution/mips64/simulator-mips64.cc", -+ "src/execution/mips64/simulator-mips64.h", -+ "src/regexp/mips64/regexp-macro-assembler-mips64.cc", -+ "src/regexp/mips64/regexp-macro-assembler-mips64.h", -+ "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", -+ ] -+ } else if (v8_current_cpu == "ppc") { -+ sources += [ ### gcmole(arch:ppc) ### -+ "src/codegen/ppc/assembler-ppc-inl.h", -+ "src/codegen/ppc/assembler-ppc.cc", -+ "src/codegen/ppc/assembler-ppc.h", -+ "src/codegen/ppc/constants-ppc.cc", -+ "src/codegen/ppc/constants-ppc.h", -+ "src/codegen/ppc/cpu-ppc.cc", -+ "src/codegen/ppc/interface-descriptors-ppc.cc", -+ "src/codegen/ppc/macro-assembler-ppc.cc", -+ "src/codegen/ppc/macro-assembler-ppc.h", 
-+ "src/codegen/ppc/register-ppc.h", -+ "src/compiler/backend/ppc/code-generator-ppc.cc", -+ "src/compiler/backend/ppc/instruction-codes-ppc.h", -+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", -+ "src/compiler/backend/ppc/instruction-selector-ppc.cc", -+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", -+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", -+ "src/debug/ppc/debug-ppc.cc", -+ "src/deoptimizer/ppc/deoptimizer-ppc.cc", -+ "src/diagnostics/ppc/disasm-ppc.cc", -+ "src/diagnostics/ppc/eh-frame-ppc.cc", -+ "src/execution/ppc/frame-constants-ppc.cc", -+ "src/execution/ppc/frame-constants-ppc.h", -+ "src/execution/ppc/simulator-ppc.cc", -+ "src/execution/ppc/simulator-ppc.h", -+ "src/regexp/ppc/regexp-macro-assembler-ppc.cc", -+ "src/regexp/ppc/regexp-macro-assembler-ppc.h", -+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", -+ ] -+ } else if (v8_current_cpu == "ppc64") { -+ sources += [ ### gcmole(arch:ppc64) ### -+ "src/codegen/ppc/assembler-ppc-inl.h", -+ "src/codegen/ppc/assembler-ppc.cc", -+ "src/codegen/ppc/assembler-ppc.h", -+ "src/codegen/ppc/constants-ppc.cc", -+ "src/codegen/ppc/constants-ppc.h", -+ "src/codegen/ppc/cpu-ppc.cc", -+ "src/codegen/ppc/interface-descriptors-ppc.cc", -+ "src/codegen/ppc/macro-assembler-ppc.cc", -+ "src/codegen/ppc/macro-assembler-ppc.h", -+ "src/codegen/ppc/register-ppc.h", -+ "src/compiler/backend/ppc/code-generator-ppc.cc", -+ "src/compiler/backend/ppc/instruction-codes-ppc.h", -+ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", -+ "src/compiler/backend/ppc/instruction-selector-ppc.cc", -+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", -+ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", -+ "src/debug/ppc/debug-ppc.cc", -+ "src/deoptimizer/ppc/deoptimizer-ppc.cc", -+ "src/diagnostics/ppc/disasm-ppc.cc", -+ "src/diagnostics/ppc/eh-frame-ppc.cc", -+ "src/execution/ppc/frame-constants-ppc.cc", -+ "src/execution/ppc/frame-constants-ppc.h", -+ "src/execution/ppc/simulator-ppc.cc", -+ "src/execution/ppc/simulator-ppc.h", -+ "src/regexp/ppc/regexp-macro-assembler-ppc.cc", -+ "src/regexp/ppc/regexp-macro-assembler-ppc.h", -+ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", -+ ] -+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { -+ sources += [ ### gcmole(arch:s390) ### -+ "src/codegen/s390/assembler-s390-inl.h", -+ "src/codegen/s390/assembler-s390.cc", -+ "src/codegen/s390/assembler-s390.h", -+ "src/codegen/s390/constants-s390.cc", -+ "src/codegen/s390/constants-s390.h", -+ "src/codegen/s390/cpu-s390.cc", -+ "src/codegen/s390/interface-descriptors-s390.cc", -+ "src/codegen/s390/macro-assembler-s390.cc", -+ "src/codegen/s390/macro-assembler-s390.h", -+ "src/codegen/s390/register-s390.h", -+ "src/compiler/backend/s390/code-generator-s390.cc", -+ "src/compiler/backend/s390/instruction-codes-s390.h", -+ "src/compiler/backend/s390/instruction-scheduler-s390.cc", -+ "src/compiler/backend/s390/instruction-selector-s390.cc", -+ "src/compiler/backend/s390/unwinding-info-writer-s390.cc", -+ "src/compiler/backend/s390/unwinding-info-writer-s390.h", -+ "src/debug/s390/debug-s390.cc", -+ "src/deoptimizer/s390/deoptimizer-s390.cc", -+ "src/diagnostics/s390/disasm-s390.cc", -+ "src/diagnostics/s390/eh-frame-s390.cc", -+ "src/execution/s390/frame-constants-s390.cc", -+ "src/execution/s390/frame-constants-s390.h", -+ "src/execution/s390/simulator-s390.cc", -+ "src/execution/s390/simulator-s390.h", -+ "src/regexp/s390/regexp-macro-assembler-s390.cc", -+ 
"src/regexp/s390/regexp-macro-assembler-s390.h", -+ "src/wasm/baseline/s390/liftoff-assembler-s390.h", -+ ] -+ } -+ -+ configs = [ ":internal_config" ] -+ -+ defines = [] -+ deps = [ -+ ":torque_generated_definitions", -+ ":v8_headers", -+ ":v8_libbase", -+ ":v8_libsampler", -+ ":v8_shared_internal_headers", -+ ":v8_tracing", -+ ":v8_version", -+ "src/inspector:inspector", -+ ] -+ -+ public_deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_maybe_icu", -+ ] -+ -+ if (v8_enable_i18n_support) { -+ deps += [ ":run_gen-regexp-special-case" ] -+ sources += [ "$target_gen_dir/src/regexp/special-case.cc" ] -+ if (is_win) { -+ deps += [ "//third_party/icu:icudata" ] -+ } -+ } else { -+ sources -= [ -+ "src/builtins/builtins-intl.cc", -+ "src/objects/intl-objects.cc", -+ "src/objects/intl-objects.h", -+ "src/objects/js-break-iterator-inl.h", -+ "src/objects/js-break-iterator.cc", -+ "src/objects/js-break-iterator.h", -+ "src/objects/js-collator-inl.h", -+ "src/objects/js-collator.cc", -+ "src/objects/js-collator.h", -+ "src/objects/js-date-time-format-inl.h", -+ "src/objects/js-date-time-format.cc", -+ "src/objects/js-date-time-format.h", -+ "src/objects/js-display-names-inl.h", -+ "src/objects/js-display-names.cc", -+ "src/objects/js-display-names.h", -+ "src/objects/js-list-format-inl.h", -+ "src/objects/js-list-format.cc", -+ "src/objects/js-list-format.h", -+ "src/objects/js-locale-inl.h", -+ "src/objects/js-locale.cc", -+ "src/objects/js-locale.h", -+ "src/objects/js-number-format-inl.h", -+ "src/objects/js-number-format.cc", -+ "src/objects/js-number-format.h", -+ "src/objects/js-plural-rules-inl.h", -+ "src/objects/js-plural-rules.cc", -+ "src/objects/js-plural-rules.h", -+ "src/objects/js-relative-time-format-inl.h", -+ "src/objects/js-relative-time-format.cc", -+ "src/objects/js-relative-time-format.h", -+ "src/objects/js-segment-iterator-inl.h", -+ "src/objects/js-segment-iterator.cc", -+ "src/objects/js-segment-iterator.h", -+ "src/objects/js-segmenter-inl.h", -+ "src/objects/js-segmenter.cc", -+ "src/objects/js-segmenter.h", -+ "src/runtime/runtime-intl.cc", -+ "src/strings/char-predicates.cc", -+ ] -+ } -+ -+ deps += [ -+ "//third_party/zlib", -+ "//third_party/zlib/google:compression_utils_portable", -+ ] -+ -+ if (v8_postmortem_support) { -+ sources += [ "$target_gen_dir/debug-support.cc" ] -+ deps += [ ":postmortem-metadata" ] -+ } -+ -+ libs = [] -+ -+ if (v8_enable_third_party_heap) { -+ libs += v8_third_party_heap_libs -+ } -+ -+ # Platforms that don't have CAS support need to link atomic library -+ # to implement atomic memory access -+ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || -+ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" || -+ v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || -+ v8_current_cpu == "s390" || v8_current_cpu == "s390x") { -+ libs += [ "atomic" ] -+ } -+ -+ if (v8_enable_vtunetracemark && (is_linux || is_win)) { -+ sources += [ -+ "src/extensions/vtunedomain-support-extension.cc", -+ "src/extensions/vtunedomain-support-extension.h", -+ ] -+ deps += [ "src/third_party/vtune:v8_vtune_trace_mark" ] -+ } -+ -+ if (v8_use_perfetto) { -+ sources -= [ "//base/trace_event/common/trace_event_common.h" ] -+ sources += [ -+ "src/tracing/trace-categories.cc", -+ "src/tracing/trace-categories.h", -+ ] -+ } -+} -+ -+group("v8_base") { -+ public_deps = [ -+ ":v8_base_without_compiler", -+ ":v8_compiler", -+ ] -+} -+ -+v8_source_set("torque_base") { -+ visibility = [ ":*" ] # Only targets in this file can depend on 
this. -+ -+ sources = [ -+ "src/torque/ast.h", -+ "src/torque/cfg.cc", -+ "src/torque/cfg.h", -+ "src/torque/class-debug-reader-generator.cc", -+ "src/torque/constants.h", -+ "src/torque/contextual.h", -+ "src/torque/csa-generator.cc", -+ "src/torque/csa-generator.h", -+ "src/torque/declarable.cc", -+ "src/torque/declarable.h", -+ "src/torque/declaration-visitor.cc", -+ "src/torque/declaration-visitor.h", -+ "src/torque/declarations.cc", -+ "src/torque/declarations.h", -+ "src/torque/earley-parser.cc", -+ "src/torque/earley-parser.h", -+ "src/torque/global-context.cc", -+ "src/torque/global-context.h", -+ "src/torque/implementation-visitor.cc", -+ "src/torque/implementation-visitor.h", -+ "src/torque/instance-type-generator.cc", -+ "src/torque/instructions.cc", -+ "src/torque/instructions.h", -+ "src/torque/parameter-difference.h", -+ "src/torque/server-data.cc", -+ "src/torque/server-data.h", -+ "src/torque/source-positions.cc", -+ "src/torque/source-positions.h", -+ "src/torque/torque-compiler.cc", -+ "src/torque/torque-compiler.h", -+ "src/torque/torque-parser.cc", -+ "src/torque/torque-parser.h", -+ "src/torque/type-inference.cc", -+ "src/torque/type-inference.h", -+ "src/torque/type-oracle.cc", -+ "src/torque/type-oracle.h", -+ "src/torque/type-visitor.cc", -+ "src/torque/type-visitor.h", -+ "src/torque/types.cc", -+ "src/torque/types.h", -+ "src/torque/utils.cc", -+ "src/torque/utils.h", -+ ] -+ -+ deps = [ ":v8_shared_internal_headers" ] -+ -+ public_deps = [ ":v8_libbase" ] -+ -+ # The use of exceptions for Torque in violation of the Chromium style-guide -+ # is justified by the fact that it is only used from the non-essential -+ # language server and can be removed anytime if it causes problems. -+ configs = [ -+ ":internal_config", -+ "//build/config/compiler:exceptions", -+ "//build/config/compiler:rtti", -+ ] -+ -+ remove_configs = [ -+ "//build/config/compiler:no_exceptions", -+ "//build/config/compiler:no_rtti", -+ ] -+ -+ if (is_win && is_asan) { -+ # Due to a bug in ASAN on Windows (chromium:893437), we disable ASAN for -+ # Torque on Windows. -+ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] -+ } -+ -+ if (is_debug && !v8_optimized_debug && v8_enable_fast_torque) { -+ # The :no_optimize config is added to v8_add_configs in v8.gni. -+ remove_configs += [ "//build/config/compiler:no_optimize" ] -+ configs += [ ":always_optimize" ] -+ } -+} -+ -+v8_source_set("torque_ls_base") { -+ sources = [ -+ "src/torque/ls/globals.h", -+ "src/torque/ls/json-parser.cc", -+ "src/torque/ls/json-parser.h", -+ "src/torque/ls/json.cc", -+ "src/torque/ls/json.h", -+ "src/torque/ls/message-handler.cc", -+ "src/torque/ls/message-handler.h", -+ "src/torque/ls/message-macros.h", -+ "src/torque/ls/message-pipe.h", -+ "src/torque/ls/message.h", -+ ] -+ -+ public_deps = [ ":torque_base" ] -+ -+ # The use of exceptions for Torque in violation of the Chromium style-guide -+ # is justified by the fact that it is only used from the non-essential -+ # language server and can be removed anytime if it causes problems. 
-+ configs = [ -+ ":internal_config", -+ "//build/config/compiler:exceptions", -+ "//build/config/compiler:rtti", -+ ] -+ -+ remove_configs = [ -+ "//build/config/compiler:no_exceptions", -+ "//build/config/compiler:no_rtti", -+ ] -+ -+ if (is_win && is_asan) { -+ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] -+ } -+} -+ -+v8_component("v8_libbase") { -+ sources = [ -+ "src/base/address-region.h", -+ "src/base/atomic-utils.h", -+ "src/base/atomicops.h", -+ "src/base/atomicops_internals_atomicword_compat.h", -+ "src/base/atomicops_internals_portable.h", -+ "src/base/atomicops_internals_std.h", -+ "src/base/base-export.h", -+ "src/base/bit-field.h", -+ "src/base/bits-iterator.h", -+ "src/base/bits.cc", -+ "src/base/bits.h", -+ "src/base/bounded-page-allocator.cc", -+ "src/base/bounded-page-allocator.h", -+ "src/base/bounds.h", -+ "src/base/build_config.h", -+ "src/base/compiler-specific.h", -+ "src/base/cpu.cc", -+ "src/base/cpu.h", -+ "src/base/debug/stack_trace.cc", -+ "src/base/debug/stack_trace.h", -+ "src/base/division-by-constant.cc", -+ "src/base/division-by-constant.h", -+ "src/base/enum-set.h", -+ "src/base/export-template.h", -+ "src/base/file-utils.cc", -+ "src/base/file-utils.h", -+ "src/base/flags.h", -+ "src/base/free_deleter.h", -+ "src/base/functional.cc", -+ "src/base/functional.h", -+ "src/base/hashmap-entry.h", -+ "src/base/hashmap.h", -+ "src/base/ieee754.cc", -+ "src/base/ieee754.h", -+ "src/base/iterator.h", -+ "src/base/lazy-instance.h", -+ "src/base/logging.cc", -+ "src/base/logging.h", -+ "src/base/lsan.h", -+ "src/base/macros.h", -+ "src/base/memory.h", -+ "src/base/once.cc", -+ "src/base/once.h", -+ "src/base/optional.h", -+ "src/base/overflowing-math.h", -+ "src/base/page-allocator.cc", -+ "src/base/page-allocator.h", -+ "src/base/platform/condition-variable.cc", -+ "src/base/platform/condition-variable.h", -+ "src/base/platform/elapsed-timer.h", -+ "src/base/platform/mutex.cc", -+ "src/base/platform/mutex.h", -+ "src/base/platform/platform.h", -+ "src/base/platform/semaphore.cc", -+ "src/base/platform/semaphore.h", -+ "src/base/platform/time.cc", -+ "src/base/platform/time.h", -+ "src/base/region-allocator.cc", -+ "src/base/region-allocator.h", -+ "src/base/ring-buffer.h", -+ "src/base/safe_conversions.h", -+ "src/base/safe_conversions_impl.h", -+ "src/base/small-vector.h", -+ "src/base/sys-info.cc", -+ "src/base/sys-info.h", -+ "src/base/template-utils.h", -+ "src/base/timezone-cache.h", -+ "src/base/type-traits.h", -+ "src/base/utils/random-number-generator.cc", -+ "src/base/utils/random-number-generator.h", -+ "src/base/vlq-base64.cc", -+ "src/base/vlq-base64.h", -+ ] -+ -+ configs = [ ":internal_config_base" ] -+ -+ public_configs = [ ":libbase_config" ] -+ -+ deps = [ ":v8_headers" ] -+ -+ public_deps = [] -+ -+ data = [] -+ -+ data_deps = [] -+ -+ defines = [] -+ -+ if (is_component_build) { -+ defines = [ "BUILDING_V8_BASE_SHARED" ] -+ } -+ -+ if (is_posix || is_fuchsia) { -+ sources += [ -+ "src/base/platform/platform-posix.cc", -+ "src/base/platform/platform-posix.h", -+ ] -+ if (current_os != "aix") { -+ sources += [ -+ "src/base/platform/platform-posix-time.cc", -+ "src/base/platform/platform-posix-time.h", -+ ] -+ } -+ } -+ -+ if (is_linux) { -+ sources += [ -+ "src/base/debug/stack_trace_posix.cc", -+ "src/base/platform/platform-linux.cc", -+ ] -+ -+ libs = [ -+ "dl", -+ "rt", -+ ] -+ } else if (current_os == "aix") { -+ sources += [ -+ "src/base/debug/stack_trace_posix.cc", -+ "src/base/platform/platform-aix.cc", -+ ] -+ 
-+ libs = [ -+ "dl", -+ "rt", -+ ] -+ } else if (is_android) { -+ if (current_toolchain == host_toolchain) { -+ libs = [ -+ "dl", -+ "rt", -+ ] -+ if (host_os == "mac") { -+ sources += [ -+ "src/base/debug/stack_trace_posix.cc", -+ "src/base/platform/platform-macos.cc", -+ ] -+ } else { -+ sources += [ -+ "src/base/debug/stack_trace_posix.cc", -+ "src/base/platform/platform-linux.cc", -+ ] -+ } -+ } else { -+ sources += [ -+ "src/base/debug/stack_trace_android.cc", -+ "src/base/platform/platform-linux.cc", -+ ] -+ } -+ } else if (is_fuchsia) { -+ sources += [ -+ "src/base/debug/stack_trace_fuchsia.cc", -+ "src/base/platform/platform-fuchsia.cc", -+ ] -+ } else if (is_mac || is_ios) { -+ sources += [ -+ "src/base/debug/stack_trace_posix.cc", -+ "src/base/platform/platform-macos.cc", -+ ] -+ } else if (is_win) { -+ # TODO(jochen): Add support for cygwin. -+ sources += [ -+ "src/base/debug/stack_trace_win.cc", -+ "src/base/platform/platform-win32.cc", -+ "src/base/win32-headers.h", -+ ] -+ -+ defines += [ "_CRT_RAND_S" ] # for rand_s() -+ -+ libs = [ -+ "dbghelp.lib", -+ "winmm.lib", -+ "ws2_32.lib", -+ ] -+ -+ data_deps += [ "//build/win:runtime_libs" ] -+ } -+ -+ if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") { -+ # Add runtime libs for mips. -+ data += [ -+ "tools/mips_toolchain/sysroot/usr/lib/", -+ "tools/mips_toolchain/mips-mti-linux-gnu/lib", -+ ] -+ } -+ -+ if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" || -+ v8_current_cpu == "mips")) { -+ # Special UBSan 32-bit requirement. -+ sources += [ "src/base/ubsan.cc" ] -+ } -+ -+ if (is_tsan && !build_with_chromium) { -+ data += [ "tools/sanitizers/tsan_suppressions.txt" ] -+ } -+ -+ # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris. -+} -+ -+v8_component("v8_libplatform") { -+ sources = [ -+ "//base/trace_event/common/trace_event_common.h", -+ "include/libplatform/libplatform-export.h", -+ "include/libplatform/libplatform.h", -+ "include/libplatform/v8-tracing.h", -+ "src/libplatform/default-foreground-task-runner.cc", -+ "src/libplatform/default-foreground-task-runner.h", -+ "src/libplatform/default-job.cc", -+ "src/libplatform/default-job.h", -+ "src/libplatform/default-platform.cc", -+ "src/libplatform/default-platform.h", -+ "src/libplatform/default-worker-threads-task-runner.cc", -+ "src/libplatform/default-worker-threads-task-runner.h", -+ "src/libplatform/delayed-task-queue.cc", -+ "src/libplatform/delayed-task-queue.h", -+ "src/libplatform/task-queue.cc", -+ "src/libplatform/task-queue.h", -+ "src/libplatform/tracing/trace-buffer.cc", -+ "src/libplatform/tracing/trace-buffer.h", -+ "src/libplatform/tracing/trace-config.cc", -+ "src/libplatform/tracing/trace-object.cc", -+ "src/libplatform/tracing/trace-writer.cc", -+ "src/libplatform/tracing/trace-writer.h", -+ "src/libplatform/tracing/tracing-controller.cc", -+ "src/libplatform/worker-thread.cc", -+ "src/libplatform/worker-thread.h", -+ ] -+ -+ configs = [ ":internal_config_base" ] -+ -+ if (is_component_build) { -+ defines = [ "BUILDING_V8_PLATFORM_SHARED" ] -+ } -+ -+ public_configs = [ ":libplatform_config" ] -+ -+ deps = [ -+ ":v8_headers", -+ ":v8_libbase", -+ ":v8_tracing", -+ ] -+ -+ if (v8_use_perfetto) { -+ sources -= [ -+ "//base/trace_event/common/trace_event_common.h", -+ "src/libplatform/tracing/trace-buffer.cc", -+ "src/libplatform/tracing/trace-buffer.h", -+ "src/libplatform/tracing/trace-object.cc", -+ "src/libplatform/tracing/trace-writer.cc", -+ "src/libplatform/tracing/trace-writer.h", -+ ] -+ sources += 
[ -+ "src/libplatform/tracing/trace-event-listener.cc", -+ "src/libplatform/tracing/trace-event-listener.h", -+ ] -+ deps += [ -+ # TODO(skyostil): Switch TraceEventListener to protozero. -+ "//third_party/perfetto/protos/perfetto/trace:lite", -+ ] -+ } -+} -+ -+v8_source_set("v8_libsampler") { -+ sources = [ -+ "src/libsampler/sampler.cc", -+ "src/libsampler/sampler.h", -+ ] -+ -+ configs = [ ":internal_config" ] -+ -+ public_configs = [ ":libsampler_config" ] -+ -+ deps = [ ":v8_libbase" ] -+} -+ -+v8_source_set("fuzzer_support") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ sources = [ -+ "test/fuzzer/fuzzer-support.cc", -+ "test/fuzzer/fuzzer-support.h", -+ ] -+ -+ configs = [ ":internal_config_base" ] -+ -+ public_deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ ":v8_maybe_icu", -+ ] -+} -+ -+v8_source_set("cppgc_base") { -+ visibility = [ ":*" ] -+ -+ sources = [ -+ "include/cppgc/allocation.h", -+ "include/cppgc/common.h", -+ "include/cppgc/custom-space.h", -+ "include/cppgc/garbage-collected.h", -+ "include/cppgc/heap.h", -+ "include/cppgc/internal/accessors.h", -+ "include/cppgc/internal/api-contants.h", -+ "include/cppgc/internal/compiler-specific.h", -+ "include/cppgc/internal/finalizer-traits.h", -+ "include/cppgc/internal/gc-info.h", -+ "include/cppgc/internal/persistent-node.h", -+ "include/cppgc/internal/pointer-policies.h", -+ "include/cppgc/internal/prefinalizer-handler.h", -+ "include/cppgc/liveness-broker.h", -+ "include/cppgc/liveness-broker.h", -+ "include/cppgc/macros.h", -+ "include/cppgc/member.h", -+ "include/cppgc/persistent.h", -+ "include/cppgc/platform.h", -+ "include/cppgc/prefinalizer.h", -+ "include/cppgc/source-location.h", -+ "include/cppgc/trace-trait.h", -+ "include/cppgc/type-traits.h", -+ "include/cppgc/visitor.h", -+ "include/v8config.h", -+ "src/heap/cppgc/allocation.cc", -+ "src/heap/cppgc/free-list.cc", -+ "src/heap/cppgc/free-list.h", -+ "src/heap/cppgc/gc-info-table.cc", -+ "src/heap/cppgc/gc-info-table.h", -+ "src/heap/cppgc/gc-info.cc", -+ "src/heap/cppgc/heap-inl.h", -+ "src/heap/cppgc/heap-object-header-inl.h", -+ "src/heap/cppgc/heap-object-header.cc", -+ "src/heap/cppgc/heap-object-header.h", -+ "src/heap/cppgc/heap-page.cc", -+ "src/heap/cppgc/heap-page.h", -+ "src/heap/cppgc/heap-space.cc", -+ "src/heap/cppgc/heap-space.h", -+ "src/heap/cppgc/heap-visitor.h", -+ "src/heap/cppgc/heap.cc", -+ "src/heap/cppgc/heap.h", -+ "src/heap/cppgc/liveness-broker.cc", -+ "src/heap/cppgc/logging.cc", -+ "src/heap/cppgc/marker.cc", -+ "src/heap/cppgc/marker.h", -+ "src/heap/cppgc/marking-visitor.cc", -+ "src/heap/cppgc/marking-visitor.h", -+ "src/heap/cppgc/object-allocator-inl.h", -+ "src/heap/cppgc/object-allocator.cc", -+ "src/heap/cppgc/object-allocator.h", -+ "src/heap/cppgc/object-start-bitmap-inl.h", -+ "src/heap/cppgc/object-start-bitmap.h", -+ "src/heap/cppgc/page-memory-inl.h", -+ "src/heap/cppgc/page-memory.cc", -+ "src/heap/cppgc/page-memory.h", -+ "src/heap/cppgc/persistent-node.cc", -+ "src/heap/cppgc/platform.cc", -+ "src/heap/cppgc/pointer-policies.cc", -+ "src/heap/cppgc/prefinalizer-handler.cc", -+ "src/heap/cppgc/prefinalizer-handler.h", -+ "src/heap/cppgc/raw-heap.cc", -+ "src/heap/cppgc/raw-heap.h", -+ "src/heap/cppgc/sanitizers.h", -+ "src/heap/cppgc/source-location.cc", -+ "src/heap/cppgc/stack.cc", -+ "src/heap/cppgc/stack.h", -+ "src/heap/cppgc/sweeper.cc", -+ "src/heap/cppgc/sweeper.h", -+ "src/heap/cppgc/worklist.h", -+ ] -+ -+ if (is_clang || !is_win) { -+ if (target_cpu == 
"x64") { -+ sources += [ "src/heap/cppgc/asm/x64/push_registers_asm.cc" ] -+ } else if (target_cpu == "x86") { -+ sources += [ "src/heap/cppgc/asm/ia32/push_registers_asm.cc" ] -+ } else if (target_cpu == "arm") { -+ sources += [ "src/heap/cppgc/asm/arm/push_registers_asm.cc" ] -+ } else if (target_cpu == "arm64") { -+ sources += [ "src/heap/cppgc/asm/arm64/push_registers_asm.cc" ] -+ } else if (target_cpu == "ppc64") { -+ sources += [ "src/heap/cppgc/asm/ppc/push_registers_asm.cc" ] -+ } else if (target_cpu == "s390x") { -+ sources += [ "src/heap/cppgc/asm/s390/push_registers_asm.cc" ] -+ } else if (target_cpu == "mipsel") { -+ sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ] -+ } else if (target_cpu == "mips64el") { -+ sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ] -+ } -+ } else if (is_win) { -+ if (target_cpu == "x64") { -+ sources += [ "src/heap/cppgc/asm/x64/push_registers_masm.S" ] -+ } else if (target_cpu == "x86") { -+ sources += [ "src/heap/cppgc/asm/ia32/push_registers_masm.S" ] -+ } else if (target_cpu == "arm64") { -+ sources += [ "src/heap/cppgc/asm/arm64/push_registers_masm.S" ] -+ } -+ } -+ -+ configs = [ -+ ":internal_config", -+ ":cppgc_base_config", -+ ] -+ -+ public_deps = [ ":v8_libbase" ] -+} -+ -+############################################################################### -+# Produce a single static library for embedders -+# -+ -+if (v8_monolithic) { -+ # A component build is not monolithic. -+ assert(!is_component_build) -+ -+ # Using external startup data would produce separate files. -+ assert(!v8_use_external_startup_data) -+ v8_static_library("v8_monolith") { -+ deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ ":v8_libsampler", -+ "//build/win:default_exe_manifest", -+ ] -+ -+ configs = [ ":internal_config" ] -+ } -+} -+ -+v8_static_library("wee8") { -+ deps = [ -+ ":v8_base", -+ ":v8_libbase", -+ ":v8_libplatform", -+ ":v8_libsampler", -+ ":v8_snapshot", -+ "//build/win:default_exe_manifest", -+ ] -+ -+ # TODO: v8dll-main.cc equivalent for shared library builds -+ -+ configs = [ ":internal_config" ] -+ -+ sources = [ -+ ### gcmole(all) ### -+ "src/wasm/c-api.cc", -+ "src/wasm/c-api.h", -+ "third_party/wasm-api/wasm.h", -+ "third_party/wasm-api/wasm.hh", -+ ] -+} -+ -+############################################################################### -+# Executables -+# -+ -+if (current_toolchain == v8_generator_toolchain) { -+ v8_executable("bytecode_builtins_list_generator") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ include_dirs = [ "." ] -+ -+ sources = [ -+ "src/builtins/generate-bytecodes-builtins-list.cc", -+ "src/interpreter/bytecode-operands.cc", -+ "src/interpreter/bytecode-operands.h", -+ "src/interpreter/bytecodes.cc", -+ "src/interpreter/bytecodes.h", -+ ] -+ -+ configs = [ ":internal_config" ] -+ -+ deps = [ -+ ":v8_libbase", -+ "//build/win:default_exe_manifest", -+ ] -+ } -+} -+ -+if (current_toolchain == v8_snapshot_toolchain) { -+ v8_executable("mksnapshot") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. 
-+ -+ sources = [ -+ "src/snapshot/embedded/embedded-empty.cc", -+ "src/snapshot/embedded/embedded-file-writer.cc", -+ "src/snapshot/embedded/embedded-file-writer.h", -+ "src/snapshot/embedded/platform-embedded-file-writer-aix.cc", -+ "src/snapshot/embedded/platform-embedded-file-writer-aix.h", -+ "src/snapshot/embedded/platform-embedded-file-writer-base.cc", -+ "src/snapshot/embedded/platform-embedded-file-writer-base.h", -+ "src/snapshot/embedded/platform-embedded-file-writer-generic.cc", -+ "src/snapshot/embedded/platform-embedded-file-writer-generic.h", -+ "src/snapshot/embedded/platform-embedded-file-writer-mac.cc", -+ "src/snapshot/embedded/platform-embedded-file-writer-mac.h", -+ "src/snapshot/embedded/platform-embedded-file-writer-win.cc", -+ "src/snapshot/embedded/platform-embedded-file-writer-win.h", -+ "src/snapshot/mksnapshot.cc", -+ "src/snapshot/snapshot-empty.cc", -+ ] -+ -+ configs = [ ":internal_config" ] -+ -+ deps = [ -+ ":v8_base_without_compiler", -+ ":v8_compiler_for_mksnapshot", -+ ":v8_init", -+ ":v8_libbase", -+ ":v8_libplatform", -+ ":v8_maybe_icu", -+ ":v8_tracing", -+ "//build/win:default_exe_manifest", -+ ] -+ } -+} -+ -+if (current_toolchain == v8_snapshot_toolchain) { -+ v8_executable("torque") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ sources = [ "src/torque/torque.cc" ] -+ -+ deps = [ -+ ":torque_base", -+ "//build/win:default_exe_manifest", -+ ] -+ -+ # The use of exceptions for Torque in violation of the Chromium style-guide -+ # is justified by the fact that it is only used from the non-essential -+ # language server and can be removed anytime if it causes problems. -+ configs = [ -+ ":internal_config", -+ "//build/config/compiler:exceptions", -+ "//build/config/compiler:rtti", -+ ] -+ -+ remove_configs = [ -+ "//build/config/compiler:no_exceptions", -+ "//build/config/compiler:no_rtti", -+ ] -+ -+ if (is_win && is_asan) { -+ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] -+ } -+ } -+} -+ -+v8_executable("torque-language-server") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ sources = [ "src/torque/ls/torque-language-server.cc" ] -+ -+ deps = [ -+ ":torque_base", -+ ":torque_ls_base", -+ "//build/win:default_exe_manifest", -+ ] -+ -+ # The use of exceptions for Torque in violation of the Chromium style-guide -+ # is justified by the fact that it is only used from the non-essential -+ # language server and can be removed anytime if it causes problems. -+ configs = [ -+ ":internal_config", -+ "//build/config/compiler:exceptions", -+ "//build/config/compiler:rtti", -+ ] -+ -+ remove_configs = [ -+ "//build/config/compiler:no_exceptions", -+ "//build/config/compiler:no_rtti", -+ ] -+ -+ if (is_win && is_asan) { -+ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] -+ } -+} -+ -+if (v8_enable_i18n_support) { -+ if (current_toolchain == v8_generator_toolchain) { -+ v8_executable("gen-regexp-special-case") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. -+ -+ sources = [ "src/regexp/gen-regexp-special-case.cc" ] -+ -+ deps = [ -+ ":v8_libbase", -+ "//build/win:default_exe_manifest", -+ "//third_party/icu", -+ ] -+ -+ configs = [ ":internal_config" ] -+ } -+ } -+ -+ action("run_gen-regexp-special-case") { -+ visibility = [ ":*" ] # Only targets in this file can depend on this. 
-+ -+ script = "tools/run.py" -+ -+ deps = [ ":gen-regexp-special-case($v8_generator_toolchain)" ] -+ -+ output_file = "$target_gen_dir/src/regexp/special-case.cc" -+ -+ outputs = [ output_file ] -+ -+ args = [ -+ "./" + rebase_path( -+ get_label_info( -+ ":gen-regexp-special-case($v8_generator_toolchain)", -+ "root_out_dir") + "/gen-regexp-special-case", -+ root_build_dir), -+ rebase_path(output_file, root_build_dir), -+ ] -+ } -+} -+ -+############################################################################### -+# Public targets -+# -+ -+want_v8_shell = -+ (current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || -+ (current_toolchain == v8_snapshot_toolchain && -+ v8_toolset_for_shell == "host") || -+ (current_toolchain != host_toolchain && v8_toolset_for_shell == "target") -+ -+group("gn_all") { -+ testonly = true -+ -+ deps = [ -+ ":d8", -+ ":v8_fuzzers", -+ ":v8_hello_world", -+ ":v8_sample_process", -+ "test:gn_all", -+ "tools:gn_all", -+ ] -+ -+ if (v8_custom_deps != "") { -+ # Custom dependency from directory under v8/custom_deps. -+ deps += [ v8_custom_deps ] -+ } -+ -+ if (want_v8_shell) { -+ deps += [ ":v8_shell" ] -+ } -+} -+ -+group("v8_python_base") { -+ data = [ ".vpython" ] -+} -+ -+group("v8_clusterfuzz") { -+ testonly = true -+ -+ deps = [ ":d8" ] -+ -+ if (v8_multi_arch_build) { -+ deps += [ -+ ":d8(//build/toolchain/linux:clang_x64)", -+ ":d8(//build/toolchain/linux:clang_x64_v8_arm64)", -+ ":d8(//build/toolchain/linux:clang_x86)", -+ ":d8(//build/toolchain/linux:clang_x86_v8_arm)", -+ ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)", -+ ] -+ } -+} -+ -+group("v8_archive") { -+ testonly = true -+ -+ deps = [ ":d8" ] -+ -+ if (!is_win) { -+ # On windows, cctest doesn't link with v8_static_library. -+ deps += [ "test/cctest:cctest" ] -+ } -+} -+ -+# TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause -+# is removed from Chromium. 
-+if (is_fuchsia && !build_with_chromium) { -+ import("//build/config/fuchsia/rules.gni") -+ -+ cr_fuchsia_package("d8_fuchsia_pkg") { -+ testonly = true -+ binary = ":d8" -+ package_name_override = "d8" -+ } -+ -+ fuchsia_package_runner("d8_fuchsia") { -+ testonly = true -+ package = ":d8_fuchsia_pkg" -+ package_name_override = "d8" -+ } -+} -+ -+group("v8_fuzzers") { -+ testonly = true -+ data_deps = [ -+ ":v8_simple_json_fuzzer", -+ ":v8_simple_multi_return_fuzzer", -+ ":v8_simple_parser_fuzzer", -+ ":v8_simple_regexp_builtins_fuzzer", -+ ":v8_simple_regexp_fuzzer", -+ ":v8_simple_wasm_async_fuzzer", -+ ":v8_simple_wasm_code_fuzzer", -+ ":v8_simple_wasm_compile_fuzzer", -+ ":v8_simple_wasm_fuzzer", -+ ] -+} -+ -+if (is_component_build) { -+ v8_component("v8") { -+ sources = [ "src/utils/v8dll-main.cc" ] -+ -+ public_deps = [ -+ ":v8_base", -+ ":v8_snapshot", -+ ] -+ -+ configs = [ ":internal_config" ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ v8_component("v8_for_testing") { -+ testonly = true -+ -+ sources = [ "src/utils/v8dll-main.cc" ] -+ -+ public_deps = [ -+ ":torque_base", -+ ":torque_ls_base", -+ ":v8_base", -+ ":v8_headers", -+ ":v8_initializers", -+ ":v8_snapshot", -+ ] -+ -+ configs = [ ":internal_config" ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ v8_component("cppgc") { -+ public_deps = [ ":cppgc_base" ] -+ -+ configs = [ ":internal_config" ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ v8_component("cppgc_for_testing") { -+ testonly = true -+ -+ public_deps = [ ":cppgc_base" ] -+ -+ configs = [ ":internal_config" ] -+ public_configs = [ ":external_config" ] -+ } -+} else { -+ group("v8") { -+ public_deps = [ -+ ":v8_base", -+ ":v8_snapshot", -+ ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ group("v8_for_testing") { -+ testonly = true -+ -+ public_deps = [ -+ ":torque_base", -+ ":torque_ls_base", -+ ":v8_base", -+ ":v8_initializers", -+ ":v8_snapshot", -+ ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ group("cppgc") { -+ public_deps = [ ":cppgc_base" ] -+ -+ public_configs = [ ":external_config" ] -+ } -+ -+ group("cppgc_for_testing") { -+ testonly = true -+ -+ public_deps = [ ":cppgc_base" ] -+ -+ public_configs = [ ":external_config" ] -+ } -+} -+ -+v8_executable("d8") { -+ sources = [ -+ "src/d8/async-hooks-wrapper.cc", -+ "src/d8/async-hooks-wrapper.h", -+ "src/d8/d8-console.cc", -+ "src/d8/d8-console.h", -+ "src/d8/d8-js.cc", -+ "src/d8/d8-platforms.cc", -+ "src/d8/d8-platforms.h", -+ "src/d8/d8.cc", -+ "src/d8/d8.h", -+ ] -+ -+ configs = [ -+ # Note: don't use :internal_config here because this target will get -+ # the :external_config applied to it by virtue of depending on :v8, and -+ # you can't have both applied to the same target. 
-+ ":internal_config_base", -+ ":v8_tracing_config", -+ ] -+ -+ deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ ":v8_tracing", -+ "//build/win:default_exe_manifest", -+ ] -+ -+ if (is_posix || is_fuchsia) { -+ sources += [ "src/d8/d8-posix.cc" ] -+ } else if (is_win) { -+ sources += [ "src/d8/d8-windows.cc" ] -+ } -+ -+ if (v8_correctness_fuzzer) { -+ deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ] -+ } -+ -+ defines = [] -+ -+ if (v8_enable_vtunejit) { -+ deps += [ "src/third_party/vtune:v8_vtune" ] -+ } -+} -+ -+v8_executable("v8_hello_world") { -+ sources = [ "samples/hello-world.cc" ] -+ -+ configs = [ -+ # Note: don't use :internal_config here because this target will get -+ # the :external_config applied to it by virtue of depending on :v8, and -+ # you can't have both applied to the same target. -+ ":internal_config_base", -+ ] -+ -+ deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ "//build/win:default_exe_manifest", -+ ] -+} -+ -+v8_executable("v8_sample_process") { -+ sources = [ "samples/process.cc" ] -+ -+ configs = [ -+ # Note: don't use :internal_config here because this target will get -+ # the :external_config applied to it by virtue of depending on :v8, and -+ # you can't have both applied to the same target. -+ ":internal_config_base", -+ ] -+ -+ deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ "//build/win:default_exe_manifest", -+ ] -+} -+ -+if (want_v8_shell) { -+ v8_executable("v8_shell") { -+ sources = [ "samples/shell.cc" ] -+ -+ configs = [ -+ # Note: don't use :internal_config here because this target will get -+ # the :external_config applied to it by virtue of depending on :v8, and -+ # you can't have both applied to the same target. -+ ":internal_config_base", -+ ] -+ -+ deps = [ -+ ":v8", -+ ":v8_libbase", -+ ":v8_libplatform", -+ "//build/win:default_exe_manifest", -+ ] -+ } -+} -+ -+template("v8_fuzzer") { -+ name = target_name -+ forward_variables_from(invoker, "*") -+ v8_executable("v8_simple_" + name) { -+ deps = [ -+ ":" + name, -+ "//build/win:default_exe_manifest", -+ ] -+ -+ sources = [ "test/fuzzer/fuzzer.cc" ] -+ -+ configs = [ ":external_config" ] -+ } -+} -+ -+v8_source_set("json_fuzzer") { -+ sources = [ "test/fuzzer/json.cc" ] -+ -+ deps = [ ":fuzzer_support" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("json_fuzzer") { -+} -+ -+v8_source_set("multi_return_fuzzer") { -+ sources = [ "test/fuzzer/multi-return.cc" ] -+ -+ deps = [ ":fuzzer_support" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("multi_return_fuzzer") { -+} -+ -+v8_source_set("parser_fuzzer") { -+ sources = [ "test/fuzzer/parser.cc" ] -+ -+ deps = [ ":fuzzer_support" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("parser_fuzzer") { -+} -+ -+v8_source_set("regexp_builtins_fuzzer") { -+ sources = [ -+ "test/fuzzer/regexp-builtins.cc", -+ "test/fuzzer/regexp_builtins/mjsunit.js.h", -+ ] -+ -+ deps = [ ":fuzzer_support" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("regexp_builtins_fuzzer") { -+} -+ -+v8_source_set("regexp_fuzzer") { -+ sources = [ "test/fuzzer/regexp.cc" ] -+ -+ deps = [ ":fuzzer_support" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("regexp_fuzzer") { -+} -+ -+v8_source_set("wasm_module_runner") { -+ sources = [ -+ "test/common/wasm/wasm-module-runner.cc", -+ 
"test/common/wasm/wasm-module-runner.h", -+ ] -+ -+ deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_tracing", -+ ] -+ -+ public_deps = [ ":v8_maybe_icu" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_source_set("wasm_fuzzer") { -+ sources = [ "test/fuzzer/wasm.cc" ] -+ -+ deps = [ -+ ":fuzzer_support", -+ ":lib_wasm_fuzzer_common", -+ ":wasm_module_runner", -+ ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("wasm_fuzzer") { -+} -+ -+v8_source_set("wasm_async_fuzzer") { -+ sources = [ "test/fuzzer/wasm-async.cc" ] -+ -+ deps = [ -+ ":fuzzer_support", -+ ":lib_wasm_fuzzer_common", -+ ":wasm_module_runner", -+ ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("wasm_async_fuzzer") { -+} -+ -+v8_source_set("wasm_code_fuzzer") { -+ sources = [ -+ "test/common/wasm/test-signatures.h", -+ "test/fuzzer/wasm-code.cc", -+ ] -+ -+ deps = [ -+ ":fuzzer_support", -+ ":lib_wasm_fuzzer_common", -+ ":wasm_module_runner", -+ ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("wasm_code_fuzzer") { -+} -+ -+v8_source_set("lib_wasm_fuzzer_common") { -+ sources = [ -+ "test/fuzzer/wasm-fuzzer-common.cc", -+ "test/fuzzer/wasm-fuzzer-common.h", -+ ] -+ -+ deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ ":v8_tracing", -+ ] -+ -+ public_deps = [ ":v8_maybe_icu" ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_source_set("wasm_compile_fuzzer") { -+ sources = [ -+ "test/common/wasm/test-signatures.h", -+ "test/fuzzer/wasm-compile.cc", -+ ] -+ -+ deps = [ -+ ":fuzzer_support", -+ ":lib_wasm_fuzzer_common", -+ ":wasm_module_runner", -+ ] -+ -+ configs = [ -+ ":external_config", -+ ":internal_config_base", -+ ] -+} -+ -+v8_fuzzer("wasm_compile_fuzzer") { -+} -+ -+# Target to build all generated .cc files. -+group("v8_generated_cc_files") { -+ testonly = true -+ -+ deps = [ -+ ":generate_bytecode_builtins_list", -+ ":run_torque", -+ "src/inspector:v8_generated_cc_files", -+ ] -+} -+ -+# Protobuf targets, used only when building outside of chromium. -+ -+if (!build_with_chromium && v8_use_perfetto) { -+ # This config is applied to the autogenerated .pb.{cc,h} files in -+ # proto_library.gni. This config is propagated up to the source sets -+ # that depend on generated proto headers. -+ config("protobuf_gen_config") { -+ defines = [ -+ "GOOGLE_PROTOBUF_NO_RTTI", -+ "GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER", -+ ] -+ cflags = [ -+ "-Wno-unknown-warning-option", -+ "-Wno-deprecated", -+ "-Wno-undef", -+ "-Wno-zero-as-null-pointer-constant", -+ "-Wno-thread-safety-attributes", -+ ] -+ include_dirs = [ "third_party/protobuf/src" ] -+ } -+ -+ # Configuration used to build libprotobuf_* and the protoc compiler. -+ config("protobuf_config") { -+ # Apply the lighter supressions and macro definitions from above. 
-+ configs = [ ":protobuf_gen_config" ] -+ -+ if (!is_win) { -+ defines = [ "HAVE_PTHREAD=1" ] -+ } -+ if (is_clang) { -+ cflags = [ -+ "-Wno-unused-private-field", -+ "-Wno-unused-function", -+ "-Wno-inconsistent-missing-override", -+ "-Wno-unknown-warning-option", -+ "-Wno-enum-compare-switch", -+ "-Wno-user-defined-warnings", -+ "-Wno-tautological-constant-compare", -+ ] -+ } -+ if (is_win && is_clang) { -+ cflags += [ "-Wno-microsoft-unqualified-friend" ] -+ } -+ } -+ -+ source_set("protobuf_lite") { -+ sources = [ -+ "third_party/protobuf/src/google/protobuf/any_lite.cc", -+ "third_party/protobuf/src/google/protobuf/arena.cc", -+ "third_party/protobuf/src/google/protobuf/extension_set.cc", -+ "third_party/protobuf/src/google/protobuf/generated_message_table_driven_lite.cc", -+ "third_party/protobuf/src/google/protobuf/generated_message_util.cc", -+ "third_party/protobuf/src/google/protobuf/implicit_weak_message.cc", -+ "third_party/protobuf/src/google/protobuf/io/coded_stream.cc", -+ "third_party/protobuf/src/google/protobuf/io/strtod.cc", -+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream.cc", -+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc", -+ "third_party/protobuf/src/google/protobuf/message_lite.cc", -+ "third_party/protobuf/src/google/protobuf/repeated_field.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/bytestream.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/common.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/int128.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/io_win32.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/status.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/statusor.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/stringpiece.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/structurally_valid.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/strutil.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/time.cc", -+ "third_party/protobuf/src/google/protobuf/wire_format_lite.cc", -+ ] -+ configs -= [ "//build/config/compiler:chromium_code" ] -+ configs += [ -+ "//build/config/compiler:no_chromium_code", -+ ":protobuf_config", -+ ] -+ if (is_win) { -+ configs -= [ "//build/config/win:lean_and_mean" ] -+ } -+ public_configs = [ ":protobuf_gen_config" ] -+ } -+ -+ # This target should be used only by the protoc compiler and by test targets. 
-+ source_set("protobuf_full") { -+ deps = [ ":protobuf_lite" ] -+ sources = [ -+ "third_party/protobuf/src/google/protobuf/any.cc", -+ "third_party/protobuf/src/google/protobuf/any.pb.cc", -+ "third_party/protobuf/src/google/protobuf/api.pb.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/importer.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/parser.cc", -+ "third_party/protobuf/src/google/protobuf/descriptor.cc", -+ "third_party/protobuf/src/google/protobuf/descriptor.pb.cc", -+ "third_party/protobuf/src/google/protobuf/descriptor_database.cc", -+ "third_party/protobuf/src/google/protobuf/duration.pb.cc", -+ "third_party/protobuf/src/google/protobuf/dynamic_message.cc", -+ "third_party/protobuf/src/google/protobuf/empty.pb.cc", -+ "third_party/protobuf/src/google/protobuf/extension_set_heavy.cc", -+ "third_party/protobuf/src/google/protobuf/field_mask.pb.cc", -+ "third_party/protobuf/src/google/protobuf/generated_message_reflection.cc", -+ "third_party/protobuf/src/google/protobuf/generated_message_table_driven.cc", -+ "third_party/protobuf/src/google/protobuf/io/gzip_stream.cc", -+ "third_party/protobuf/src/google/protobuf/io/printer.cc", -+ "third_party/protobuf/src/google/protobuf/io/tokenizer.cc", -+ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.cc", -+ "third_party/protobuf/src/google/protobuf/map_field.cc", -+ "third_party/protobuf/src/google/protobuf/message.cc", -+ "third_party/protobuf/src/google/protobuf/reflection_ops.cc", -+ "third_party/protobuf/src/google/protobuf/service.cc", -+ "third_party/protobuf/src/google/protobuf/source_context.pb.cc", -+ "third_party/protobuf/src/google/protobuf/struct.pb.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/mathlimits.cc", -+ "third_party/protobuf/src/google/protobuf/stubs/substitute.cc", -+ "third_party/protobuf/src/google/protobuf/text_format.cc", -+ "third_party/protobuf/src/google/protobuf/timestamp.pb.cc", -+ "third_party/protobuf/src/google/protobuf/type.pb.cc", -+ "third_party/protobuf/src/google/protobuf/unknown_field_set.cc", -+ "third_party/protobuf/src/google/protobuf/util/delimited_message_util.cc", -+ "third_party/protobuf/src/google/protobuf/util/field_comparator.cc", -+ "third_party/protobuf/src/google/protobuf/util/field_mask_util.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/error_listener.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/object_writer.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/type_info.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/type_info_test_helper.cc", -+ "third_party/protobuf/src/google/protobuf/util/internal/utility.cc", -+ "third_party/protobuf/src/google/protobuf/util/json_util.cc", -+ 
"third_party/protobuf/src/google/protobuf/util/message_differencer.cc", -+ "third_party/protobuf/src/google/protobuf/util/time_util.cc", -+ "third_party/protobuf/src/google/protobuf/util/type_resolver_util.cc", -+ "third_party/protobuf/src/google/protobuf/wire_format.cc", -+ "third_party/protobuf/src/google/protobuf/wrappers.pb.cc", -+ ] -+ configs -= [ "//build/config/compiler:chromium_code" ] -+ configs += [ -+ "//build/config/compiler:no_chromium_code", -+ ":protobuf_config", -+ ] -+ if (is_win) { -+ configs -= [ "//build/config/win:lean_and_mean" ] -+ } -+ public_configs = [ ":protobuf_gen_config" ] -+ } -+ -+ if (current_toolchain == host_toolchain) { -+ source_set("protoc_lib") { -+ deps = [ ":protobuf_full" ] -+ sources = [ -+ "third_party/protobuf/src/google/protobuf/compiler/code_generator.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/command_line_interface.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_file.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_generator.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_padding_optimizer.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_string_field.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/plugin.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/subprocess.cc", -+ "third_party/protobuf/src/google/protobuf/compiler/zip_writer.cc", -+ ] -+ configs -= [ "//build/config/compiler:chromium_code" ] -+ configs += [ -+ "//build/config/compiler:no_chromium_code", -+ ":protobuf_config", -+ ] -+ if (is_win) { -+ configs -= [ "//build/config/win:lean_and_mean" ] -+ } -+ public_configs = [ ":protobuf_gen_config" ] -+ } -+ -+ executable("protoc") { -+ deps = [ -+ ":protoc_lib", -+ "//build/win:default_exe_manifest", -+ ] -+ sources = [ "src/protobuf/protobuf-compiler-main.cc" ] -+ configs -= [ "//build/config/compiler:chromium_code" ] -+ configs += [ "//build/config/compiler:no_chromium_code" ] -+ } -+ } # host_toolchain -+ -+ v8_component("v8_libperfetto") { -+ configs = [ ":v8_tracing_config" ] -+ public_configs = [ "//third_party/perfetto/gn:public_config" ] -+ deps = [ -+ "//third_party/perfetto/src/trace_processor:export_json", -+ "//third_party/perfetto/src/trace_processor:storage_minimal", -+ "//third_party/perfetto/src/tracing:client_api", -+ "//third_party/perfetto/src/tracing/core", -+ -+ # TODO(skyostil): Support non-POSIX platforms. 
-+ "//third_party/perfetto/protos/perfetto/config:cpp", -+ "//third_party/perfetto/protos/perfetto/trace/track_event:zero", -+ "//third_party/perfetto/src/tracing:in_process_backend", -+ "//third_party/perfetto/src/tracing:platform_posix", -+ ] -+ } -+} # if (!build_with_chromium && v8_use_perfetto) -diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni -index b5fb1823..5b8e6f77 100644 ---- a/deps/v8/gni/snapshot_toolchain.gni -+++ b/deps/v8/gni/snapshot_toolchain.gni -@@ -79,7 +79,8 @@ if (v8_snapshot_toolchain == "") { - - if (v8_current_cpu == "x64" || v8_current_cpu == "x86") { - _cpus = v8_current_cpu -- } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") { -+ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || -+ v8_current_cpu == "loong64") { - if (is_win && v8_current_cpu == "arm64") { - # set _cpus to blank for Windows ARM64 so host_toolchain could be - # selected as snapshot toolchain later. -diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h -index 327f9ab3..d895868c 100644 ---- a/deps/v8/src/base/build_config.h -+++ b/deps/v8/src/base/build_config.h -@@ -33,6 +33,9 @@ - #elif defined(__MIPSEB__) || defined(__MIPSEL__) - #define V8_HOST_ARCH_MIPS 1 - #define V8_HOST_ARCH_32_BIT 1 -+#elif defined(__loongarch64) -+#define V8_HOST_ARCH_LOONG64 1 -+#define V8_HOST_ARCH_64_BIT 1 - #elif defined(__PPC64__) || defined(_ARCH_PPC64) - #define V8_HOST_ARCH_PPC64 1 - #define V8_HOST_ARCH_64_BIT 1 -@@ -77,7 +80,8 @@ - // environment as presented by the compiler. - #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ - !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ -- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 -+ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ -+ !V8_TARGET_ARCH_LOONG64 - #if defined(_M_X64) || defined(__x86_64__) - #define V8_TARGET_ARCH_X64 1 - #elif defined(_M_IX86) || defined(__i386__) -@@ -118,6 +122,8 @@ - #define V8_TARGET_ARCH_32_BIT 1 - #elif V8_TARGET_ARCH_MIPS64 - #define V8_TARGET_ARCH_64_BIT 1 -+#elif V8_TARGET_ARCH_LOONG64 -+#define V8_TARGET_ARCH_64_BIT 1 - #elif V8_TARGET_ARCH_PPC - #define V8_TARGET_ARCH_32_BIT 1 - #elif V8_TARGET_ARCH_PPC64 -@@ -156,6 +162,9 @@ - #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) - #error Target architecture mips64 is only supported on mips64 and x64 host - #endif -+#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64)) -+#error Target architecture loong64 is only supported on loong64 and x64 host -+#endif - - // Determine architecture endianness. - #if V8_TARGET_ARCH_IA32 -@@ -166,6 +175,8 @@ - #define V8_TARGET_LITTLE_ENDIAN 1 - #elif V8_TARGET_ARCH_ARM64 - #define V8_TARGET_LITTLE_ENDIAN 1 -+#elif V8_TARGET_ARCH_LOONG64 -+#define V8_TARGET_LITTLE_ENDIAN 1 - #elif V8_TARGET_ARCH_MIPS - #if defined(__MIPSEB__) - #define V8_TARGET_BIG_ENDIAN 1 -diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc -index e5aa4de1..c6bdfcfb 100644 ---- a/deps/v8/src/base/platform/platform-posix.cc -+++ b/deps/v8/src/base/platform/platform-posix.cc -@@ -303,6 +303,10 @@ void* OS::GetRandomMmapAddr() { - // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance - // to fulfill request. - raw_addr &= uint64_t{0xFFFFFF0000}; -+#elif V8_TARGET_ARCH_LOONG64 -+ // 42 bits of virtual addressing. 
Truncate to 40 bits to allow kernel chance -+ // to fulfill request. -+ raw_addr &= uint64_t{0xFFFFFF0000}; - #else - raw_addr &= 0x3FFFF000; - -@@ -486,6 +490,8 @@ void OS::DebugBreak() { - asm("break"); - #elif V8_HOST_ARCH_MIPS64 - asm("break"); -+#elif V8_HOST_ARCH_LOONG64 -+ asm("break 0"); - #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 - asm("twge 2,2"); - #elif V8_HOST_ARCH_IA32 -diff --git a/deps/v8/src/base/platform/platform-posix.cc.orig b/deps/v8/src/base/platform/platform-posix.cc.orig -new file mode 100644 -index 00000000..e5aa4de1 ---- /dev/null -+++ b/deps/v8/src/base/platform/platform-posix.cc.orig -@@ -0,0 +1,1027 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+// Platform-specific code for POSIX goes here. This is not a platform on its -+// own, but contains the parts which are the same across the POSIX platforms -+// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX. -+ -+#include -+#include -+#include -+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) -+#include // for pthread_set_name_np -+#endif -+#include // for sched_yield -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ -+ defined(__NetBSD__) || defined(__OpenBSD__) -+#include // NOLINT, for sysctl -+#endif -+ -+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) -+#define LOG_TAG "v8" -+#include // NOLINT -+#endif -+ -+#include -+#include -+ -+#include "src/base/platform/platform-posix.h" -+ -+#include "src/base/lazy-instance.h" -+#include "src/base/macros.h" -+#include "src/base/platform/platform.h" -+#include "src/base/platform/time.h" -+#include "src/base/utils/random-number-generator.h" -+ -+#ifdef V8_FAST_TLS_SUPPORTED -+#include -+#endif -+ -+#if V8_OS_MACOSX -+#include -+#include -+#endif -+ -+#if V8_OS_LINUX -+#include // NOLINT, for prctl -+#endif -+ -+#if defined(V8_OS_FUCHSIA) -+#include -+#else -+#include -+#endif -+ -+#if !defined(_AIX) && !defined(V8_OS_FUCHSIA) -+#include -+#endif -+ -+#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS -+#define MAP_ANONYMOUS MAP_ANON -+#endif -+ -+#if defined(V8_OS_SOLARIS) -+#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__) -+extern "C" int madvise(caddr_t, size_t, int); -+#else -+extern int madvise(caddr_t, size_t, int); -+#endif -+#endif -+ -+#ifndef MADV_FREE -+#define MADV_FREE MADV_DONTNEED -+#endif -+ -+#if defined(V8_LIBC_GLIBC) -+extern "C" void* __libc_stack_end; // NOLINT -+#endif -+ -+namespace v8 { -+namespace base { -+ -+namespace { -+ -+// 0 is never a valid thread id. -+const pthread_t kNoThread = static_cast(0); -+ -+bool g_hard_abort = false; -+ -+const char* g_gc_fake_mmap = nullptr; -+ -+DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator, -+ GetPlatformRandomNumberGenerator) -+static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER; -+ -+#if !V8_OS_FUCHSIA -+#if V8_OS_MACOSX -+// kMmapFd is used to pass vm_alloc flags to tag the region with the user -+// defined tag 255 This helps identify V8-allocated regions in memory analysis -+// tools like vmmap(1). -+const int kMmapFd = VM_MAKE_TAG(255); -+#else // !V8_OS_MACOSX -+const int kMmapFd = -1; -+#endif // !V8_OS_MACOSX -+ -+const int kMmapFdOffset = 0; -+ -+// TODO(v8:10026): Add the right permission flag to make executable pages -+// guarded. 
[... several hundred lines of platform-posix.cc.orig omitted: the file is a
tool-generated pre-patch backup of deps/v8/src/base/platform/platform-posix.cc
and duplicates it verbatim, carrying no changes of its own ...]
-+ if (check_fast_tls) CheckFastTls(local_key); -+#endif -+ return local_key; -+} -+ -+ -+void Thread::DeleteThreadLocalKey(LocalStorageKey key) { -+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key); -+ int result = pthread_key_delete(pthread_key); -+ DCHECK_EQ(0, result); -+ USE(result); -+} -+ -+ -+void* Thread::GetThreadLocal(LocalStorageKey key) { -+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key); -+ return pthread_getspecific(pthread_key); -+} -+ -+ -+void Thread::SetThreadLocal(LocalStorageKey key, void* value) { -+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key); -+ int result = pthread_setspecific(pthread_key, value); -+ DCHECK_EQ(0, result); -+ USE(result); -+} -+ -+// pthread_getattr_np used below is non portable (hence the _np suffix). We -+// keep this version in POSIX as most Linux-compatible derivatives will -+// support it. MacOS and FreeBSD are different here. -+#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \ -+ !defined(V8_OS_SOLARIS) -+ -+// static -+void* Stack::GetStackStart() { -+ pthread_attr_t attr; -+ int error = pthread_getattr_np(pthread_self(), &attr); -+ if (!error) { -+ void* base; -+ size_t size; -+ error = pthread_attr_getstack(&attr, &base, &size); -+ CHECK(!error); -+ pthread_attr_destroy(&attr); -+ return reinterpret_cast(base) + size; -+ } -+ pthread_attr_destroy(&attr); -+ -+#if defined(V8_LIBC_GLIBC) -+ // pthread_getattr_np can fail for the main thread. In this case -+ // just like NaCl we rely on the __libc_stack_end to give us -+ // the start of the stack. -+ // See https://code.google.com/p/nativeclient/issues/detail?id=3431. -+ return __libc_stack_end; -+#endif // !defined(V8_LIBC_GLIBC) -+ return nullptr; -+} -+ -+#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && -+ // !defined(_AIX) && !defined(V8_OS_SOLARIS) -+ -+// static -+void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); } -+ -+#undef LOG_TAG -+#undef MAP_ANONYMOUS -+#undef MADV_FREE -+ -+} // namespace base -+} // namespace v8 -diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc -new file mode 100644 -index 00000000..ab54e2f5 ---- /dev/null -+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc -@@ -0,0 +1,3191 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/api/api-arguments.h" -+#include "src/codegen/code-factory.h" -+#include "src/debug/debug.h" -+#include "src/deoptimizer/deoptimizer.h" -+#include "src/execution/frame-constants.h" -+#include "src/execution/frames.h" -+#include "src/logging/counters.h" -+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
-+#include "src/codegen/loong64/constants-loong64.h" -+#include "src/codegen/macro-assembler-inl.h" -+#include "src/codegen/register-configuration.h" -+#include "src/heap/heap-inl.h" -+#include "src/objects/cell.h" -+#include "src/objects/foreign.h" -+#include "src/objects/heap-number.h" -+#include "src/objects/js-generator.h" -+#include "src/objects/objects-inl.h" -+#include "src/objects/smi.h" -+#include "src/runtime/runtime.h" -+#include "src/wasm/wasm-linkage.h" -+#include "src/wasm/wasm-objects.h" -+ -+namespace v8 { -+namespace internal { -+ -+#define __ ACCESS_MASM(masm) -+ -+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { -+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), -+ RelocInfo::CODE_TARGET); -+} -+ -+static void GenerateTailCallToReturnedCode(MacroAssembler* masm, -+ Runtime::FunctionId function_id) { -+ // ----------- S t a t e ------------- -+ // -- a1 : target function (preserved for callee) -+ // -- a3 : new target (preserved for callee) -+ // ----------------------------------- -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ // Push a copy of the function onto the stack. -+ // Push a copy of the target function and the new target. -+ __ Push(a1, a3, a1); -+ -+ __ CallRuntime(function_id, 1); -+ __ LoadCodeObjectEntry(a2, a0); -+ // Restore target function and new target. -+ __ Pop(a1, a3); -+ } -+ -+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); -+ __ Jump(a2); -+} -+ -+namespace { -+ -+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; -+ -+void LoadStackLimit(MacroAssembler* masm, Register destination, -+ StackLimitKind kind) { -+ DCHECK(masm->root_array_available()); -+ Isolate* isolate = masm->isolate(); -+ ExternalReference limit = -+ kind == StackLimitKind::kRealStackLimit -+ ? ExternalReference::address_of_real_jslimit(isolate) -+ : ExternalReference::address_of_jslimit(isolate); -+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); -+ -+ intptr_t offset = -+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); -+ CHECK(is_int32(offset)); -+ __ Ld_d(destination, MemOperand(kRootRegister, static_cast(offset))); -+} -+ -+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- a0 : number of arguments -+ // -- a1 : constructor function -+ // -- a3 : new target -+ // -- cp : context -+ // -- ra : return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ -+ // Enter a construct frame. -+ { -+ FrameScope scope(masm, StackFrame::CONSTRUCT); -+ -+ // Preserve the incoming parameters on the stack. -+ __ SmiTag(a0); -+ __ Push(cp, a0); -+ __ SmiUntag(a0); -+ -+ // The receiver for the builtin/api call. -+ __ PushRoot(RootIndex::kTheHoleValue); -+ -+ // Set up pointer to last argument. -+ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); -+ -+ // Copy arguments and receiver to the expression stack. 
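-+ // (Editorial note: the loop below runs t3 from a0 - 1 down to 0, pushing the
-+ // word at t2 + t3 * kPointerSize on each iteration; the highest-numbered
-+ // argument is pushed first and argument 0 ends up on top of the stack.)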
-+ Label loop, entry; -+ __ mov(t3, a0); -+ // ----------- S t a t e ------------- -+ // -- a0: number of arguments (untagged) -+ // -- a3: new target -+ // -- t2: pointer to last argument -+ // -- t3: counter -+ // -- sp[0*kPointerSize]: the hole (receiver) -+ // -- sp[1*kPointerSize]: number of arguments (tagged) -+ // -- sp[2*kPointerSize]: context -+ // ----------------------------------- -+ __ jmp(&entry); -+ __ bind(&loop); -+ __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7); -+ __ Ld_d(t1, MemOperand(t0, 0)); -+ __ push(t1); -+ __ bind(&entry); -+ __ Add_d(t3, t3, Operand(-1)); -+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg)); -+ -+ // Call the function. -+ // a0: number of arguments (untagged) -+ // a1: constructor function -+ // a3: new target -+ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); -+ -+ // Restore context from the frame. -+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); -+ // Restore smi-tagged arguments count from the frame. -+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); -+ // Leave construct frame. -+ } -+ -+ // Remove caller arguments from the stack and return. -+ __ SmiScale(a4, a1, kPointerSizeLog2); -+ __ Add_d(sp, sp, a4); -+ __ Add_d(sp, sp, kPointerSize); -+ __ Ret(); -+} -+ -+static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, -+ Register scratch1, Register scratch2, -+ Label* stack_overflow) { -+ // Check the stack for overflow. We are not trying to catch -+ // interruptions (e.g. debug break and preemption) here, so the "real stack -+ // limit" is checked. -+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); -+ // Make scratch1 the space we have left. The stack might already be overflowed -+ // here which will cause scratch1 to become negative. -+ __ sub_d(scratch1, sp, scratch1); -+ // Check if the arguments will overflow the stack. -+ __ slli_d(scratch2, num_args, kPointerSizeLog2); -+ // Signed comparison. -+ __ Branch(stack_overflow, le, scratch1, Operand(scratch2)); -+} -+ -+} // namespace -+ -+// The construct stub for ES5 constructor functions and ES6 class constructors. -+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- a0: number of arguments (untagged) -+ // -- a1: constructor function -+ // -- a3: new target -+ // -- cp: context -+ // -- ra: return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ -+ // Enter a construct frame. -+ { -+ FrameScope scope(masm, StackFrame::CONSTRUCT); -+ Label post_instantiation_deopt_entry, not_create_implicit_receiver; -+ -+ // Preserve the incoming parameters on the stack. -+ __ SmiTag(a0); -+ __ Push(cp, a0, a1); -+ __ PushRoot(RootIndex::kTheHoleValue); -+ __ Push(a3); -+ -+ // ----------- S t a t e ------------- -+ // -- sp[0*kPointerSize]: new target -+ // -- sp[1*kPointerSize]: padding -+ // -- a1 and sp[2*kPointerSize]: constructor function -+ // -- sp[3*kPointerSize]: number of arguments (tagged) -+ // -- sp[4*kPointerSize]: context -+ // ----------------------------------- -+ -+ __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); -+ __ DecodeField(t2); -+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, -+ ¬_create_implicit_receiver); -+ -+ // If not derived class constructor: Allocate the new receiver object. 
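-+ // (Editorial note: only base constructors reach this point and get a fresh
-+ // object from FastNewObject; derived-class constructors branched away above
-+ // and run with TheHoleValue as receiver, since their instance is created
-+ // only once super() executes.)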
-+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, -+ t2, t3); -+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), -+ RelocInfo::CODE_TARGET); -+ __ Branch(&post_instantiation_deopt_entry); -+ -+ // Else: use TheHoleValue as receiver for constructor call -+ __ bind(¬_create_implicit_receiver); -+ __ LoadRoot(a0, RootIndex::kTheHoleValue); -+ -+ // ----------- S t a t e ------------- -+ // -- a0: receiver -+ // -- Slot 4 / sp[0*kPointerSize]: new target -+ // -- Slot 3 / sp[1*kPointerSize]: padding -+ // -- Slot 2 / sp[2*kPointerSize]: constructor function -+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) -+ // -- Slot 0 / sp[4*kPointerSize]: context -+ // ----------------------------------- -+ // Deoptimizer enters here. -+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( -+ masm->pc_offset()); -+ __ bind(&post_instantiation_deopt_entry); -+ -+ // Restore new target. -+ __ Pop(a3); -+ // Push the allocated receiver to the stack. We need two copies -+ // because we may have to return the original one and the calling -+ // conventions dictate that the called function pops the receiver. -+ __ Push(a0, a0); -+ -+ // ----------- S t a t e ------------- -+ // -- r3: new target -+ // -- sp[0*kPointerSize]: implicit receiver -+ // -- sp[1*kPointerSize]: implicit receiver -+ // -- sp[2*kPointerSize]: padding -+ // -- sp[3*kPointerSize]: constructor function -+ // -- sp[4*kPointerSize]: number of arguments (tagged) -+ // -- sp[5*kPointerSize]: context -+ // ----------------------------------- -+ -+ // Restore constructor function and argument count. -+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); -+ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); -+ __ SmiUntag(a0); -+ -+ // Set up pointer to last argument. -+ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); -+ -+ Label enough_stack_space, stack_overflow; -+ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow); -+ __ Branch(&enough_stack_space); -+ -+ __ bind(&stack_overflow); -+ // Restore the context from the frame. -+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ // Unreachable code. -+ __ break_(0xCC); -+ -+ __ bind(&enough_stack_space); -+ -+ // Copy arguments and receiver to the expression stack. -+ Label loop, entry; -+ __ mov(t3, a0); -+ // ----------- S t a t e ------------- -+ // -- a0: number of arguments (untagged) -+ // -- a3: new target -+ // -- t2: pointer to last argument -+ // -- t3: counter -+ // -- sp[0*kPointerSize]: implicit receiver -+ // -- sp[1*kPointerSize]: implicit receiver -+ // -- sp[2*kPointerSize]: padding -+ // -- a1 and sp[3*kPointerSize]: constructor function -+ // -- sp[4*kPointerSize]: number of arguments (tagged) -+ // -- sp[5*kPointerSize]: context -+ // ----------------------------------- -+ __ jmp(&entry); -+ __ bind(&loop); -+ __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7); -+ __ Ld_d(t1, MemOperand(t0, 0)); -+ __ push(t1); -+ __ bind(&entry); -+ __ Add_d(t3, t3, Operand(-1)); -+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg)); -+ -+ // Call the function. 
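-+ // (Editorial note: the call's result replaces the receiver only when it is a
-+ // JSReceiver; undefined, smis and other primitives fall back to the implicit
-+ // receiver saved on the stack -- see the use_receiver path below.)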
-+ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); -+ -+ // ----------- S t a t e ------------- -+ // -- t5: constructor result -+ // -- sp[0*kPointerSize]: implicit receiver -+ // -- sp[1*kPointerSize]: padding -+ // -- sp[2*kPointerSize]: constructor function -+ // -- sp[3*kPointerSize]: number of arguments -+ // -- sp[4*kPointerSize]: context -+ // ----------------------------------- -+ -+ // Store offset of return address for deoptimizer. -+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( -+ masm->pc_offset()); -+ -+ // Restore the context from the frame. -+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); -+ -+ // If the result is an object (in the ECMA sense), we should get rid -+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7 -+ // on page 74. -+ Label use_receiver, do_throw, leave_frame; -+ -+ // If the result is undefined, we jump out to using the implicit receiver. -+ __ JumpIfRoot(a0, RootIndex::kUndefinedValue, &use_receiver); -+ -+ // Otherwise we do a smi check and fall through to check if the return value -+ // is a valid receiver. -+ -+ // If the result is a smi, it is *not* an object in the ECMA sense. -+ __ JumpIfSmi(a0, &use_receiver); -+ -+ // If the type of the result (stored in its map) is less than -+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. -+ __ GetObjectType(a0, t2, t2); -+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); -+ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE)); -+ __ Branch(&use_receiver); -+ -+ __ bind(&do_throw); -+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); -+ -+ // Throw away the result of the constructor invocation and use the -+ // on-stack receiver as the result. -+ __ bind(&use_receiver); -+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); -+ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw); -+ -+ __ bind(&leave_frame); -+ // Restore smi-tagged arguments count from the frame. -+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); -+ // Leave construct frame. -+ } -+ // Remove caller arguments from the stack and return. -+ __ SmiScale(a4, a1, kPointerSizeLog2); -+ __ Add_d(sp, sp, a4); -+ __ Add_d(sp, sp, kPointerSize); -+ __ Ret(); -+} -+ -+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { -+ Generate_JSBuiltinsConstructStubHelper(masm); -+} -+ -+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, -+ Register sfi_data, -+ Register scratch1) { -+ Label done; -+ -+ __ GetObjectType(sfi_data, scratch1, scratch1); -+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); -+ __ Ld_d(sfi_data, -+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); -+ -+ __ bind(&done); -+} -+ -+// static -+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- a0 : the value to pass to the generator -+ // -- a1 : the JSGeneratorObject to resume -+ // -- ra : return address -+ // ----------------------------------- -+ __ AssertGeneratorObject(a1); -+ -+ // Store input value into generator object. -+ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); -+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3, -+ kRAHasNotBeenSaved, kDontSaveFPRegs); -+ -+ // Load suspended function and context. 
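-+ // (Editorial note: aside from the debug-stepping checks, the rest of this
-+ // trampoline rebuilds a call frame for the suspended function: it pushes the
-+ // receiver plus one stack slot per formal parameter from the generator's
-+ // parameters-and-registers array, then jumps into the function's code with
-+ // the generator object passed in the new.target register a3.)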
-+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); -+ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset)); -+ -+ // Flood function if we are stepping. -+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; -+ Label stepping_prepared; -+ ExternalReference debug_hook = -+ ExternalReference::debug_hook_on_function_call_address(masm->isolate()); -+ __ li(a5, debug_hook); -+ __ Ld_b(a5, MemOperand(a5, 0)); -+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg)); -+ -+ // Flood function if we need to continue stepping in the suspended generator. -+ ExternalReference debug_suspended_generator = -+ ExternalReference::debug_suspended_generator_address(masm->isolate()); -+ __ li(a5, debug_suspended_generator); -+ __ Ld_d(a5, MemOperand(a5, 0)); -+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5)); -+ __ bind(&stepping_prepared); -+ -+ // Check the stack for overflow. We are not trying to catch interruptions -+ // (i.e. debug break and preemption) here, so check the "real stack limit". -+ Label stack_overflow; -+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); -+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); -+ -+ // Push receiver. -+ __ Ld_d(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); -+ __ Push(a5); -+ -+ // ----------- S t a t e ------------- -+ // -- a1 : the JSGeneratorObject to resume -+ // -- a4 : generator function -+ // -- cp : generator context -+ // -- ra : return address -+ // -- sp[0] : generator receiver -+ // ----------------------------------- -+ -+ // Push holes for arguments to generator function. Since the parser forced -+ // context allocation for any variables in generators, the actual argument -+ // values have already been copied into the context and these dummy values -+ // will never be used. -+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_hu( -+ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); -+ __ Ld_d(t1, FieldMemOperand( -+ a1, JSGeneratorObject::kParametersAndRegistersOffset)); -+ { -+ Label done_loop, loop; -+ __ Move(t2, zero_reg); -+ __ bind(&loop); -+ __ Sub_d(a3, a3, Operand(1)); -+ __ Branch(&done_loop, lt, a3, Operand(zero_reg)); -+ __ Alsl_d(kScratchReg, t2, t1, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); -+ __ Push(kScratchReg); -+ __ Add_d(t2, t2, Operand(1)); -+ __ Branch(&loop); -+ __ bind(&done_loop); -+ } -+ -+ // Underlying function needs to have bytecode available. -+ if (FLAG_debug_code) { -+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); -+ GetSharedFunctionInfoBytecode(masm, a3, t5); -+ __ GetObjectType(a3, a3, a3); -+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, -+ Operand(BYTECODE_ARRAY_TYPE)); -+ } -+ -+ // Resume (Ignition/TurboFan) generator object. -+ { -+ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_hu(a0, FieldMemOperand( -+ a0, SharedFunctionInfo::kFormalParameterCountOffset)); -+ // We abuse new.target both to indicate that this is a resume call and to -+ // pass in the generator object. In ordinary calls, new.target is always -+ // undefined because generator functions are non-constructable. 
-+ __ Move(a3, a1); -+ __ Move(a1, a4); -+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); -+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); -+ __ JumpCodeObject(a2); -+ } -+ -+ __ bind(&prepare_step_in_if_stepping); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ Push(a1, a4); -+ // Push hole as receiver since we do not use it for stepping. -+ __ PushRoot(RootIndex::kTheHoleValue); -+ __ CallRuntime(Runtime::kDebugOnFunctionCall); -+ __ Pop(a1); -+ } -+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); -+ __ Branch(&stepping_prepared); -+ -+ __ bind(&prepare_step_in_suspended_generator); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ Push(a1); -+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); -+ __ Pop(a1); -+ } -+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); -+ __ Branch(&stepping_prepared); -+ -+ __ bind(&stack_overflow); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ __ break_(0xCC); // This should be unreachable. -+ } -+} -+ -+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ Push(a1); -+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable); -+} -+ -+// Clobbers scratch1 and scratch2; preserves all other registers. -+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, -+ Register scratch1, Register scratch2) { -+ // Check the stack for overflow. We are not trying to catch -+ // interruptions (e.g. debug break and preemption) here, so the "real stack -+ // limit" is checked. -+ Label okay; -+ LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit); -+ // Make a2 the space we have left. The stack might already be overflowed -+ // here which will cause r2 to become negative. -+ __ sub_d(scratch1, sp, scratch1); -+ // Check if the arguments will overflow the stack. -+ __ slli_d(scratch2, argc, kPointerSizeLog2); -+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison. -+ -+ // Out of stack space. -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ -+ __ bind(&okay); -+} -+ -+namespace { -+ -+// Called with the native C calling convention. The corresponding function -+// signature is either: -+// -+// using JSEntryFunction = GeneratedCode; -+// or -+// using JSEntryFunction = GeneratedCode; -+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, -+ Builtins::Name entry_trampoline) { -+ Label invoke, handler_entry, exit; -+ -+ { -+ NoRootArrayScope no_root_array(masm); -+ -+ // TODO(plind): unify the ABI description here. -+ // Registers: -+ // either -+ // a0: root register value -+ // a1: entry address -+ // a2: function -+ // a3: receiver -+ // a4: argc -+ // a5: argv -+ // or -+ // a0: root register value -+ // a1: microtask_queue -+ // -+ // Stack: -+ // 0 arg slots on mips64 (4 args slots on mips) -+ -+ // Save callee saved registers on the stack. -+ __ MultiPush(kCalleeSaved | ra.bit()); -+ -+ // Save callee-saved FPU registers. -+ __ MultiPushFPU(kCalleeSavedFPU); -+ // Set up the reserved register for 0.0. -+ __ Move(kDoubleRegZero, 0.0); -+ -+ // Initialize the root register. -+ // C calling convention. The first argument is passed in a0. -+ __ mov(kRootRegister, a0); -+ } -+ -+ // a1: entry address -+ // a2: function -+ // a3: receiver -+ // a4: argc -+ // a5: argv -+ -+ // We build an EntryFrame. 
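-+ // (Editorial note: the four words pushed below are a deliberately bad frame
-+ // pointer (-1, so any stray use of it faults), two copies of the stack-frame
-+ // type marker, and the caller's saved c_entry_fp; fp is then pointed at this
-+ // block via the kCallerFPOffset adjustment.)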
-+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used. -+ __ li(s2, Operand(StackFrame::TypeToMarker(type))); -+ __ li(s3, Operand(StackFrame::TypeToMarker(type))); -+ ExternalReference c_entry_fp = ExternalReference::Create( -+ IsolateAddressId::kCEntryFPAddress, masm->isolate()); -+ __ li(s4, c_entry_fp); -+ __ Ld_d(s4, MemOperand(s4, 0)); -+ __ Push(s1, s2, s3, s4); -+ // Set up frame pointer for the frame to be pushed. -+ __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset); -+ -+ // Registers: -+ // either -+ // a1: entry address -+ // a2: function -+ // a3: receiver -+ // a4: argc -+ // a5: argv -+ // or -+ // a1: microtask_queue -+ // -+ // Stack: -+ // caller fp | -+ // function slot | entry frame -+ // context slot | -+ // bad fp (0xFF...F) | -+ // callee saved registers + ra -+ // [ O32: 4 args slots] -+ // args -+ -+ // If this is the outermost JS call, set js_entry_sp value. -+ Label non_outermost_js; -+ ExternalReference js_entry_sp = ExternalReference::Create( -+ IsolateAddressId::kJSEntrySPAddress, masm->isolate()); -+ __ li(s1, js_entry_sp); -+ __ Ld_d(s2, MemOperand(s1, 0)); -+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg)); -+ __ St_d(fp, MemOperand(s1, 0)); -+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); -+ Label cont; -+ __ b(&cont); -+ __ nop(); // Branch delay slot nop. -+ __ bind(&non_outermost_js); -+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME)); -+ __ bind(&cont); -+ __ push(s3); -+ -+ // Jump to a faked try block that does the invoke, with a faked catch -+ // block that sets the pending exception. -+ __ jmp(&invoke); -+ __ bind(&handler_entry); -+ -+ // Store the current pc as the handler offset. It's used later to create the -+ // handler table. -+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos()); -+ -+ // Caught exception: Store result (exception) in the pending exception -+ // field in the JSEnv and return a failure sentinel. Coming in here the -+ // fp will be invalid because the PushStackHandler below sets it to 0 to -+ // signal the existence of the JSEntry frame. -+ __ li(s1, ExternalReference::Create( -+ IsolateAddressId::kPendingExceptionAddress, masm->isolate())); -+ __ St_d(a0, -+ MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0. -+ __ LoadRoot(a0, RootIndex::kException); -+ __ b(&exit); // b exposes branch delay slot. -+ __ nop(); // Branch delay slot nop. -+ -+ // Invoke: Link this frame into the handler chain. -+ __ bind(&invoke); -+ __ PushStackHandler(); -+ // If an exception not caught by another handler occurs, this handler -+ // returns control to the code after the bal(&invoke) above, which -+ // restores all kCalleeSaved registers (including cp and fp) to their -+ // saved values before returning a failure to C. -+ // -+ // Registers: -+ // either -+ // a0: root register value -+ // a1: entry address -+ // a2: function -+ // a3: receiver -+ // a4: argc -+ // a5: argv -+ // or -+ // a0: root register value -+ // a1: microtask_queue -+ // -+ // Stack: -+ // handler frame -+ // entry frame -+ // callee saved registers + ra -+ // [ O32: 4 args slots] -+ // args -+ // -+ // Invoke the function by calling through JS entry trampoline builtin and -+ // pop the faked function when we return. -+ -+ Handle trampoline_code = -+ masm->isolate()->builtins()->builtin_handle(entry_trampoline); -+ __ Call(trampoline_code, RelocInfo::CODE_TARGET); -+ -+ // Unlink this frame from the handler chain. 
-+ __ PopStackHandler(); -+ -+ __ bind(&exit); // a0 holds result -+ // Check if the current stack frame is marked as the outermost JS frame. -+ Label non_outermost_js_2; -+ __ pop(a5); -+ __ Branch(&non_outermost_js_2, ne, a5, -+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); -+ __ li(a5, js_entry_sp); -+ __ St_d(zero_reg, MemOperand(a5, 0)); -+ __ bind(&non_outermost_js_2); -+ -+ // Restore the top frame descriptors from the stack. -+ __ pop(a5); -+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, -+ masm->isolate())); -+ __ St_d(a5, MemOperand(a4, 0)); -+ -+ // Reset the stack to the callee saved registers. -+ __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset); -+ -+ // Restore callee-saved fpu registers. -+ __ MultiPopFPU(kCalleeSavedFPU); -+ -+ // Restore callee saved registers from the stack. -+ __ MultiPop(kCalleeSaved | ra.bit()); -+ // Return. -+ __ Jump(ra); -+} -+ -+} // namespace -+ -+void Builtins::Generate_JSEntry(MacroAssembler* masm) { -+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, -+ Builtins::kJSEntryTrampoline); -+} -+ -+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) { -+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY, -+ Builtins::kJSConstructEntryTrampoline); -+} -+ -+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) { -+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, -+ Builtins::kRunMicrotasksTrampoline); -+} -+ -+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, -+ bool is_construct) { -+ // ----------- S t a t e ------------- -+ // -- a1: new.target -+ // -- a2: function -+ // -- a3: receiver_pointer -+ // -- a4: argc -+ // -- a5: argv -+ // ----------------------------------- -+ -+ // Enter an internal frame. -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ -+ // Setup the context (we need to use the caller context from the isolate). -+ ExternalReference context_address = ExternalReference::Create( -+ IsolateAddressId::kContextAddress, masm->isolate()); -+ __ li(cp, context_address); -+ __ Ld_d(cp, MemOperand(cp, 0)); -+ -+ // Push the function and the receiver onto the stack. -+ __ Push(a2, a3); -+ -+ // Check if we have enough stack space to push all arguments. -+ // Clobbers a0 and a3. -+ Generate_CheckStackOverflow(masm, a4, t5, a3); -+ -+ // Setup new.target, function and argc. -+ __ mov(a3, a1); -+ __ mov(a1, a2); -+ __ mov(a0, a4); -+ -+ // a0: argc -+ // a1: function -+ // a3: new.target -+ // a5: argv -+ -+ // Copy arguments to the stack in a loop. -+ // a3: argc -+ // a5: argv, i.e. points to first arg -+ Label loop, entry; -+ __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7); -+ __ b(&entry); -+ __ nop(); // Branch delay slot nop. -+ // s1 points past last arg. -+ __ bind(&loop); -+ __ Ld_d(s2, MemOperand(a5, 0)); // Read next parameter. -+ __ addi_d(a5, a5, kPointerSize); -+ __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle. -+ __ push(s2); // Push parameter. -+ __ bind(&entry); -+ __ Branch(&loop, ne, a5, Operand(s1)); -+ -+ // a0: argc -+ // a1: function -+ // a3: new.target -+ -+ // Initialize all JavaScript callee-saved registers, since they will be seen -+ // by the garbage collector as part of handlers. -+ __ LoadRoot(a4, RootIndex::kUndefinedValue); -+ __ mov(a5, a4); -+ __ mov(s1, a4); -+ __ mov(s2, a4); -+ __ mov(s3, a4); -+ __ mov(s4, a4); -+ __ mov(s5, a4); -+ // s6 holds the root address. Do not clobber. -+ // s7 is cp. Do not init. -+ -+ // Invoke the code. -+ Handle builtin = is_construct -+ ? 
BUILTIN_CODE(masm->isolate(), Construct) -+ : masm->isolate()->builtins()->Call(); -+ __ Call(builtin, RelocInfo::CODE_TARGET); -+ -+ // Leave internal frame. -+ } -+ __ Jump(ra); -+} -+ -+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { -+ Generate_JSEntryTrampolineHelper(masm, false); -+} -+ -+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { -+ Generate_JSEntryTrampolineHelper(masm, true); -+} -+ -+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { -+ // a1: microtask_queue -+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); -+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); -+} -+ -+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, -+ Register optimized_code, -+ Register closure, -+ Register scratch1, -+ Register scratch2) { -+ // Store code entry in the closure. -+ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); -+ __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. -+ __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, -+ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, -+ OMIT_SMI_CHECK); -+} -+ -+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { -+ Register args_count = scratch; -+ -+ // Get the arguments + receiver count. -+ __ Ld_d(args_count, -+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); -+ __ Ld_w(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); -+ -+ // Leave the frame (also dropping the register file). -+ __ LeaveFrame(StackFrame::INTERPRETED); -+ -+ // Drop receiver + arguments. -+ __ Add_d(sp, sp, args_count); -+} -+ -+// Tail-call |function_id| if |smi_entry| == |marker| -+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, -+ Register smi_entry, -+ OptimizationMarker marker, -+ Runtime::FunctionId function_id) { -+ Label no_match; -+ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker))); -+ GenerateTailCallToReturnedCode(masm, function_id); -+ __ bind(&no_match); -+} -+ -+static void TailCallOptimizedCodeSlot(MacroAssembler* masm, -+ Register optimized_code_entry, -+ Register scratch1, Register scratch2) { -+ // ----------- S t a t e ------------- -+ // -- a3 : new target (preserved for callee if needed, and caller) -+ // -- a1 : target function (preserved for callee if needed, and caller) -+ // ----------------------------------- -+ DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); -+ -+ Register closure = a1; -+ -+ // Check if the optimized code is marked for deopt. If it is, call the -+ // runtime to clear it. -+ Label found_deoptimized_code; -+ __ Ld_d(a5, FieldMemOperand(optimized_code_entry, -+ Code::kCodeDataContainerOffset)); -+ __ Ld_w(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset)); -+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit)); -+ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg)); -+ -+ // Optimized code is good, get it into the closure and link the closure into -+ // the optimized functions list, then tail call the optimized code. -+ // The feedback vector is no longer used, so re-use it as a scratch -+ // register. 
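-+ // (Editorial note: at this point the optimized Code object has passed the
-+ // marked-for-deoptimization check above, so it is installed into the
-+ // closure's code field, with a write barrier, and tail-called. The
-+ // found_deoptimized_code path below instead evicts the stale code via
-+ // Runtime::kEvictOptimizedCodeSlot and re-enters the closure's code.)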
-+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, -+ scratch1, scratch2); -+ -+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); -+ __ LoadCodeObjectEntry(a2, optimized_code_entry); -+ __ Jump(a2); -+ -+ // Optimized code slot contains deoptimized code, evict it and re-enter the -+ // closure's code. -+ __ bind(&found_deoptimized_code); -+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); -+} -+ -+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, -+ Register optimization_marker) { -+ // ----------- S t a t e ------------- -+ // -- a3 : new target (preserved for callee if needed, and caller) -+ // -- a1 : target function (preserved for callee if needed, and caller) -+ // -- feedback vector (preserved for caller if needed) -+ // -- optimization_marker : a Smi containing a non-zero optimization marker. -+ // ----------------------------------- -+ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker)); -+ -+ // TODO(v8:8394): The logging of first execution will break if -+ // feedback vectors are not allocated. We need to find a different way of -+ // logging these events if required. -+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, -+ OptimizationMarker::kLogFirstExecution, -+ Runtime::kFunctionFirstExecution); -+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, -+ OptimizationMarker::kCompileOptimized, -+ Runtime::kCompileOptimized_NotConcurrent); -+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, -+ OptimizationMarker::kCompileOptimizedConcurrent, -+ Runtime::kCompileOptimized_Concurrent); -+ -+ // Otherwise, the marker is InOptimizationQueue, so fall through hoping -+ // that an interrupt will eventually update the slot with optimized code. -+ if (FLAG_debug_code) { -+ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel, -+ optimization_marker, -+ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); -+ } -+} -+ -+// Advance the current bytecode offset. This simulates what all bytecode -+// handlers do upon completion of the underlying operation. Will bail out to a -+// label if the bytecode (without prefix) is a return bytecode. Will not advance -+// the bytecode offset if the current bytecode is a JumpLoop, instead just -+// re-executing the JumpLoop to jump to the correct bytecode. -+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, -+ Register bytecode_array, -+ Register bytecode_offset, -+ Register bytecode, Register scratch1, -+ Register scratch2, Register scratch3, -+ Label* if_return) { -+ Register bytecode_size_table = scratch1; -+ -+ // The bytecode offset value will be increased by one in wide and extra wide -+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we -+ // will restore the original bytecode. In order to simplify the code, we have -+ // a backup of it. -+ Register original_bytecode_offset = scratch3; -+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode, -+ bytecode_size_table, original_bytecode_offset)); -+ __ Move(original_bytecode_offset, bytecode_offset); -+ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address()); -+ -+ // Check if the bytecode is a Wide or ExtraWide prefix bytecode. 
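-+ // The four prefix bytecodes occupy values 0..3 (asserted below); even
-+ // values select the 2x (wide) operand scale and odd values the 4x
-+ // (extra wide) scale, so a single bit test distinguishes the two tables.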
-+ Label process_bytecode, extra_wide;
-+ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
-+ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-+ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
-+ STATIC_ASSERT(3 ==
-+ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
-+ __ Branch(&process_bytecode, hi, bytecode, Operand(3));
-+ __ And(scratch2, bytecode, Operand(1));
-+ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
-+
-+ // Load the next bytecode and update table to the wide scaled table.
-+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
-+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
-+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
-+ __ Add_d(bytecode_size_table, bytecode_size_table,
-+ Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-+ __ jmp(&process_bytecode);
-+
-+ __ bind(&extra_wide);
-+ // Load the next bytecode and update table to the extra wide scaled table.
-+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
-+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
-+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
-+ __ Add_d(bytecode_size_table, bytecode_size_table,
-+ Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-+
-+ __ bind(&process_bytecode);
-+
-+// Bailout to the return label if this is a return bytecode.
-+#define JUMP_IF_EQUAL(NAME) \
-+ __ Branch(if_return, eq, bytecode, \
-+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-+#undef JUMP_IF_EQUAL
-+
-+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
-+ // of the loop.
-+ Label end, not_jump_loop;
-+ __ Branch(&not_jump_loop, ne, bytecode,
-+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-+ // We need to restore the original bytecode_offset since we might have
-+ // increased it to skip the wide / extra-wide prefix bytecode.
-+ __ Move(bytecode_offset, original_bytecode_offset);
-+ __ jmp(&end);
-+
-+ __ bind(&not_jump_loop);
-+ // Otherwise, load the size of the current bytecode and advance the offset.
-+ __ Alsl_d(scratch2, bytecode, bytecode_size_table, 2, t7);
-+ __ Ld_w(scratch2, MemOperand(scratch2, 0));
-+ __ Add_d(bytecode_offset, bytecode_offset, scratch2);
-+
-+ __ bind(&end);
-+}
-+
-+// Generate code for entering a JS function with the interpreter.
-+// On entry to the function the receiver and arguments have been pushed on the
-+// stack left to right. The actual argument count matches the formal parameter
-+// count expected by the function.
-+//
-+// The live registers are:
-+// o a1: the JS function object being called.
-+// o a3: the incoming new target or generator object
-+// o cp: our context
-+// o fp: the caller's frame pointer
-+// o sp: stack pointer
-+// o ra: return address
-+//
-+// The function builds an interpreter frame. See InterpreterFrameConstants in
-+// frames.h for its layout.
-+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
-+ Register closure = a1;
-+ Register feedback_vector = a2;
-+
-+ // Get the bytecode array from the function object and load it into
-+ // kInterpreterBytecodeArrayRegister.
-+ __ Ld_d(t5, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-+ __ Ld_d(kInterpreterBytecodeArrayRegister,
-+ FieldMemOperand(t5, SharedFunctionInfo::kFunctionDataOffset));
-+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
-+
-+ // The bytecode array could have been flushed from the shared function info;
-+ // if so, call into CompileLazy.
-+ Label compile_lazy;
-+ __ GetObjectType(kInterpreterBytecodeArrayRegister, t5, t5);
-+ __ Branch(&compile_lazy, ne, t5, Operand(BYTECODE_ARRAY_TYPE));
-+
-+ // Load the feedback vector from the closure.
-+ __ Ld_d(feedback_vector,
-+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-+ __ Ld_d(feedback_vector,
-+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
-+
-+ Label push_stack_frame;
-+ // Check if feedback vector is valid. If valid, check for optimized code
-+ // and update invocation count. Otherwise, setup the stack frame.
-+ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-+ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
-+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
-+
-+ // Read off the optimized code slot in the feedback vector, and if there
-+ // is optimized code or an optimization marker, call that instead.
-+ Register optimized_code_entry = a4;
-+ __ Ld_d(optimized_code_entry,
-+ FieldMemOperand(feedback_vector,
-+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
-+
-+ // Check if the optimized code slot is not empty.
-+ Label optimized_code_slot_not_empty;
-+
-+ __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry,
-+ Operand(Smi::FromEnum(OptimizationMarker::kNone)));
-+
-+ Label not_optimized;
-+ __ bind(&not_optimized);
-+
-+ // Increment invocation count for the function.
-+ __ Ld_w(a4, FieldMemOperand(feedback_vector,
-+ FeedbackVector::kInvocationCountOffset));
-+ __ Add_w(a4, a4, Operand(1));
-+ __ St_w(a4, FieldMemOperand(feedback_vector,
-+ FeedbackVector::kInvocationCountOffset));
-+
-+ // Open a frame scope to indicate that there is a frame on the stack. The
-+ // MANUAL indicates that the scope shouldn't actually generate code to set up
-+ // the frame (that is done below).
-+ __ bind(&push_stack_frame);
-+ FrameScope frame_scope(masm, StackFrame::MANUAL);
-+ __ PushStandardFrame(closure);
-+
-+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-+ // 8-bit fields next to each other, so we could just optimize by writing a
-+ // 16-bit value. These static asserts guard that our assumption is valid.
-+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
-+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-+ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-+ BytecodeArray::kOsrNestingLevelOffset));
-+
-+ // Load initial bytecode offset.
-+ __ li(kInterpreterBytecodeOffsetRegister,
-+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-+
-+ // Push bytecode array and Smi tagged bytecode array offset.
-+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
-+ __ Push(kInterpreterBytecodeArrayRegister, a4);
-+
-+ // Allocate the local and temporary register file on the stack.
-+ Label stack_overflow;
-+ {
-+ // Load frame size (word) from the BytecodeArray object.
-+ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-+ BytecodeArray::kFrameSizeOffset));
-+
-+ // Do a stack check to ensure we don't go over the limit.
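-+ // a4 holds the frame size in bytes, so the prospective stack pointer
-+ // (sp - a4) must stay above the real stack limit.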
-+ __ Sub_d(a5, sp, Operand(a4));
-+ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit);
-+ __ Branch(&stack_overflow, lo, a5, Operand(a2));
-+
-+ // If ok, push undefined as the initial value for all register file entries.
-+ Label loop_header;
-+ Label loop_check;
-+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
-+ __ Branch(&loop_check);
-+ __ bind(&loop_header);
-+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
-+ __ push(a5);
-+ // Continue loop if not done.
-+ __ bind(&loop_check);
-+ __ Sub_d(a4, a4, Operand(kPointerSize));
-+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
-+ }
-+
-+ // If the bytecode array has a valid incoming new target or generator object
-+ // register, initialize it with incoming value which was passed in a3.
-+ Label no_incoming_new_target_or_generator_register;
-+ __ Ld_w(a5, FieldMemOperand(
-+ kInterpreterBytecodeArrayRegister,
-+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
-+ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
-+ Operand(zero_reg));
-+ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
-+ __ St_d(a3, MemOperand(a5, 0));
-+ __ bind(&no_incoming_new_target_or_generator_register);
-+
-+ // Perform interrupt stack check.
-+ // TODO(solanes): Merge with the real stack limit check above.
-+ Label stack_check_interrupt, after_stack_check_interrupt;
-+ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit);
-+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
-+ __ bind(&after_stack_check_interrupt);
-+
-+ // Load accumulator as undefined.
-+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-+
-+ // Load the dispatch table into a register and dispatch to the bytecode
-+ // handler at the current bytecode offset.
-+ Label do_dispatch;
-+ __ bind(&do_dispatch);
-+ __ li(kInterpreterDispatchTableRegister,
-+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
-+ __ Add_d(t5, kInterpreterBytecodeArrayRegister,
-+ kInterpreterBytecodeOffsetRegister);
-+ __ Ld_bu(a7, MemOperand(t5, 0));
-+ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
-+ kPointerSizeLog2, t7);
-+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
-+ __ Call(kJavaScriptCallCodeStartRegister);
-+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
-+
-+ // Any returns to the entry trampoline are either due to the return bytecode
-+ // or the interpreter tail calling a builtin and then a dispatch.
-+
-+ // Get bytecode array and bytecode offset from the stack frame.
-+ __ Ld_d(kInterpreterBytecodeArrayRegister,
-+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
-+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-+
-+ // Either return, or advance to the next bytecode and dispatch.
-+ Label do_return;
-+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
-+ kInterpreterBytecodeOffsetRegister);
-+ __ Ld_bu(a1, MemOperand(a1, 0));
-+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
-+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
-+ a4, &do_return);
-+ __ jmp(&do_dispatch);
-+
-+ __ bind(&do_return);
-+ // The return value is in a0.
-+ LeaveInterpreterFrame(masm, t0);
-+ __ Jump(ra);
-+
-+ __ bind(&stack_check_interrupt);
-+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
-+ // for the call to the StackGuard.
-+ __ li(kInterpreterBytecodeOffsetRegister,
-+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
-+ kFunctionEntryBytecodeOffset)));
-+ __ St_d(kInterpreterBytecodeOffsetRegister,
-+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-+ __ CallRuntime(Runtime::kStackGuard);
-+
-+ // After the call, restore the bytecode array, bytecode offset and accumulator
-+ // registers again. Also, restore the bytecode offset in the stack to its
-+ // previous value.
-+ __ Ld_d(kInterpreterBytecodeArrayRegister,
-+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-+ __ li(kInterpreterBytecodeOffsetRegister,
-+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-+
-+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
-+ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-+
-+ __ jmp(&after_stack_check_interrupt);
-+
-+ __ bind(&optimized_code_slot_not_empty);
-+ Label maybe_has_optimized_code;
-+ // Check if optimized code marker is actually a weak reference to the
-+ // optimized code as opposed to an optimization marker.
-+ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code, t7);
-+ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry);
-+ // Fall through if there's no runnable optimized code.
-+ __ jmp(&not_optimized);
-+
-+ __ bind(&maybe_has_optimized_code);
-+ // Load code entry from the weak reference; if it was cleared, resume
-+ // execution of unoptimized code.
-+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &not_optimized);
-+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
-+
-+ __ bind(&compile_lazy);
-+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-+ // Unreachable code.
-+ __ break_(0xCC);
-+
-+ __ bind(&stack_overflow);
-+ __ CallRuntime(Runtime::kThrowStackOverflow);
-+ // Unreachable code.
-+ __ break_(0xCC);
-+}
-+
-+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
-+ Register num_args, Register index,
-+ Register scratch, Register scratch2) {
-+ // Find the address of the last argument.
-+ __ mov(scratch2, num_args);
-+ __ slli_d(scratch2, scratch2, kPointerSizeLog2);
-+ __ Sub_d(scratch2, index, Operand(scratch2));
-+
-+ // Push the arguments.
-+ Label loop_header, loop_check;
-+ __ Branch(&loop_check);
-+ __ bind(&loop_header);
-+ __ Ld_d(scratch, MemOperand(index, 0));
-+ __ Add_d(index, index, Operand(-kPointerSize));
-+ __ push(scratch);
-+ __ bind(&loop_check);
-+ __ Branch(&loop_header, hi, index, Operand(scratch2));
-+}
-+
-+// static
-+void Builtins::Generate_InterpreterPushArgsThenCallImpl(
-+ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
-+ InterpreterPushArgsMode mode) {
-+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
-+ // ----------- S t a t e -------------
-+ // -- a0 : the number of arguments (not including the receiver)
-+ // -- a2 : the address of the first argument to be pushed. Subsequent
-+ // arguments should be consecutive above this, in the same order as
-+ // they are to be pushed onto the stack.
-+ // -- a1 : the target to call (can be any Object).
-+ // -----------------------------------
-+ Label stack_overflow;
-+
-+ __ Add_d(a3, a0, Operand(1)); // Add one for receiver.
-+
-+ // Push "undefined" as the receiver arg if we need to.
-+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
-+ __ PushRoot(RootIndex::kUndefinedValue);
-+ __ Sub_d(a3, a3, Operand(1)); // Subtract one for receiver.
-+ } -+ -+ Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow); -+ -+ // This function modifies a2, t0 and a4. -+ Generate_InterpreterPushArgs(masm, a3, a2, a4, t0); -+ -+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { -+ __ Pop(a2); // Pass the spread in a register -+ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread -+ } -+ -+ // Call the target. -+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { -+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), -+ RelocInfo::CODE_TARGET); -+ } else { -+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), -+ RelocInfo::CODE_TARGET); -+ } -+ -+ __ bind(&stack_overflow); -+ { -+ __ TailCallRuntime(Runtime::kThrowStackOverflow); -+ // Unreachable code. -+ __ break_(0xCC); -+ } -+} -+ -+// static -+void Builtins::Generate_InterpreterPushArgsThenConstructImpl( -+ MacroAssembler* masm, InterpreterPushArgsMode mode) { -+ // ----------- S t a t e ------------- -+ // -- a0 : argument count (not including receiver) -+ // -- a3 : new target -+ // -- a1 : constructor to call -+ // -- a2 : allocation site feedback if available, undefined otherwise. -+ // -- a4 : address of the first argument -+ // ----------------------------------- -+ Label stack_overflow; -+ -+ // Push a slot for the receiver. -+ __ push(zero_reg); -+ -+ Generate_StackOverflowCheck(masm, a0, a5, t0, &stack_overflow); -+ -+ // This function modifies t0, a4 and a5. -+ Generate_InterpreterPushArgs(masm, a0, a4, a5, t0); -+ -+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { -+ __ Pop(a2); // Pass the spread in a register -+ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread -+ } else { -+ __ AssertUndefinedOrAllocationSite(a2, t0); -+ } -+ -+ if (mode == InterpreterPushArgsMode::kArrayFunction) { -+ __ AssertFunction(a1); -+ -+ // Tail call to the function-specific construct stub (still in the caller -+ // context at this point). -+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), -+ RelocInfo::CODE_TARGET); -+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { -+ // Call the constructor with a0, a1, and a3 unmodified. -+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), -+ RelocInfo::CODE_TARGET); -+ } else { -+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); -+ // Call the constructor with a0, a1, and a3 unmodified. -+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); -+ } -+ -+ __ bind(&stack_overflow); -+ { -+ __ TailCallRuntime(Runtime::kThrowStackOverflow); -+ // Unreachable code. -+ __ break_(0xCC); -+ } -+} -+ -+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { -+ // Set the return address to the correct point in the interpreter entry -+ // trampoline. -+ Label builtin_trampoline, trampoline_loaded; -+ Smi interpreter_entry_return_pc_offset( -+ masm->isolate()->heap()->interpreter_entry_return_pc_offset()); -+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); -+ -+ // If the SFI function_data is an InterpreterData, the function will have a -+ // custom copy of the interpreter entry trampoline for profiling. If so, -+ // get the custom trampoline, otherwise grab the entry address of the global -+ // trampoline. 
-+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); -+ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); -+ __ GetObjectType(t0, kInterpreterDispatchTableRegister, -+ kInterpreterDispatchTableRegister); -+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, -+ Operand(INTERPRETER_DATA_TYPE)); -+ -+ __ Ld_d(t0, -+ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); -+ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); -+ __ Branch(&trampoline_loaded); -+ -+ __ bind(&builtin_trampoline); -+ __ li(t0, ExternalReference:: -+ address_of_interpreter_entry_trampoline_instruction_start( -+ masm->isolate())); -+ __ Ld_d(t0, MemOperand(t0, 0)); -+ -+ __ bind(&trampoline_loaded); -+ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); -+ -+ // Initialize the dispatch table register. -+ __ li(kInterpreterDispatchTableRegister, -+ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); -+ -+ // Get the bytecode array pointer from the frame. -+ __ Ld_d(kInterpreterBytecodeArrayRegister, -+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); -+ -+ if (FLAG_debug_code) { -+ // Check function data field is actually a BytecodeArray object. -+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); -+ __ Assert(ne, -+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, -+ kScratchReg, Operand(zero_reg)); -+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); -+ __ Assert(eq, -+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, -+ a1, Operand(BYTECODE_ARRAY_TYPE)); -+ } -+ -+ // Get the target bytecode offset from the frame. -+ __ SmiUntag(kInterpreterBytecodeOffsetRegister, -+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); -+ -+ if (FLAG_debug_code) { -+ Label okay; -+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, -+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); -+ // Unreachable code. -+ __ break_(0xCC); -+ __ bind(&okay); -+ } -+ -+ // Dispatch to the target bytecode. -+ __ Add_d(a1, kInterpreterBytecodeArrayRegister, -+ kInterpreterBytecodeOffsetRegister); -+ __ Ld_bu(a7, MemOperand(a1, 0)); -+ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7); -+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0)); -+ __ Jump(kJavaScriptCallCodeStartRegister); -+} -+ -+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { -+ // Advance the current bytecode offset stored within the given interpreter -+ // stack frame. This simulates what all bytecode handlers do upon completion -+ // of the underlying operation. -+ __ Ld_d(kInterpreterBytecodeArrayRegister, -+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); -+ __ Ld_d(kInterpreterBytecodeOffsetRegister, -+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); -+ __ SmiUntag(kInterpreterBytecodeOffsetRegister); -+ -+ Label enter_bytecode, function_entry_bytecode; -+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, -+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + -+ kFunctionEntryBytecodeOffset)); -+ -+ // Load the current bytecode. -+ __ Add_d(a1, kInterpreterBytecodeArrayRegister, -+ kInterpreterBytecodeOffsetRegister); -+ __ Ld_bu(a1, MemOperand(a1, 0)); -+ -+ // Advance to the next bytecode. 
-+ Label if_return; -+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, -+ kInterpreterBytecodeOffsetRegister, a1, a2, a3, -+ a4, &if_return); -+ -+ __ bind(&enter_bytecode); -+ // Convert new bytecode offset to a Smi and save in the stackframe. -+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); -+ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); -+ -+ Generate_InterpreterEnterBytecode(masm); -+ -+ __ bind(&function_entry_bytecode); -+ // If the code deoptimizes during the implicit function entry stack interrupt -+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is -+ // not a valid bytecode offset. Detect this case and advance to the first -+ // actual bytecode. -+ __ li(kInterpreterBytecodeOffsetRegister, -+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); -+ __ Branch(&enter_bytecode); -+ -+ // We should never take the if_return path. -+ __ bind(&if_return); -+ __ Abort(AbortReason::kInvalidBytecodeAdvance); -+} -+ -+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { -+ Generate_InterpreterEnterBytecode(masm); -+} -+ -+namespace { -+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, -+ bool java_script_builtin, -+ bool with_result) { -+ const RegisterConfiguration* config(RegisterConfiguration::Default()); -+ int allocatable_register_count = config->num_allocatable_general_registers(); -+ if (with_result) { -+ // Overwrite the hole inserted by the deoptimizer with the return value from -+ // the LAZY deopt point. -+ __ St_d(a0, -+ MemOperand( -+ sp, config->num_allocatable_general_registers() * kPointerSize + -+ BuiltinContinuationFrameConstants::kFixedFrameSize)); -+ } -+ for (int i = allocatable_register_count - 1; i >= 0; --i) { -+ int code = config->GetAllocatableGeneralCode(i); -+ __ Pop(Register::from_code(code)); -+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { -+ __ SmiUntag(Register::from_code(code)); -+ } -+ } -+ __ Ld_d( -+ fp, -+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); -+ // Load builtin index (stored as a Smi) and use it to get the builtin start -+ // address from the builtins table. -+ __ Pop(t0); -+ __ Add_d(sp, sp, -+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); -+ __ Pop(ra); -+ __ LoadEntryFromBuiltinIndex(t0); -+ __ Jump(t0); -+} -+} // namespace -+ -+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { -+ Generate_ContinueToBuiltinHelper(masm, false, false); -+} -+ -+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( -+ MacroAssembler* masm) { -+ Generate_ContinueToBuiltinHelper(masm, false, true); -+} -+ -+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { -+ Generate_ContinueToBuiltinHelper(masm, true, false); -+} -+ -+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( -+ MacroAssembler* masm) { -+ Generate_ContinueToBuiltinHelper(masm, true, true); -+} -+ -+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kNotifyDeoptimized); -+ } -+ -+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code()); -+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); -+ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state. 
-+ __ Ret();
-+}
-+
-+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
-+ {
-+ FrameScope scope(masm, StackFrame::INTERNAL);
-+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
-+ }
-+
-+ // If the code object is null, just return to the caller.
-+ __ Ret(eq, a0, Operand(Smi::zero()));
-+
-+ // Drop the handler frame that is sitting on top of the actual
-+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
-+ __ LeaveFrame(StackFrame::STUB);
-+
-+ // Load deoptimization data from the code object.
-+ // <deopt_data> = <code>[#deoptimization_data_offset]
-+ __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
-+
-+ // Load the OSR entrypoint offset from the deoptimization data.
-+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
-+ DeoptimizationData::kOsrPcOffsetIndex) -
-+ kHeapObjectTag));
-+
-+ // Compute the target address = code_obj + header_size + osr_offset
-+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
-+ __ Add_d(a0, a0, a1);
-+ __ addi_d(ra, a0, Code::kHeaderSize - kHeapObjectTag);
-+
-+ // And "return" to the OSR entry point of the function.
-+ __ Ret();
-+}
-+
-+// static
-+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : argc
-+ // -- sp[0] : argArray
-+ // -- sp[4] : thisArg
-+ // -- sp[8] : receiver
-+ // -----------------------------------
-+
-+ Register argc = a0;
-+ Register arg_array = a2;
-+ Register receiver = a1;
-+ Register this_arg = a5;
-+ Register undefined_value = a3;
-+ Register scratch = a4;
-+
-+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
-+
-+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
-+ // arguments from the stack (including the receiver), and push thisArg (if
-+ // present) instead.
-+ {
-+ // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
-+ // consistent state for a simple pop operation.
-+
-+ __ Sub_d(sp, sp, Operand(2 * kPointerSize));
-+ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7);
-+ __ mov(scratch, argc);
-+ __ Pop(this_arg, arg_array); // Overwrite argc
-+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
-+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
-+ __ Sub_d(scratch, scratch, Operand(1));
-+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
-+ __ Ld_d(receiver, MemOperand(sp, 0));
-+ __ St_d(this_arg, MemOperand(sp, 0));
-+ }
-+
-+ // ----------- S t a t e -------------
-+ // -- a2 : argArray
-+ // -- a1 : receiver
-+ // -- a3 : undefined root value
-+ // -- sp[0] : thisArg
-+ // -----------------------------------
-+
-+ // 2. We don't need to check explicitly for callable receiver here,
-+ // since that's the first thing the Call/CallWithArrayLike builtins
-+ // will do.
-+
-+ // 3. Tail call with no arguments if argArray is null or undefined.
-+ Label no_arguments;
-+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
-+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
-+
-+ // 4a. Apply the receiver to the given argArray.
-+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
-+ RelocInfo::CODE_TARGET);
-+
-+ // 4b. The argArray is either null or undefined, so we tail call without any
-+ // arguments to the receiver.
-+ __ bind(&no_arguments);
-+ {
-+ __ mov(a0, zero_reg);
-+ DCHECK(receiver == a1);
-+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-+ }
-+}
-+
-+// static
-+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-+ // 1. Make sure we have at least one argument.
-+ // a0: actual number of arguments
-+ {
-+ Label done;
-+ __ Branch(&done, ne, a0, Operand(zero_reg));
-+ __ PushRoot(RootIndex::kUndefinedValue);
-+ __ Add_d(a0, a0, Operand(1));
-+ __ bind(&done);
-+ }
-+
-+ // 2. Get the function to call (passed as receiver) from the stack.
-+ // a0: actual number of arguments
-+ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7);
-+ __ Ld_d(a1, MemOperand(kScratchReg, 0));
-+
-+ // 3. Shift arguments and return address one slot down on the stack
-+ // (overwriting the original receiver). Adjust argument count to make
-+ // the original first argument the new receiver.
-+ // a0: actual number of arguments
-+ // a1: function
-+ {
-+ Label loop;
-+ // Calculate the copy start address (destination). Copy end address is sp.
-+ __ Alsl_d(a2, a0, sp, kPointerSizeLog2, t7);
-+
-+ __ bind(&loop);
-+ __ Ld_d(kScratchReg, MemOperand(a2, -kPointerSize));
-+ __ St_d(kScratchReg, MemOperand(a2, 0));
-+ __ Sub_d(a2, a2, Operand(kPointerSize));
-+ __ Branch(&loop, ne, a2, Operand(sp));
-+ // Adjust the actual number of arguments and remove the top element
-+ // (which is a copy of the last argument).
-+ __ Sub_d(a0, a0, Operand(1));
-+ __ Pop();
-+ }
-+
-+ // 4. Call the callable.
-+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-+}
-+
-+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : argc
-+ // -- sp[0] : argumentsList (if argc == 3)
-+ // -- sp[4] : thisArgument (if argc >= 2)
-+ // -- sp[8] : target (if argc >= 1)
-+ // -- sp[12] : receiver
-+ // -----------------------------------
-+
-+ Register argc = a0;
-+ Register arguments_list = a2;
-+ Register target = a1;
-+ Register this_argument = a5;
-+ Register undefined_value = a3;
-+ Register scratch = a4;
-+
-+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
-+
-+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
-+ // remove all arguments from the stack (including the receiver), and push
-+ // thisArgument (if present) instead.
-+ {
-+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
-+ // consistent state for a simple pop operation.
-+
-+ __ Sub_d(sp, sp, Operand(3 * kPointerSize));
-+ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7);
-+ __ mov(scratch, argc);
-+ __ Pop(target, this_argument, arguments_list);
-+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
-+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
-+ __ Movz(target, undefined_value, scratch); // if argc == 0
-+ __ Sub_d(scratch, scratch, Operand(1));
-+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
-+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
-+ __ Sub_d(scratch, scratch, Operand(1));
-+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
-+
-+ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver
-+ }
-+
-+ // ----------- S t a t e -------------
-+ // -- a2 : argumentsList
-+ // -- a1 : target
-+ // -- a3 : undefined root value
-+ // -- sp[0] : thisArgument
-+ // -----------------------------------
-+
-+ // 2.
We don't need to check explicitly for callable target here,
-+ // since that's the first thing the Call/CallWithArrayLike builtins
-+ // will do.
-+
-+ // 3. Apply the target to the given argumentsList.
-+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
-+ RelocInfo::CODE_TARGET);
-+}
-+
-+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : argc
-+ // -- sp[0] : new.target (optional) (dummy value if argc <= 2)
-+ // -- sp[4] : argumentsList (dummy value if argc <= 1)
-+ // -- sp[8] : target (dummy value if argc == 0)
-+ // -- sp[12] : receiver
-+ // -----------------------------------
-+ Register argc = a0;
-+ Register arguments_list = a2;
-+ Register target = a1;
-+ Register new_target = a3;
-+ Register undefined_value = a4;
-+ Register scratch = a5;
-+
-+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
-+
-+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
-+ // new.target into a3 (if present, otherwise use target), remove all
-+ // arguments from the stack (including the receiver), and push thisArgument
-+ // (if present) instead.
-+ {
-+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
-+ // consistent state for a simple pop operation.
-+
-+ __ Sub_d(sp, sp, Operand(3 * kPointerSize));
-+ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7);
-+ __ mov(scratch, argc);
-+ __ Pop(target, arguments_list, new_target);
-+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
-+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
-+ __ Movz(target, undefined_value, scratch); // if argc == 0
-+ __ Sub_d(scratch, scratch, Operand(1));
-+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
-+ __ Movz(new_target, target, scratch); // if argc == 1
-+ __ Sub_d(scratch, scratch, Operand(1));
-+ __ Movz(new_target, target, scratch); // if argc == 2
-+
-+ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
-+ }
-+
-+ // ----------- S t a t e -------------
-+ // -- a2 : argumentsList
-+ // -- a1 : target
-+ // -- a3 : new.target
-+ // -- sp[0] : receiver (undefined)
-+ // -----------------------------------
-+
-+ // 2. We don't need to check explicitly for constructor target here,
-+ // since that's the first thing the Construct/ConstructWithArrayLike
-+ // builtins will do.
-+
-+ // 3. We don't need to check explicitly for constructor new.target here,
-+ // since that's the second thing the Construct/ConstructWithArrayLike
-+ // builtins will do.
-+
-+ // 4. Construct the target with the given new.target and argumentsList.
-+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
-+ RelocInfo::CODE_TARGET);
-+}
-+
-+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-+ __ SmiTag(a0);
-+ __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
-+ __ Push(ra, fp, a4, a1, a0);
-+ __ Push(Smi::zero()); // Padding.
-+ __ Add_d(fp, sp,
-+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
-+}
-+
-+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : result being passed through
-+ // -----------------------------------
-+ // Get the number of arguments passed (as a smi), tear down the frame and
-+ // then tear down the parameters.
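-+ // The count was Smi-tagged by EnterArgumentsAdaptorFrame, so SmiScale
-+ // below converts it straight into a byte offset for popping the args.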
-+ __ Ld_d(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-+ __ mov(sp, fp);
-+ __ Pop(ra, fp);
-+ __ SmiScale(a4, a1, kPointerSizeLog2);
-+ __ Add_d(sp, sp, a4);
-+ // Adjust for the receiver.
-+ __ Add_d(sp, sp, Operand(kPointerSize));
-+}
-+
-+// static
-+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-+ Handle<Code> code) {
-+ // ----------- S t a t e -------------
-+ // -- a1 : target
-+ // -- a0 : number of parameters on the stack (not including the receiver)
-+ // -- a2 : arguments list (a FixedArray)
-+ // -- a4 : len (number of elements to push from args)
-+ // -- a3 : new.target (for [[Construct]])
-+ // -----------------------------------
-+ if (masm->emit_debug_code()) {
-+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
-+ Label ok, fail;
-+ __ AssertNotSmi(a2);
-+ __ GetObjectType(a2, t8, t8);
-+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
-+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
-+ __ Branch(&ok, eq, a4, Operand(zero_reg));
-+ // Fall through.
-+ __ bind(&fail);
-+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
-+
-+ __ bind(&ok);
-+ }
-+
-+ Register args = a2;
-+ Register len = a4;
-+
-+ // Check for stack overflow.
-+ Label stack_overflow;
-+ Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
-+
-+ // Push arguments onto the stack (thisArgument is already on the stack).
-+ {
-+ Label done, push, loop;
-+ Register src = a6;
-+ Register scratch = len;
-+
-+ __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
-+ __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct().
-+ __ Branch(&done, eq, len, Operand(zero_reg));
-+ __ slli_d(scratch, len, kPointerSizeLog2);
-+ __ Sub_d(scratch, sp, Operand(scratch));
-+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
-+ __ bind(&loop);
-+ __ Ld_d(a5, MemOperand(src, 0));
-+ __ Branch(&push, ne, a5, Operand(t1));
-+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
-+ __ bind(&push);
-+ __ addi_d(src, src, kPointerSize);
-+ __ Push(a5);
-+ __ Branch(&loop, ne, scratch, Operand(sp));
-+ __ bind(&done);
-+ }
-+
-+ // Tail-call to the actual Call or Construct builtin.
-+ __ Jump(code, RelocInfo::CODE_TARGET);
-+
-+ __ bind(&stack_overflow);
-+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
-+}
-+
-+// static
-+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-+ CallOrConstructMode mode,
-+ Handle<Code> code) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : the number of arguments (not including the receiver)
-+ // -- a3 : the new.target (for [[Construct]] calls)
-+ // -- a1 : the target to call (can be any Object)
-+ // -- a2 : start index (to support rest parameters)
-+ // -----------------------------------
-+
-+ // Check if new.target has a [[Construct]] internal method.
-+ if (mode == CallOrConstructMode::kConstruct) { -+ Label new_target_constructor, new_target_not_constructor; -+ __ JumpIfSmi(a3, &new_target_not_constructor); -+ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset)); -+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); -+ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask)); -+ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg)); -+ __ bind(&new_target_not_constructor); -+ { -+ FrameScope scope(masm, StackFrame::MANUAL); -+ __ EnterFrame(StackFrame::INTERNAL); -+ __ Push(a3); -+ __ CallRuntime(Runtime::kThrowNotConstructor); -+ } -+ __ bind(&new_target_constructor); -+ } -+ -+ // Check if we have an arguments adaptor frame below the function frame. -+ Label arguments_adaptor, arguments_done; -+ __ Ld_d(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); -+ __ Ld_d(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset)); -+ __ Branch(&arguments_adaptor, eq, a7, -+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); -+ { -+ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); -+ __ Ld_d(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_hu(a7, FieldMemOperand( -+ a7, SharedFunctionInfo::kFormalParameterCountOffset)); -+ __ mov(a6, fp); -+ } -+ __ Branch(&arguments_done); -+ __ bind(&arguments_adaptor); -+ { -+ // Just get the length from the ArgumentsAdaptorFrame. -+ __ SmiUntag(a7, -+ MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset)); -+ } -+ __ bind(&arguments_done); -+ -+ Label stack_done, stack_overflow; -+ __ Sub_w(a7, a7, a2); -+ __ Branch(&stack_done, le, a7, Operand(zero_reg)); -+ { -+ // Check for stack overflow. -+ Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow); -+ -+ // Forward the arguments from the caller frame. -+ { -+ Label loop; -+ __ Add_d(a0, a0, a7); -+ __ bind(&loop); -+ { -+ __ Alsl_d(kScratchReg, a7, a6, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize)); -+ __ push(kScratchReg); -+ __ Sub_w(a7, a7, Operand(1)); -+ __ Branch(&loop, ne, a7, Operand(zero_reg)); -+ } -+ } -+ } -+ __ Branch(&stack_done); -+ __ bind(&stack_overflow); -+ __ TailCallRuntime(Runtime::kThrowStackOverflow); -+ __ bind(&stack_done); -+ -+ // Tail-call to the {code} handler. -+ __ Jump(code, RelocInfo::CODE_TARGET); -+} -+ -+// static -+void Builtins::Generate_CallFunction(MacroAssembler* masm, -+ ConvertReceiverMode mode) { -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the function to call (checked to be a JSFunction) -+ // ----------------------------------- -+ __ AssertFunction(a1); -+ -+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) -+ // Check that function is not a "classConstructor". -+ Label class_constructor; -+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); -+ __ And(kScratchReg, a3, -+ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); -+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg)); -+ -+ // Enter the context of the function; ToObject has to run in the function -+ // context, and we also need to take the global proxy from the function -+ // context in case of conversion. -+ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); -+ // We need to convert the receiver for non-native sloppy mode functions. 
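-+ // Conversion is skipped when either IsNativeBit or IsStrictBit is set:
-+ // only sloppy-mode, non-native functions reach the ToObject path below.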
-+ Label done_convert; -+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); -+ __ And(kScratchReg, a3, -+ Operand(SharedFunctionInfo::IsNativeBit::kMask | -+ SharedFunctionInfo::IsStrictBit::kMask)); -+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); -+ { -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the function to call (checked to be a JSFunction) -+ // -- a2 : the shared function info. -+ // -- cp : the function context. -+ // ----------------------------------- -+ -+ if (mode == ConvertReceiverMode::kNullOrUndefined) { -+ // Patch receiver to global proxy. -+ __ LoadGlobalProxy(a3); -+ } else { -+ Label convert_to_object, convert_receiver; -+ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); -+ __ Ld_d(a3, MemOperand(kScratchReg, 0)); -+ __ JumpIfSmi(a3, &convert_to_object); -+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); -+ __ GetObjectType(a3, a4, a4); -+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE)); -+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) { -+ Label convert_global_proxy; -+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); -+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); -+ __ bind(&convert_global_proxy); -+ { -+ // Patch receiver to global proxy. -+ __ LoadGlobalProxy(a3); -+ } -+ __ Branch(&convert_receiver); -+ } -+ __ bind(&convert_to_object); -+ { -+ // Convert receiver using ToObject. -+ // TODO(bmeurer): Inline the allocation here to avoid building the frame -+ // in the fast case? (fall back to AllocateInNewSpace?) -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ SmiTag(a0); -+ __ Push(a0, a1); -+ __ mov(a0, a3); -+ __ Push(cp); -+ __ Call(BUILTIN_CODE(masm->isolate(), ToObject), -+ RelocInfo::CODE_TARGET); -+ __ Pop(cp); -+ __ mov(a3, a0); -+ __ Pop(a0, a1); -+ __ SmiUntag(a0); -+ } -+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); -+ __ bind(&convert_receiver); -+ } -+ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); -+ __ St_d(a3, MemOperand(kScratchReg, 0)); -+ } -+ __ bind(&done_convert); -+ -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the function to call (checked to be a JSFunction) -+ // -- a2 : the shared function info. -+ // -- cp : the function context. -+ // ----------------------------------- -+ -+ __ Ld_hu( -+ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); -+ __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); -+ -+ // The function is a "classConstructor", need to raise an exception. -+ __ bind(&class_constructor); -+ { -+ FrameScope frame(masm, StackFrame::INTERNAL); -+ __ Push(a1); -+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError); -+ } -+} -+ -+// static -+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the function to call (checked to be a JSBoundFunction) -+ // ----------------------------------- -+ __ AssertBoundFunction(a1); -+ -+ // Patch the receiver to [[BoundThis]]. -+ { -+ __ Ld_d(kScratchReg, -+ FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); -+ __ Alsl_d(a4, a0, sp, kPointerSizeLog2, t7); -+ __ St_d(kScratchReg, MemOperand(a4, 0)); -+ } -+ -+ // Load [[BoundArguments]] into a2 and length of that into a4. 
-+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); -+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); -+ -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the function to call (checked to be a JSBoundFunction) -+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) -+ // -- a4 : the number of [[BoundArguments]] -+ // ----------------------------------- -+ -+ // Reserve stack space for the [[BoundArguments]]. -+ { -+ Label done; -+ __ slli_d(a5, a4, kPointerSizeLog2); -+ __ Sub_d(sp, sp, Operand(a5)); -+ // Check the stack for overflow. We are not trying to catch interruptions -+ // (i.e. debug break and preemption) here, so check the "real stack limit". -+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); -+ __ Branch(&done, hs, sp, Operand(kScratchReg)); -+ // Restore the stack pointer. -+ __ Add_d(sp, sp, Operand(a5)); -+ { -+ FrameScope scope(masm, StackFrame::MANUAL); -+ __ EnterFrame(StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ } -+ __ bind(&done); -+ } -+ -+ // Relocate arguments down the stack. -+ { -+ Label loop, done_loop; -+ __ mov(a5, zero_reg); -+ __ bind(&loop); -+ __ Branch(&done_loop, gt, a5, Operand(a0)); -+ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, MemOperand(a6, 0)); -+ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); -+ __ St_d(kScratchReg, MemOperand(a6, 0)); -+ __ Add_d(a4, a4, Operand(1)); -+ __ Add_d(a5, a5, Operand(1)); -+ __ Branch(&loop); -+ __ bind(&done_loop); -+ } -+ -+ // Copy [[BoundArguments]] to the stack (below the arguments). -+ { -+ Label loop, done_loop; -+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); -+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); -+ __ bind(&loop); -+ __ Sub_d(a4, a4, Operand(1)); -+ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); -+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, MemOperand(a5, 0)); -+ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); -+ __ St_d(kScratchReg, MemOperand(a5, 0)); -+ __ Add_d(a0, a0, Operand(1)); -+ __ Branch(&loop); -+ __ bind(&done_loop); -+ } -+ -+ // Call the [[BoundTargetFunction]] via the Call builtin. -+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), -+ RelocInfo::CODE_TARGET); -+} -+ -+// static -+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the target to call (can be any Object). -+ // ----------------------------------- -+ -+ Label non_callable, non_smi; -+ __ JumpIfSmi(a1, &non_callable); -+ __ bind(&non_smi); -+ __ GetObjectType(a1, t1, t2); -+ __ Jump(masm->isolate()->builtins()->CallFunction(mode), -+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), -+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); -+ -+ // Check if target has a [[Call]] internal method. -+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); -+ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); -+ __ Branch(&non_callable, eq, t1, Operand(zero_reg)); -+ -+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, -+ t2, Operand(JS_PROXY_TYPE)); -+ -+ // 2. 
Call to something else, which might have a [[Call]] internal method (if
-+ // not, we raise an exception).
-+ // Overwrite the original receiver with the (original) target.
-+ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7);
-+ __ St_d(a1, MemOperand(kScratchReg, 0));
-+ // Let the "call_as_function_delegate" take care of the rest.
-+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
-+ __ Jump(masm->isolate()->builtins()->CallFunction(
-+ ConvertReceiverMode::kNotNullOrUndefined),
-+ RelocInfo::CODE_TARGET);
-+
-+ // 3. Call to something that is not callable.
-+ __ bind(&non_callable);
-+ {
-+ FrameScope scope(masm, StackFrame::INTERNAL);
-+ __ Push(a1);
-+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
-+ }
-+}
-+
-+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : the number of arguments (not including the receiver)
-+ // -- a1 : the constructor to call (checked to be a JSFunction)
-+ // -- a3 : the new target (checked to be a constructor)
-+ // -----------------------------------
-+ __ AssertConstructor(a1);
-+ __ AssertFunction(a1);
-+
-+ // The calling convention for function specific ConstructStubs requires
-+ // a2 to contain either an AllocationSite or undefined.
-+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
-+
-+ Label call_generic_stub;
-+
-+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-+ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-+ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
-+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
-+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
-+
-+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
-+ RelocInfo::CODE_TARGET);
-+
-+ __ bind(&call_generic_stub);
-+ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
-+ RelocInfo::CODE_TARGET);
-+}
-+
-+// static
-+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
-+ // ----------- S t a t e -------------
-+ // -- a0 : the number of arguments (not including the receiver)
-+ // -- a1 : the function to call (checked to be a JSBoundFunction)
-+ // -- a3 : the new target (checked to be a constructor)
-+ // -----------------------------------
-+ __ AssertConstructor(a1);
-+ __ AssertBoundFunction(a1);
-+
-+ // Load [[BoundArguments]] into a2 and length of that into a4.
-+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
-+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
-+
-+ // ----------- S t a t e -------------
-+ // -- a0 : the number of arguments (not including the receiver)
-+ // -- a1 : the function to call (checked to be a JSBoundFunction)
-+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
-+ // -- a3 : the new target (checked to be a constructor)
-+ // -- a4 : the number of [[BoundArguments]]
-+ // -----------------------------------
-+
-+ // Reserve stack space for the [[BoundArguments]].
-+ {
-+ Label done;
-+ __ slli_d(a5, a4, kPointerSizeLog2);
-+ __ Sub_d(sp, sp, Operand(a5));
-+ // Check the stack for overflow. We are not trying to catch interruptions
-+ // (i.e. debug break and preemption) here, so check the "real stack limit".
-+ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit);
-+ __ Branch(&done, hs, sp, Operand(kScratchReg));
-+ // Restore the stack pointer.
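-+ // Undo the reservation first so the runtime call below runs on an
-+ // intact stack.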
-+ __ Add_d(sp, sp, Operand(a5)); -+ { -+ FrameScope scope(masm, StackFrame::MANUAL); -+ __ EnterFrame(StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ } -+ __ bind(&done); -+ } -+ -+ // Relocate arguments down the stack. -+ { -+ Label loop, done_loop; -+ __ mov(a5, zero_reg); -+ __ bind(&loop); -+ __ Branch(&done_loop, ge, a5, Operand(a0)); -+ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, MemOperand(a6, 0)); -+ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); -+ __ St_d(kScratchReg, MemOperand(a6, 0)); -+ __ Add_d(a4, a4, Operand(1)); -+ __ Add_d(a5, a5, Operand(1)); -+ __ Branch(&loop); -+ __ bind(&done_loop); -+ } -+ -+ // Copy [[BoundArguments]] to the stack (below the arguments). -+ { -+ Label loop, done_loop; -+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); -+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); -+ __ bind(&loop); -+ __ Sub_d(a4, a4, Operand(1)); -+ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); -+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); -+ __ Ld_d(kScratchReg, MemOperand(a5, 0)); -+ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); -+ __ St_d(kScratchReg, MemOperand(a5, 0)); -+ __ Add_d(a0, a0, Operand(1)); -+ __ Branch(&loop); -+ __ bind(&done_loop); -+ } -+ -+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target. -+ { -+ Label skip_load; -+ __ Branch(&skip_load, ne, a1, Operand(a3)); -+ __ Ld_d(a3, -+ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); -+ __ bind(&skip_load); -+ } -+ -+ // Construct the [[BoundTargetFunction]] via the Construct builtin. -+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); -+} -+ -+// static -+void Builtins::Generate_Construct(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- a0 : the number of arguments (not including the receiver) -+ // -- a1 : the constructor to call (can be any Object) -+ // -- a3 : the new target (either the same as the constructor or -+ // the JSFunction on which new was invoked initially) -+ // ----------------------------------- -+ -+ // Check if target is a Smi. -+ Label non_constructor, non_proxy; -+ __ JumpIfSmi(a1, &non_constructor); -+ -+ // Check if target has a [[Construct]] internal method. -+ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); -+ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); -+ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); -+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); -+ -+ // Dispatch based on instance type. -+ __ Ld_hu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), -+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); -+ -+ // Only dispatch to bound functions after checking whether they are -+ // constructors. -+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), -+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); -+ -+ // Only dispatch to proxies after checking whether they are constructors. -+ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); -+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), -+ RelocInfo::CODE_TARGET); -+ -+ // Called Construct on an exotic Object with a [[Construct]] internal method. -+ __ bind(&non_proxy); -+ { -+ // Overwrite the original receiver with the (original) target. 
-+ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); -+ __ St_d(a1, MemOperand(kScratchReg, 0)); -+ // Let the "call_as_constructor_delegate" take care of the rest. -+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); -+ __ Jump(masm->isolate()->builtins()->CallFunction(), -+ RelocInfo::CODE_TARGET); -+ } -+ -+ // Called Construct on an Object that doesn't have a [[Construct]] internal -+ // method. -+ __ bind(&non_constructor); -+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable), -+ RelocInfo::CODE_TARGET); -+} -+ -+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { -+ // State setup as expected by MacroAssembler::InvokePrologue. -+ // ----------- S t a t e ------------- -+ // -- a0: actual arguments count -+ // -- a1: function (passed through to callee) -+ // -- a2: expected arguments count -+ // -- a3: new target (passed through to callee) -+ // ----------------------------------- -+ -+ Label invoke, dont_adapt_arguments, stack_overflow; -+ -+ Label enough, too_few; -+ __ Branch(&dont_adapt_arguments, eq, a2, -+ Operand(kDontAdaptArgumentsSentinel)); -+ // We use Uless as the number of argument should always be greater than 0. -+ __ Branch(&too_few, Uless, a0, Operand(a2)); -+ -+ { // Enough parameters: actual >= expected. -+ // a0: actual number of arguments as a smi -+ // a1: function -+ // a2: expected number of arguments -+ // a3: new target (passed through to callee) -+ __ bind(&enough); -+ EnterArgumentsAdaptorFrame(masm); -+ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); -+ -+ // Calculate copy start address into a0 and copy end address into a4. -+ __ SmiScale(a0, a0, kPointerSizeLog2); -+ __ Add_d(a0, fp, a0); -+ // Adjust for return address and receiver. -+ __ Add_d(a0, a0, Operand(2 * kPointerSize)); -+ // Compute copy end address. -+ __ slli_d(a4, a2, kPointerSizeLog2); -+ __ sub_d(a4, a0, a4); -+ -+ // Copy the arguments (including the receiver) to the new stack frame. -+ // a0: copy start address -+ // a1: function -+ // a2: expected number of arguments -+ // a3: new target (passed through to callee) -+ // a4: copy end address -+ -+ Label copy; -+ __ bind(©); -+ __ Ld_d(a5, MemOperand(a0, 0)); -+ __ push(a5); -+ __ addi_d(a0, a0, -kPointerSize); -+ __ Branch(©, ge, a0, Operand(a4)); -+ -+ __ jmp(&invoke); -+ } -+ -+ { // Too few parameters: Actual < expected. -+ __ bind(&too_few); -+ EnterArgumentsAdaptorFrame(masm); -+ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); -+ -+ // Calculate copy start address into a0 and copy end address into a7. -+ // a0: actual number of arguments as a smi -+ // a1: function -+ // a2: expected number of arguments -+ // a3: new target (passed through to callee) -+ __ SmiScale(a0, a0, kPointerSizeLog2); -+ __ Add_d(a0, fp, a0); -+ // Adjust for return address and receiver. -+ __ Add_d(a0, a0, Operand(2 * kPointerSize)); -+ // Compute copy end address. Also adjust for return address. -+ __ Add_d(a7, fp, kPointerSize); -+ -+ // Copy the arguments (including the receiver) to the new stack frame. -+ // a0: copy start address -+ // a1: function -+ // a2: expected number of arguments -+ // a3: new target (passed through to callee) -+ // a7: copy end address -+ Label copy; -+ __ bind(©); -+ __ Ld_d(a4, -+ MemOperand(a0, 0)); // Adjusted above for return addr and receiver. 
-+ __ Sub_d(sp, sp, kPointerSize); -+ __ Sub_d(a0, a0, kPointerSize); -+ __ St_d(a4, MemOperand(sp, 0)); -+ __ Branch(©, ne, a0, Operand(a7)); -+ -+ // Fill the remaining expected arguments with undefined. -+ // a1: function -+ // a2: expected number of arguments -+ // a3: new target (passed through to callee) -+ __ LoadRoot(a5, RootIndex::kUndefinedValue); -+ __ slli_d(a6, a2, kPointerSizeLog2); -+ __ Sub_d(a4, fp, Operand(a6)); -+ // Adjust for frame. -+ __ Sub_d(a4, a4, -+ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp + -+ kPointerSize)); -+ -+ Label fill; -+ __ bind(&fill); -+ __ Sub_d(sp, sp, kPointerSize); -+ __ St_d(a5, MemOperand(sp, 0)); -+ __ Branch(&fill, ne, sp, Operand(a4)); -+ } -+ -+ // Call the entry point. -+ __ bind(&invoke); -+ __ mov(a0, a2); -+ // a0 : expected number of arguments -+ // a1 : function (passed through to callee) -+ // a3: new target (passed through to callee) -+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); -+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); -+ __ CallCodeObject(a2); -+ -+ // Store offset of return address for deoptimizer. -+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); -+ -+ // Exit frame and return. -+ LeaveArgumentsAdaptorFrame(masm); -+ __ Ret(); -+ -+ // ------------------------------------------- -+ // Don't adapt arguments. -+ // ------------------------------------------- -+ __ bind(&dont_adapt_arguments); -+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); -+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); -+ __ JumpCodeObject(a2); -+ -+ __ bind(&stack_overflow); -+ { -+ FrameScope frame(masm, StackFrame::MANUAL); -+ __ CallRuntime(Runtime::kThrowStackOverflow); -+ __ break_(0xCC); -+ } -+} -+ -+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { -+ // The function index was put in t0 by the jump table trampoline. -+ // Convert to Smi for the runtime call -+ __ SmiTag(kWasmCompileLazyFuncIndexRegister); -+ { -+ HardAbortScope hard_abort(masm); // Avoid calls to Abort. -+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); -+ -+ // Save all parameter registers (see wasm-linkage.cc). They might be -+ // overwritten in the runtime call below. We don't have any callee-saved -+ // registers in wasm, so no need to store anything else. -+ constexpr RegList gp_regs = Register::ListOf(a0, a2, a3, a4, a5, a6, a7); -+ constexpr RegList fp_regs = -+ DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14); -+ __ MultiPush(gp_regs); -+ __ MultiPushFPU(fp_regs); -+ -+ // Pass instance and function index as an explicit arguments to the runtime -+ // function. -+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); -+ // Initialize the JavaScript context with 0. CEntry will use it to -+ // set the current context on the isolate. -+ __ Move(kContextRegister, Smi::zero()); -+ __ CallRuntime(Runtime::kWasmCompileLazy, 2); -+ __ mov(t8, a0); -+ -+ // Restore registers. -+ __ MultiPopFPU(fp_regs); -+ __ MultiPop(gp_regs); -+ } -+ // Finally, jump to the entrypoint. -+ __ Jump(t8); -+} -+ -+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { -+ HardAbortScope hard_abort(masm); // Avoid calls to Abort. -+ { -+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); -+ -+ // Save all parameter registers. They might hold live values, we restore -+ // them after the runtime call. 
-+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); -+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); -+ -+ // Initialize the JavaScript context with 0. CEntry will use it to -+ // set the current context on the isolate. -+ __ Move(cp, Smi::zero()); -+ __ CallRuntime(Runtime::kWasmDebugBreak, 0); -+ -+ // Restore registers. -+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); -+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); -+ } -+ __ Ret(); -+} -+ -+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, -+ SaveFPRegsMode save_doubles, ArgvMode argv_mode, -+ bool builtin_exit_frame) { -+ // Called from JavaScript; parameters are on stack as if calling JS function -+ // a0: number of arguments including receiver -+ // a1: pointer to builtin function -+ // fp: frame pointer (restored after C call) -+ // sp: stack pointer (restored as callee's sp after C call) -+ // cp: current context (C callee-saved) -+ // -+ // If argv_mode == kArgvInRegister: -+ // a2: pointer to the first argument -+ -+ if (argv_mode == kArgvInRegister) { -+ // Move argv into the correct register. -+ __ mov(s1, a2); -+ } else { -+ // Compute the argv pointer in a callee-saved register. -+ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7); -+ __ Sub_d(s1, s1, kPointerSize); -+ } -+ -+ // Enter the exit frame that transitions from JavaScript to C++. -+ FrameScope scope(masm, StackFrame::MANUAL); -+ __ EnterExitFrame( -+ save_doubles == kSaveFPRegs, 0, -+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); -+ -+ // s0: number of arguments including receiver (C callee-saved) -+ // s1: pointer to first argument (C callee-saved) -+ // s2: pointer to builtin function (C callee-saved) -+ -+ // Prepare arguments for C routine. -+ // a0 = argc -+ __ mov(s0, a0); -+ __ mov(s2, a1); -+ -+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We -+ // also need to reserve the 4 argument slots on the stack. -+ -+ __ AssertStackIsAligned(); -+ -+ // a0 = argc, a1 = argv, a2 = isolate -+ __ li(a2, ExternalReference::isolate_address(masm->isolate())); -+ __ mov(a1, s1); -+ -+ __ StoreReturnAddressAndCall(s2); -+ -+ // Result returned in a0 or a1:a0 - do not destroy these registers! -+ -+ // Check result for exception sentinel. -+ Label exception_returned; -+ __ LoadRoot(a4, RootIndex::kException); -+ __ Branch(&exception_returned, eq, a4, Operand(a0)); -+ -+ // Check that there is no pending exception, otherwise we -+ // should have returned the exception sentinel. -+ if (FLAG_debug_code) { -+ Label okay; -+ ExternalReference pending_exception_address = ExternalReference::Create( -+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()); -+ __ li(a2, pending_exception_address); -+ __ Ld_d(a2, MemOperand(a2, 0)); -+ __ LoadRoot(a4, RootIndex::kTheHoleValue); -+ // Cannot use check here as it attempts to generate call into runtime. -+ __ Branch(&okay, eq, a4, Operand(a2)); -+ __ stop(); -+ __ bind(&okay); -+ } -+ -+ // Exit C frame and return. -+ // a0:a1: result -+ // sp: stack pointer -+ // fp: frame pointer -+ Register argc = argv_mode == kArgvInRegister -+ // We don't want to pop arguments so set argc to no_reg. -+ ? no_reg -+ // s0: still holds argc (callee-saved). -+ : s0; -+ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); -+ -+ // Handling of exception. 
-+ __ bind(&exception_returned); -+ -+ ExternalReference pending_handler_context_address = ExternalReference::Create( -+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); -+ ExternalReference pending_handler_entrypoint_address = -+ ExternalReference::Create( -+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); -+ ExternalReference pending_handler_fp_address = ExternalReference::Create( -+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); -+ ExternalReference pending_handler_sp_address = ExternalReference::Create( -+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); -+ -+ // Ask the runtime for help to determine the handler. This will set a0 to -+ // contain the current pending exception, don't clobber it. -+ ExternalReference find_handler = -+ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); -+ { -+ FrameScope scope(masm, StackFrame::MANUAL); -+ __ PrepareCallCFunction(3, 0, a0); -+ __ mov(a0, zero_reg); -+ __ mov(a1, zero_reg); -+ __ li(a2, ExternalReference::isolate_address(masm->isolate())); -+ __ CallCFunction(find_handler, 3); -+ } -+ -+ // Retrieve the handler context, SP and FP. -+ __ li(cp, pending_handler_context_address); -+ __ Ld_d(cp, MemOperand(cp, 0)); -+ __ li(sp, pending_handler_sp_address); -+ __ Ld_d(sp, MemOperand(sp, 0)); -+ __ li(fp, pending_handler_fp_address); -+ __ Ld_d(fp, MemOperand(fp, 0)); -+ -+ // If the handler is a JS frame, restore the context to the frame. Note that -+ // the context will be set to (cp == 0) for non-JS frames. -+ Label zero; -+ __ Branch(&zero, eq, cp, Operand(zero_reg)); -+ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); -+ __ bind(&zero); -+ -+ // Reset the masking register. This is done independent of the underlying -+ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work -+ // with both configurations. It is safe to always do this, because the -+ // underlying register is caller-saved and can be arbitrarily clobbered. -+ __ ResetSpeculationPoisonRegister(); -+ -+ // Compute the handler entry address and jump to it. -+ __ li(t7, pending_handler_entrypoint_address); -+ __ Ld_d(t7, MemOperand(t7, 0)); -+ __ Jump(t7); -+} -+ -+void Builtins::Generate_DoubleToI(MacroAssembler* masm) { -+ Label done; -+ Register result_reg = t0; -+ -+ Register scratch = GetRegisterThatIsNotOneOf(result_reg); -+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); -+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); -+ DoubleRegister double_scratch = kScratchDoubleReg; -+ -+ // Account for saved regs. -+ const int kArgumentOffset = 4 * kPointerSize; -+ -+ __ Push(result_reg); -+ __ Push(scratch, scratch2, scratch3); -+ -+ // Load double input. -+ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset)); -+ -+ // Clear cumulative exception flags and save the FCSR. -+ // __ movfcsr2gr(scratch2, FCSR); -+ // __ movgr2fcsr(FCSR, zero_reg); -+ -+ // Try a conversion to a signed integer. -+ __ ftintrz_w_d(double_scratch, double_scratch); -+ // Move the converted value into the result register. -+ __ movfr2gr_s(scratch3, double_scratch); -+ -+ // Retrieve and restore the FCSR. -+ __ movfcsr2gr(scratch); // __ cfc1(scratch, FCSR); -+ // __ ctc1(scratch2, FCSR); -+ -+ // Check for overflow and NaNs. -+ __ And( -+ scratch, scratch, -+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); -+ // If we had no exceptions then set result_reg and we are done. 
-+ Label error;
-+ __ Branch(&error, ne, scratch, Operand(zero_reg));
-+ __ Move(result_reg, scratch3);
-+ __ Branch(&done);
-+ __ bind(&error);
-+
-+ // Load the double value and perform a manual truncation.
-+ Register input_high = scratch2;
-+ Register input_low = scratch3;
-+
-+ __ Ld_w(input_low,
-+ MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
-+ __ Ld_w(input_high,
-+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
-+
-+ Label normal_exponent;
-+ // Extract the biased exponent in result.
-+ __ bstrpick_w(result_reg, input_high,
-+ HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
-+ HeapNumber::kExponentShift);
-+
-+ // Check for Infinity and NaNs, which should return 0.
-+ __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
-+ __ Movz(result_reg, zero_reg, scratch);
-+ __ Branch(&done, eq, scratch, Operand(zero_reg));
-+
-+ // Express exponent as delta to (number of mantissa bits + 31).
-+ __ Sub_w(result_reg, result_reg,
-+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-+
-+ // If the delta is strictly positive, all bits would be shifted away,
-+ // which means that we can return 0.
-+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
-+ __ mov(result_reg, zero_reg);
-+ __ Branch(&done);
-+
-+ __ bind(&normal_exponent);
-+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
-+ // Calculate shift.
-+ __ Add_w(scratch, result_reg,
-+ Operand(kShiftBase + HeapNumber::kMantissaBits));
-+
-+ // Save the sign.
-+ Register sign = result_reg;
-+ result_reg = no_reg;
-+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
-+
-+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
-+ // to check for this specific case.
-+ Label high_shift_needed, high_shift_done;
-+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
-+ __ mov(input_high, zero_reg);
-+ __ Branch(&high_shift_done);
-+ __ bind(&high_shift_needed);
-+
-+ // Set the implicit 1 before the mantissa part in input_high.
-+ __ Or(input_high, input_high,
-+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
-+ // Shift the mantissa bits to the correct position.
-+ // We don't need to clear non-mantissa bits as they will be shifted away.
-+ // If they weren't, it would mean that the answer is in the 32bit range.
-+ __ sll_w(input_high, input_high, scratch);
-+
-+ __ bind(&high_shift_done);
-+
-+ // Replace the shifted bits with bits from the lower mantissa word.
-+ Label pos_shift, shift_done;
-+ __ li(kScratchReg, 32);
-+ __ sub_w(scratch, kScratchReg, scratch);
-+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-+
-+ // Negate scratch.
-+ __ Sub_w(scratch, zero_reg, scratch);
-+ __ sll_w(input_low, input_low, scratch);
-+ __ Branch(&shift_done);
-+
-+ __ bind(&pos_shift);
-+ __ srl_w(input_low, input_low, scratch);
-+
-+ __ bind(&shift_done);
-+ __ Or(input_high, input_high, Operand(input_low));
-+ // Restore sign if necessary.
-+ __ mov(scratch, sign);
-+ result_reg = sign;
-+ sign = no_reg;
-+ __ Sub_w(result_reg, zero_reg, input_high);
-+ __ Movz(result_reg, input_high, scratch);
-+
-+ __ bind(&done);
-+
-+ __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
-+ __ Pop(scratch, scratch2, scratch3);
-+ __ Pop(result_reg);
-+ __ Ret();
-+}
-+
-+namespace {
-+
-+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
-+ int64_t offset = (ref0.address() - ref1.address());
-+ DCHECK(static_cast<int>(offset) == offset);
-+ return static_cast<int>(offset);
-+}
-+
-+// Calls an API function. Allocates HandleScope, extracts returned value
-+// from handle and propagates exceptions. Restores context. stack_space
-+// - space to be unwound on exit (includes the call JS arguments space and
-+// the additional space allocated for the fast call).
-+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
-+ ExternalReference thunk_ref, int stack_space,
-+ MemOperand* stack_space_operand,
-+ MemOperand return_value_operand) {
-+ Isolate* isolate = masm->isolate();
-+ ExternalReference next_address =
-+ ExternalReference::handle_scope_next_address(isolate);
-+ const int kNextOffset = 0;
-+ const int kLimitOffset = AddressOffset(
-+ ExternalReference::handle_scope_limit_address(isolate), next_address);
-+ const int kLevelOffset = AddressOffset(
-+ ExternalReference::handle_scope_level_address(isolate), next_address);
-+
-+ DCHECK(function_address == a1 || function_address == a2);
-+
-+ Label profiler_enabled, end_profiler_check;
-+ __ li(t7, ExternalReference::is_profiling_address(isolate));
-+ __ Ld_b(t7, MemOperand(t7, 0));
-+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
-+ __ li(t7, ExternalReference::address_of_runtime_stats_flag());
-+ __ Ld_w(t7, MemOperand(t7, 0));
-+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
-+ {
-+ // Call the api function directly.
-+ __ mov(t7, function_address);
-+ __ Branch(&end_profiler_check);
-+ }
-+
-+ __ bind(&profiler_enabled);
-+ {
-+ // Additional parameter is the address of the actual callback.
-+ __ li(t7, thunk_ref);
-+ }
-+ __ bind(&end_profiler_check);
-+
-+ // Allocate HandleScope in callee-save registers.
-+ __ li(s5, next_address);
-+ __ Ld_d(s0, MemOperand(s5, kNextOffset));
-+ __ Ld_d(s1, MemOperand(s5, kLimitOffset));
-+ __ Ld_w(s2, MemOperand(s5, kLevelOffset));
-+ __ Add_w(s2, s2, Operand(1));
-+ __ St_w(s2, MemOperand(s5, kLevelOffset));
-+
-+ __ StoreReturnAddressAndCall(t7);
-+
-+ Label promote_scheduled_exception;
-+ Label delete_allocated_handles;
-+ Label leave_exit_frame;
-+ Label return_value_loaded;
-+
-+ // Load value from ReturnValue.
-+ __ Ld_d(a0, return_value_operand);
-+ __ bind(&return_value_loaded);
-+
-+ // No more valid handles (the result handle was the last one). Restore
-+ // previous handle scope.
-+ __ St_d(s0, MemOperand(s5, kNextOffset));
-+ if (__ emit_debug_code()) {
-+ __ Ld_w(a1, MemOperand(s5, kLevelOffset));
-+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
-+ Operand(s2));
-+ }
-+ __ Sub_w(s2, s2, Operand(1));
-+ __ St_w(s2, MemOperand(s5, kLevelOffset));
-+ __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
-+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
-+
-+ // Leave the API exit frame.
-+ __ bind(&leave_exit_frame);
-+
-+ if (stack_space_operand == nullptr) {
-+ DCHECK_NE(stack_space, 0);
-+ __ li(s0, Operand(stack_space));
-+ } else {
-+ DCHECK_EQ(stack_space, 0);
-+ STATIC_ASSERT(kCArgSlotCount == 0);
-+ __ Ld_d(s0, *stack_space_operand);
-+ }
-+
-+ static constexpr bool kDontSaveDoubles = false;
-+ static constexpr bool kRegisterContainsSlotCount = false;
-+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
-+ kRegisterContainsSlotCount);
-+
-+ // Check if the function scheduled an exception.
-+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
-+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
-+ __ Ld_d(a5, MemOperand(kScratchReg, 0));
-+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
-+
-+ __ Ret();
-+
-+ // Re-throw by promoting a scheduled exception.
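For reference, the slow path of Generate_DoubleToI above is the standard bit-twiddling truncation of an IEEE-754 double to the low 32 bits of its integer part. A minimal host-side C++ sketch of the same computation (illustrative only, not part of the patch; TruncateDoubleToI is an invented name and binary64 doubles are assumed):

#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToI(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // bit-cast without UB
  bool negative = (bits >> 63) != 0;
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7ff);
  if (exponent == 0x7ff) return 0;  // Infinity and NaN truncate to 0.
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (exponent != 0) mantissa |= uint64_t{1} << 52;  // implicit leading 1
  // value = +/- mantissa * 2^(exponent - 1075); keep the low 32 bits of the
  // integer part, which is exactly what the shift dance in the builtin does.
  int shift = exponent - 1075;  // 1075 = exponent bias (1023) + 52
  uint32_t magnitude;
  if (shift >= 32) {
    magnitude = 0;  // the low 32 bits are all zero
  } else if (shift >= 0) {
    magnitude = static_cast<uint32_t>(mantissa) << shift;  // modulo 2^32
  } else if (shift > -53) {
    magnitude = static_cast<uint32_t>(mantissa >> -shift);
  } else {
    magnitude = 0;  // |value| < 1 (also covers subnormals)
  }
  return static_cast<int32_t>(negative ? 0u - magnitude : magnitude);
}

The builtin additionally tries ftintrz_w_d first and only falls back to this manual path when the FCSR reports overflow, underflow, or an invalid operation.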
-+ __ bind(&promote_scheduled_exception); -+ __ TailCallRuntime(Runtime::kPromoteScheduledException); -+ -+ // HandleScope limit has changed. Delete allocated extensions. -+ __ bind(&delete_allocated_handles); -+ __ St_d(s1, MemOperand(s5, kLimitOffset)); -+ __ mov(s0, a0); -+ __ PrepareCallCFunction(1, s1); -+ __ li(a0, ExternalReference::isolate_address(isolate)); -+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); -+ __ mov(a0, s0); -+ __ jmp(&leave_exit_frame); -+} -+ -+} // namespace -+ -+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- cp : context -+ // -- a1 : api function address -+ // -- a2 : arguments count (not including the receiver) -+ // -- a3 : call data -+ // -- a0 : holder -+ // -- -+ // -- sp[0] : last argument -+ // -- ... -+ // -- sp[(argc - 1) * 8] : first argument -+ // -- sp[(argc + 0) * 8] : receiver -+ // ----------------------------------- -+ -+ Register api_function_address = a1; -+ Register argc = a2; -+ Register call_data = a3; -+ Register holder = a0; -+ Register scratch = t0; -+ Register base = t1; // For addressing MemOperands on the stack. -+ -+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch, -+ base)); -+ -+ using FCA = FunctionCallbackArguments; -+ -+ STATIC_ASSERT(FCA::kArgsLength == 6); -+ STATIC_ASSERT(FCA::kNewTargetIndex == 5); -+ STATIC_ASSERT(FCA::kDataIndex == 4); -+ STATIC_ASSERT(FCA::kReturnValueOffset == 3); -+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); -+ STATIC_ASSERT(FCA::kIsolateIndex == 1); -+ STATIC_ASSERT(FCA::kHolderIndex == 0); -+ -+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows: -+ // -+ // Target state: -+ // sp[0 * kPointerSize]: kHolder -+ // sp[1 * kPointerSize]: kIsolate -+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) -+ // sp[3 * kPointerSize]: undefined (kReturnValue) -+ // sp[4 * kPointerSize]: kData -+ // sp[5 * kPointerSize]: undefined (kNewTarget) -+ -+ // Set up the base register for addressing through MemOperands. It will point -+ // at the receiver (located at sp + argc * kPointerSize). -+ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7); -+ -+ // Reserve space on the stack. -+ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); -+ -+ // kHolder. -+ __ St_d(holder, MemOperand(sp, 0 * kPointerSize)); -+ -+ // kIsolate. -+ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); -+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); -+ -+ // kReturnValueDefaultValue and kReturnValue. -+ __ LoadRoot(scratch, RootIndex::kUndefinedValue); -+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); -+ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize)); -+ -+ // kData. -+ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize)); -+ -+ // kNewTarget. -+ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize)); -+ -+ // Keep a pointer to kHolder (= implicit_args) in a scratch register. -+ // We use it below to set up the FunctionCallbackInfo object. -+ __ mov(scratch, sp); -+ -+ // Allocate the v8::Arguments structure in the arguments' space since -+ // it's not controlled by GC. -+ static constexpr int kApiStackSpace = 4; -+ static constexpr bool kDontSaveDoubles = false; -+ FrameScope frame_scope(masm, StackFrame::MANUAL); -+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); -+ -+ // EnterExitFrame may align the sp. -+ -+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). 
-+ // Arguments are after the return address (pushed by EnterExitFrame()). -+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); -+ -+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed -+ // on the stack). -+ __ Sub_d(scratch, base, Operand(1 * kPointerSize)); -+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); -+ -+ // FunctionCallbackInfo::length_. -+ // Stored as int field, 32-bit integers within struct on stack always left -+ // justified by n64 ABI. -+ __ St_w(argc, MemOperand(sp, 3 * kPointerSize)); -+ -+ // We also store the number of bytes to drop from the stack after returning -+ // from the API function here. -+ // Note: Unlike on other architectures, this stores the number of slots to -+ // drop, not the number of bytes. -+ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); -+ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize)); -+ -+ // v8::InvocationCallback's argument. -+ DCHECK(!AreAliased(api_function_address, scratch, a0)); -+ __ Add_d(a0, sp, Operand(1 * kPointerSize)); -+ -+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); -+ -+ // There are two stack slots above the arguments we constructed on the stack. -+ // TODO(jgruber): Document what these arguments are. -+ static constexpr int kStackSlotsAboveFCA = 2; -+ MemOperand return_value_operand( -+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); -+ -+ static constexpr int kUseStackSpaceOperand = 0; -+ MemOperand stack_space_operand(sp, 4 * kPointerSize); -+ -+ AllowExternalCallThatCantCauseGC scope(masm); -+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, -+ kUseStackSpaceOperand, &stack_space_operand, -+ return_value_operand); -+} -+ -+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { -+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property -+ // name below the exit frame to make GC aware of them. -+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); -+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); -+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); -+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); -+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); -+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); -+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); -+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); -+ -+ Register receiver = ApiGetterDescriptor::ReceiverRegister(); -+ Register holder = ApiGetterDescriptor::HolderRegister(); -+ Register callback = ApiGetterDescriptor::CallbackRegister(); -+ Register scratch = a4; -+ DCHECK(!AreAliased(receiver, holder, callback, scratch)); -+ -+ Register api_function_address = a2; -+ -+ // Here and below +1 is for name() pushed after the args_ array. 
-+ using PCA = PropertyCallbackArguments; -+ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize); -+ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize)); -+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset)); -+ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize)); -+ __ LoadRoot(scratch, RootIndex::kUndefinedValue); -+ __ St_d(scratch, -+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize)); -+ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) * -+ kPointerSize)); -+ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); -+ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize)); -+ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize)); -+ // should_throw_on_error -> false -+ DCHECK_EQ(0, Smi::zero().ptr()); -+ __ St_d(zero_reg, -+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize)); -+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); -+ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize)); -+ -+ // v8::PropertyCallbackInfo::args_ array and name handle. -+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; -+ -+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle. -+ __ mov(a0, sp); // a0 = Handle -+ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_ -+ -+ const int kApiStackSpace = 1; -+ FrameScope frame_scope(masm, StackFrame::MANUAL); -+ __ EnterExitFrame(false, kApiStackSpace); -+ -+ // Create v8::PropertyCallbackInfo object on the stack and initialize -+ // it's args_ field. -+ __ St_d(a1, MemOperand(sp, 1 * kPointerSize)); -+ __ Add_d(a1, sp, Operand(1 * kPointerSize)); -+ // a1 = v8::PropertyCallbackInfo& -+ -+ ExternalReference thunk_ref = -+ ExternalReference::invoke_accessor_getter_callback(); -+ -+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); -+ __ Ld_d(api_function_address, -+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); -+ -+ // +3 is to skip prolog, return address and name handle. -+ MemOperand return_value_operand( -+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); -+ MemOperand* const kUseStackSpaceConstant = nullptr; -+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, -+ kStackUnwindSpace, kUseStackSpaceConstant, -+ return_value_operand); -+} -+ -+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { -+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general -+ // purpose Code object) to be able to call into C functions that may trigger -+ // GC and thus move the caller. -+ // -+ // DirectCEntry places the return address on the stack (updated by the GC), -+ // making the call GC safe. The irregexp backend relies on this. -+ -+ // Make place for arguments to fit C calling convention. Callers use -+ // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't -+ // have to do that here. Any caller must drop kCArgsSlotsSize stack space -+ // after the call. -+ __ addi_d(sp, sp, -kCArgsSlotsSize); -+ -+ __ St_d(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address. -+ __ Call(t7); // Call the C++ function. -+ __ Ld_d(t7, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code. -+ -+ if (FLAG_debug_code && FLAG_enable_slow_asserts) { -+ // In case of an error the return address may point to a memory area -+ // filled with kZapValue by the GC. Dereference the address and check for -+ // this. 
-+ __ Ld_d(a4, MemOperand(t7, 0));
-+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
-+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
-+ }
-+
-+ __ Jump(t7);
-+}
-+
-+#undef __
-+
-+} // namespace internal
-+} // namespace v8
-+
-+#endif // V8_TARGET_ARCH_LOONG64
-diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
-index d56b3725..97be9eb7 100644
---- a/deps/v8/src/codegen/assembler-arch.h
-+++ b/deps/v8/src/codegen/assembler-arch.h
-@@ -21,6 +21,8 @@
- #include "src/codegen/mips/assembler-mips.h"
- #elif V8_TARGET_ARCH_MIPS64
- #include "src/codegen/mips64/assembler-mips64.h"
-+#elif V8_TARGET_ARCH_LOONG64
-+#include "src/codegen/loong64/assembler-loong64.h"
- #elif V8_TARGET_ARCH_S390
- #include "src/codegen/s390/assembler-s390.h"
- #else
-diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
-index 8c81315d..e14dbe81 100644
---- a/deps/v8/src/codegen/assembler-inl.h
-+++ b/deps/v8/src/codegen/assembler-inl.h
-@@ -21,6 +21,8 @@
- #include "src/codegen/mips/assembler-mips-inl.h"
- #elif V8_TARGET_ARCH_MIPS64
- #include "src/codegen/mips64/assembler-mips64-inl.h"
-+#elif V8_TARGET_ARCH_LOONG64
-+#include "src/codegen/loong64/assembler-loong64-inl.h"
- #elif V8_TARGET_ARCH_S390
- #include "src/codegen/s390/assembler-s390-inl.h"
- #else
-diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
-index 7a222c96..b885cecc 100644
---- a/deps/v8/src/codegen/constants-arch.h
-+++ b/deps/v8/src/codegen/constants-arch.h
-@@ -15,6 +15,8 @@
- #include "src/codegen/mips/constants-mips.h" // NOLINT
- #elif V8_TARGET_ARCH_MIPS64
- #include "src/codegen/mips64/constants-mips64.h" // NOLINT
-+#elif V8_TARGET_ARCH_LOONG64
-+#include "src/codegen/loong64/constants-loong64.h" // NOLINT
- #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
- #include "src/codegen/ppc/constants-ppc.h" // NOLINT
- #elif V8_TARGET_ARCH_S390
-diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
-index 14c94eba..64baaed5 100644
---- a/deps/v8/src/codegen/cpu-features.h
-+++ b/deps/v8/src/codegen/cpu-features.h
-@@ -47,6 +47,9 @@ enum CpuFeature {
- MIPSr6,
- MIPS_SIMD, // MSA instructions
-
-+#elif V8_TARGET_ARCH_LOONG64
-+ FPU, // TODO
-+
- #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
- FPU,
- FPR_GPR_MOV,
-diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
-index 512b64e5..6b764f23 100644
---- a/deps/v8/src/codegen/external-reference.cc
-+++ b/deps/v8/src/codegen/external-reference.cc
-@@ -472,6 +472,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
- #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
- #elif V8_TARGET_ARCH_MIPS64
- #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
-+#elif V8_TARGET_ARCH_LOONG64
-+#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
- #elif V8_TARGET_ARCH_S390
- #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
- #else
-diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
-index 503da3cb..a65e2ede 100644
---- a/deps/v8/src/codegen/interface-descriptors.cc
-+++ b/deps/v8/src/codegen/interface-descriptors.cc
-@@ -130,7 +130,8 @@ const char* CallInterfaceDescriptor::DebugName() const {
- return "";
- }
-
---#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
-+#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
-+ !defined(V8_TARGET_ARCH_LOONG64)
- bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return true;
- }
-@@ -408,7 +409,8 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
- DefaultInitializePlatformSpecific(data, kParameterCount);
- }
-
---#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
-+#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
-+ !defined(V8_TARGET_ARCH_LOONG64)
- void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
-new file mode 100644
-index 00000000..805ef1f5
---- /dev/null
-+++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
-@@ -0,0 +1,268 @@
-+// Copyright (c) 1994-2006 Sun Microsystems Inc.
-+// All Rights Reserved.
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+// - Redistributions of source code must retain the above copyright notice,
-+// this list of conditions and the following disclaimer.
-+//
-+// - Redistribution in binary form must reproduce the above copyright
-+// notice, this list of conditions and the following disclaimer in the
-+// documentation and/or other materials provided with the distribution.
-+//
-+// - Neither the name of Sun Microsystems or the names of contributors may
-+// be used to endorse or promote products derived from this software without
-+// specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+// The original source code covered by the above license above has been
-+// modified significantly by Google Inc.
-+// Copyright 2012 the V8 project authors. All rights reserved.
-+
-+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
-+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
-+
-+#include "src/codegen/loong64/assembler-loong64.h"
-+
-+#include "src/codegen/assembler.h"
-+#include "src/debug/debug.h"
-+#include "src/objects/objects-inl.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
-+
-+bool CpuFeatures::SupportsWasmSimd128() { return false; }
-+
-+// -----------------------------------------------------------------------------
-+// Operand and MemOperand.
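The accessors that follow treat Operand as a small tagged union: it wraps either a register or a 64-bit immediate (relocation modes and heap-object requests are carried alongside and handled later in this file). A standalone model of that invariant (illustrative only; OperandModel and its members are invented names, not the real class):

#include <cassert>
#include <cstdint>

class OperandModel {
 public:
  static OperandModel Reg(int code) { return OperandModel(true, code, 0); }
  static OperandModel Imm(int64_t value) { return OperandModel(false, -1, value); }
  bool is_reg() const { return is_reg_; }
  int reg_code() const {
    assert(is_reg());
    return reg_code_;
  }
  int64_t immediate() const {
    assert(!is_reg());  // mirrors the DCHECK(!is_reg()) below
    return imm_;
  }

 private:
  OperandModel(bool is_reg, int reg_code, int64_t imm)
      : is_reg_(is_reg), reg_code_(reg_code), imm_(imm) {}
  bool is_reg_;
  int reg_code_;
  int64_t imm_;
};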
-+
-+bool Operand::is_reg() const { return rm_.is_valid(); }
-+
-+int64_t Operand::immediate() const {
-+ DCHECK(!is_reg());
-+ DCHECK(!IsHeapObjectRequest());
-+ return value_.immediate;
-+}
-+
-+// -----------------------------------------------------------------------------
-+// RelocInfo.
-+
-+void RelocInfo::apply(intptr_t delta) {
-+ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
-+ // Absolute code pointer inside code object moves with the code object.
-+ Assembler::RelocateInternalReference(rmode_, pc_, delta);
-+ }
-+}
-+
-+Address RelocInfo::target_address() {
-+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
-+ return Assembler::target_address_at(pc_, constant_pool_);
-+}
-+
-+Address RelocInfo::target_address_address() {
-+ DCHECK(HasTargetAddressAddress());
-+ // Read the address of the word containing the target_address in an
-+ // instruction stream.
-+ // The only architecture-independent user of this function is the serializer.
-+ // The serializer uses it to find out how many raw bytes of instruction to
-+ // output before the next target.
-+ // For an instruction like LUI/ORI where the target bits are mixed into the
-+ // instruction bits, the size of the target will be zero, indicating that the
-+ // serializer should not step forward in memory after a target is resolved
-+ // and written. In this case the target_address_address function should
-+ // return the end of the instructions to be patched, allowing the
-+ // deserializer to deserialize the instructions as raw bytes and put them in
-+ // place, ready to be patched with the target. After jump optimization,
-+ // that is the address of the instruction that follows J/JAL/JR/JALR
-+ // instruction.
-+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
-+}
-+
-+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-+
-+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-+
-+void Assembler::deserialization_set_special_target_at(
-+ Address instruction_payload, Code code, Address target) {
-+ set_target_address_at(instruction_payload,
-+ !code.is_null() ? code.constant_pool() : kNullAddress,
-+ target);
-+}
-+
-+int Assembler::deserialization_special_target_size(
-+ Address instruction_payload) {
-+ return kSpecialTargetSize;
-+}
-+
-+void Assembler::set_target_internal_reference_encoded_at(Address pc,
-+ Address target) {
-+ // TODO: see AssembleJumpTable; loong64 does not generate internal references?
-+ abort();
-+}
-+
-+void Assembler::deserialization_set_target_internal_reference_at(
-+ Address pc, Address target, RelocInfo::Mode mode) {
-+ if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
-+ DCHECK(IsJ(instr_at(pc)));
-+ set_target_internal_reference_encoded_at(pc, target);
-+ } else {
-+ DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
-+ Memory<Address>(pc) = target;
-+ }
-+}
-+
-+HeapObject RelocInfo::target_object() {
-+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
-+ return HeapObject::cast(
-+ Object(Assembler::target_address_at(pc_, constant_pool_)));
-+}
-+
-+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
-+ return target_object();
-+}
-+
-+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
-+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
-+ return Handle<HeapObject>(reinterpret_cast<Address*>(
-+ Assembler::target_address_at(pc_, constant_pool_)));
-+}
-+
-+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
-+ WriteBarrierMode write_barrier_mode,
-+ ICacheFlushMode icache_flush_mode) {
-+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
-+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
-+ icache_flush_mode);
-+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
-+ !FLAG_disable_write_barriers) {
-+ WriteBarrierForCode(host(), this, target);
-+ }
-+}
-+
-+Address RelocInfo::target_external_reference() {
-+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
-+ return Assembler::target_address_at(pc_, constant_pool_);
-+}
-+
-+void RelocInfo::set_target_external_reference(
-+ Address target, ICacheFlushMode icache_flush_mode) {
-+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
-+ Assembler::set_target_address_at(pc_, constant_pool_, target,
-+ icache_flush_mode);
-+}
-+
-+Address RelocInfo::target_internal_reference() {
-+ if (rmode_ == INTERNAL_REFERENCE) {
-+ return Memory<Address>(pc_);
-+ } else {
-+ UNREACHABLE();
-+ }
-+}
-+
-+Address RelocInfo::target_internal_reference_address() {
-+ DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
-+ return pc_;
-+}
-+
-+Address RelocInfo::target_runtime_entry(Assembler* origin) {
-+ DCHECK(IsRuntimeEntry(rmode_));
-+ return target_address();
-+}
-+
-+void RelocInfo::set_target_runtime_entry(Address target,
-+ WriteBarrierMode write_barrier_mode,
-+ ICacheFlushMode icache_flush_mode) {
-+ DCHECK(IsRuntimeEntry(rmode_));
-+ if (target_address() != target)
-+ set_target_address(target, write_barrier_mode, icache_flush_mode);
-+}
-+
-+Address RelocInfo::target_off_heap_target() {
-+ DCHECK(IsOffHeapTarget(rmode_));
-+ return Assembler::target_address_at(pc_, constant_pool_);
-+}
-+
-+void RelocInfo::WipeOut() {
-+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
-+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
-+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
-+ IsOffHeapTarget(rmode_));
-+ if (IsInternalReference(rmode_)) {
-+ Memory<Address>(pc_) = kNullAddress;
-+ } else if (IsInternalReferenceEncoded(rmode_)) {
-+ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
-+ } else {
-+ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
-+ }
-+}
-+
-+// -----------------------------------------------------------------------------
-+// Assembler.
-+
-+void Assembler::CheckBuffer() {
-+ if (buffer_space() <= kGap) {
-+ GrowBuffer();
-+ }
-+}
-+
-+void Assembler::EmitHelper(Instr x) {
-+ *reinterpret_cast<Instr*>(pc_) = x;
-+ pc_ += kInstrSize;
-+ CheckTrampolinePoolQuick();
-+}
-+
-+template <>
-+inline void Assembler::EmitHelper(uint8_t x);
-+
-+template <typename T>
-+void Assembler::EmitHelper(T x) {
-+ *reinterpret_cast<T*>(pc_) = x;
-+ pc_ += sizeof(x);
-+ CheckTrampolinePoolQuick();
-+}
-+
-+template <>
-+void Assembler::EmitHelper(uint8_t x) {
-+ *reinterpret_cast<uint8_t*>(pc_) = x;
-+ pc_ += sizeof(x);
-+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
-+ CheckTrampolinePoolQuick();
-+ }
-+}
-+
-+void Assembler::emit(Instr x) {
-+ if (!is_buffer_growth_blocked()) {
-+ CheckBuffer();
-+ }
-+ EmitHelper(x);
-+}
-+
-+void Assembler::emit(uint64_t data) {
-+ // CheckForEmitInForbiddenSlot();
-+ if (!is_buffer_growth_blocked()) {
-+ CheckBuffer();
-+ }
-+ EmitHelper(data);
-+}
-+
-+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
-+
-+} // namespace internal
-+} // namespace v8
-+
-+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
-diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
-new file mode 100644
-index 00000000..395cc4e1
---- /dev/null
-+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
-@@ -0,0 +1,2685 @@
-+// Copyright (c) 1994-2006 Sun Microsystems Inc.
-+// All Rights Reserved.
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+// - Redistributions of source code must retain the above copyright notice,
-+// this list of conditions and the following disclaimer.
-+//
-+// - Redistribution in binary form must reproduce the above copyright
-+// notice, this list of conditions and the following disclaimer in the
-+// documentation and/or other materials provided with the distribution.
-+//
-+// - Neither the name of Sun Microsystems or the names of contributors may
-+// be used to endorse or promote products derived from this software without
-+// specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+// The original source code covered by the above license above has been
-+// modified significantly by Google Inc.
-+// Copyright 2012 the V8 project authors. All rights reserved.
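The CheckBuffer()/emit() pair in the inline header above is the usual grow-before-write scheme: keep at least kGap bytes free so a single emission can never run off the end of the buffer. The same pattern as a standalone sketch (EmitBufferModel and its members are invented names, not V8 API):

#include <cstddef>
#include <cstdint>
#include <vector>

class EmitBufferModel {
 public:
  void Emit(uint32_t instr) {
    if (SpaceLeft() <= kGap) Grow();  // the CheckBuffer() step
    code_[pc_++] = instr;             // the EmitHelper() step
  }
  size_t SizeInBytes() const { return pc_ * sizeof(uint32_t); }

 private:
  static constexpr size_t kGap = 16;  // instruction slots kept free at all times
  size_t SpaceLeft() const { return code_.size() - pc_; }
  void Grow() { code_.resize(code_.size() * 2); }
  std::vector<uint32_t> code_ = std::vector<uint32_t>(64);
  size_t pc_ = 0;
};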
-+
-+#include "src/codegen/loong64/assembler-loong64.h"
-+
-+#if V8_TARGET_ARCH_LOONG64
-+
-+#include "src/base/cpu.h"
-+#include "src/codegen/loong64/assembler-loong64-inl.h"
-+#include "src/codegen/safepoint-table.h"
-+#include "src/codegen/string-constants.h"
-+#include "src/deoptimizer/deoptimizer.h"
-+#include "src/objects/heap-number-inl.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+void CpuFeatures::ProbeImpl(bool cross_compile) {
-+ supported_ |= 1u << FPU;
-+
-+ // Only use statically determined features for cross compile (snapshot).
-+ if (cross_compile) return;
-+
-+#if defined(_loongisa_vec)
-+ supported_ |= 0u;
-+#endif
-+ // If the compiler is allowed to use fpu then we can use fpu too in our
-+ // code generation.
-+#ifdef __loongarch__
-+ // Probe for additional features at runtime.
-+ base::CPU cpu;
-+ supported_ |= 0u;
-+#endif
-+}
-+
-+void CpuFeatures::PrintTarget() {}
-+void CpuFeatures::PrintFeatures() {}
-+
-+int ToNumber(Register reg) {
-+ DCHECK(reg.is_valid());
-+ const int kNumbers[] = {
-+ 0, // zero_reg
-+ 1, // r1 ra
-+ 2, // r2 gp
-+ 3, // r3 sp
-+ 4, // a0 v0
-+ 5, // a1 v1
-+ 6, // a2
-+ 7, // a3
-+ 8, // a4
-+ 9, // a5
-+ 10, // a6
-+ 11, // a7
-+ 12, // t0
-+ 13, // t1
-+ 14, // t2
-+ 15, // t3
-+ 16, // t4
-+ 17, // t5
-+ 18, // t6
-+ 19, // t7
-+ 20, // t8
-+ 21, // tp
-+ 22, // fp
-+ 23, // s0
-+ 24, // s1
-+ 25, // s2
-+ 26, // s3
-+ 27, // s4
-+ 28, // s5
-+ 29, // s6
-+ 30, // s7
-+ 31, // s8
-+ };
-+ return kNumbers[reg.code()];
-+}
-+
-+Register ToRegister(int num) {
-+ DCHECK(num >= 0 && num < kNumRegisters);
-+ const Register kRegisters[] = {
-+ zero_reg, ra, gp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
-+ t4, t5, t6, t7, t8, tp, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8};
-+ return kRegisters[num];
-+}
-+
-+// -----------------------------------------------------------------------------
-+// Implementation of RelocInfo.
-+
-+const int RelocInfo::kApplyMask =
-+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-+
-+bool RelocInfo::IsCodedSpecially() {
-+ // The deserializer needs to know whether a pointer is specially coded. Being
-+ // specially coded on loongisa means that it is a lui/ori instruction, and
-+ // that is always the case inside code objects.
-+ return true;
-+}
-+
-+bool RelocInfo::IsInConstantPool() { return false; }
-+
-+uint32_t RelocInfo::wasm_call_tag() const {
-+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
-+ return static_cast<uint32_t>(
-+ Assembler::target_address_at(pc_, constant_pool_));
-+}
-+
-+// -----------------------------------------------------------------------------
-+// Implementation of Operand and MemOperand.
-+// See assembler-loong64-inl.h for inlined constructors.
-+
-+Operand::Operand(Handle<HeapObject> handle)
-+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
-+ value_.immediate = static_cast<intptr_t>(handle.address());
-+}
-+
-+Operand Operand::EmbeddedNumber(double value) {
-+ int32_t smi;
-+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
-+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
-+ result.is_heap_object_request_ = true;
-+ result.value_.heap_object_request = HeapObjectRequest(value);
-+ return result;
-+}
-+
-+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
-+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
-+ result.is_heap_object_request_ = true;
-+ result.value_.heap_object_request = HeapObjectRequest(str);
-+ return result;
-+}
-+
-+MemOperand::MemOperand(Register base, int32_t offset)
-+ : base_(base), index_(no_reg), offset_(offset) {}
-+
-+MemOperand::MemOperand(Register base, Register index)
-+ : base_(base), index_(index), offset_(0) {}
-+
-+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
-+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
-+ for (auto& request : heap_object_requests_) {
-+ Handle<HeapObject> object;
-+ switch (request.kind()) {
-+ case HeapObjectRequest::kHeapNumber:
-+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
-+ request.heap_number());
-+ break;
-+ case HeapObjectRequest::kStringConstant:
-+ const StringConstantBase* str = request.string();
-+ CHECK_NOT_NULL(str);
-+ object = str->AllocateStringConstant(isolate);
-+ break;
-+ }
-+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
-+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
-+ }
-+}
-+
-+// -----------------------------------------------------------------------------
-+// Specific instructions, constants, and masks.
-+
-+// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r)
-+// operations as post-increment of sp.
-+const Instr kPopInstruction = ADDI_D | (kPointerSize & kImm12Mask) << kRkShift |
-+ (sp.code() << kRjShift) | sp.code(); // NOLINT
-+// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
-+const Instr kPushInstruction = ADDI_D |
-+ (-kPointerSize & kImm12Mask) << kRkShift |
-+ (sp.code() << kRjShift) | sp.code(); // NOLINT
-+// St_d(r, MemOperand(sp, 0))
-+const Instr kPushRegPattern = ST_D | (sp.code() << kRjShift); // NOLINT
-+// Ld_d(r, MemOperand(sp, 0))
-+const Instr kPopRegPattern = LD_D | (sp.code() << kRjShift); // NOLINT
-+
-+Assembler::Assembler(const AssemblerOptions& options,
-+ std::unique_ptr<AssemblerBuffer> buffer)
-+ : AssemblerBase(options, std::move(buffer)),
-+ scratch_register_list_(t7.bit() | t6.bit()) {
-+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
-+
-+ last_trampoline_pool_end_ = 0;
-+ no_trampoline_pool_before_ = 0;
-+ trampoline_pool_blocked_nesting_ = 0;
-+ // We leave space (16 * kTrampolineSlotsSize)
-+ // for BlockTrampolinePoolScope buffer.
-+ next_buffer_check_ = FLAG_force_long_branches
-+ ? kMaxInt
-+ : kMax16BranchOffset - kTrampolineSlotsSize * 16;
-+ internal_trampoline_exception_ = false;
-+ last_bound_pos_ = 0;
-+
-+ trampoline_emitted_ = FLAG_force_long_branches; // TODO remove this
-+ unbound_labels_count_ = 0;
-+ block_buffer_growth_ = false;
-+}
-+
-+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
-+ SafepointTableBuilder* safepoint_table_builder,
-+ int handler_table_offset) {
-+ // EmitForbiddenSlotInstruction(); // TODO why?
-+
-+ int code_comments_size = WriteCodeComments();
-+
-+ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
-+
-+ AllocateAndInstallRequestedHeapObjects(isolate);
-+
-+ // Set up code descriptor.
-+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
-+ // this point to make CodeDesc initialization less fiddly.
-+
-+ static constexpr int kConstantPoolSize = 0;
-+ const int instruction_size = pc_offset();
-+ const int code_comments_offset = instruction_size - code_comments_size;
-+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
-+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
-+ ? constant_pool_offset
-+ : handler_table_offset;
-+ const int safepoint_table_offset =
-+ (safepoint_table_builder == kNoSafepointTable)
-+ ? handler_table_offset2
-+ : safepoint_table_builder->GetCodeOffset();
-+ const int reloc_info_offset =
-+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
-+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
-+ handler_table_offset2, constant_pool_offset,
-+ code_comments_offset, reloc_info_offset);
-+}
-+
-+void Assembler::Align(int m) {
-+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
-+ while ((pc_offset() & (m - 1)) != 0) {
-+ nop();
-+ }
-+}
-+
-+void Assembler::CodeTargetAlign() {
-+ // No advantage to aligning branch/call targets to more than
-+ // single instruction, that I am aware of.
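The IsPush/IsPop predicates a little further below compare instr & 0xffc003e0 against the kPushRegPattern/kPopRegPattern constants defined above; the mask keeps the 10-bit major opcode (bits 31:22) and the rj/base field (bits 9:5) while wildcarding the 12-bit offset and rd, i.e. it matches any St_d/Ld_d whose base register is sp. A self-contained illustration (the st.d opcode value is assumed from the LoongArch manual, not taken from this patch; MakeStD and IsPushLike are invented names):

#include <cstdint>

constexpr uint32_t kOpcodeBaseMask = 0xffc003e0;  // bits 31:22 and bits 9:5
constexpr uint32_t kStD = 0x29c00000;  // assumed st.d major opcode (bits 31:22)
constexpr uint32_t kSpCode = 3;        // sp is r3 (see ToNumber above)

constexpr uint32_t MakeStD(uint32_t rd, uint32_t base, uint32_t si12) {
  return kStD | ((si12 & 0xfff) << 10) | (base << 5) | rd;
}

constexpr bool IsPushLike(uint32_t instr) {
  // Same test as IsPush below: any "st.d rd, base=sp, si12" matches,
  // whatever rd and si12 are.
  return (instr & kOpcodeBaseMask) == (kStD | (kSpCode << 5));
}

static_assert(IsPushLike(MakeStD(4, 3, 0)), "st.d a0, sp, 0 is push-like");
static_assert(!IsPushLike(MakeStD(4, 22, 0)), "a non-sp base must not match");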
-+ Align(4); -+} -+ -+Register Assembler::GetRkReg(Instr instr) { -+ return Register::from_code((instr & kRkFieldMask) >> kRkShift); -+} -+ -+Register Assembler::GetRjReg(Instr instr) { -+ return Register::from_code((instr & kRjFieldMask) >> kRjShift); -+} -+ -+Register Assembler::GetRdReg(Instr instr) { -+ return Register::from_code((instr & kRdFieldMask) >> kRdShift); -+} -+ -+uint32_t Assembler::GetRk(Instr instr) { -+ return (instr & kRkFieldMask) >> kRkShift; -+} -+ -+uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; } -+ -+uint32_t Assembler::GetRj(Instr instr) { -+ return (instr & kRjFieldMask) >> kRjShift; -+} -+ -+uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; } -+ -+uint32_t Assembler::GetRd(Instr instr) { -+ return (instr & kRdFieldMask) >> kRdShift; -+} -+ -+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; } -+ -+uint32_t Assembler::GetSa2(Instr instr) { -+ return (instr & kSa2FieldMask) >> kSaShift; -+} -+ -+uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; } -+ -+uint32_t Assembler::GetSa3(Instr instr) { -+ return (instr & kSa3FieldMask) >> kSaShift; -+} -+ -+uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; } -+ -+bool Assembler::IsPop(Instr instr) { -+ return (instr & 0xffc003e0) == kPopRegPattern; -+} -+ -+bool Assembler::IsPush(Instr instr) { -+ return (instr & 0xffc003e0) == kPushRegPattern; -+} -+ -+// Labels refer to positions in the (to be) generated code. -+// There are bound, linked, and unused labels. -+// -+// Bound labels refer to known positions in the already -+// generated code. pos() is the position the label refers to. -+// -+// Linked labels refer to unknown positions in the code -+// to be generated; pos() is the position of the last -+// instruction using the label. -+ -+// The link chain is terminated by a value in the instruction of -1, -+// which is an otherwise illegal value (branch -1 is inf loop). -+// The instruction 16-bit offset field addresses 32-bit words, but in -+// code is conv to an 18-bit value addressing bytes, hence the -4 value. -+ -+const int kEndOfChain = 0; -+// Determines the end of the Jump chain (a subset of the label link chain). -+const int kEndOfJumpChain = 0; -+ -+bool Assembler::IsBranch(Instr instr) { -+ uint32_t opcode = (instr >> 26) << 26; -+ // Checks if the instruction is a branch. -+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ || -+ opcode == B || opcode == BL || opcode == BEQ || -+ opcode == BNE || opcode == BLT || opcode == BGE || -+ opcode == BLTU || opcode == BGEU; -+ return isBranch; -+} -+ -+bool Assembler::IsB(Instr instr) { -+ uint32_t opcode = (instr >> 26) << 26; -+ // Checks if the instruction is a b. -+ bool isBranch = opcode == B || opcode == BL; -+ return isBranch; -+} -+ -+bool Assembler::IsBz(Instr instr) { -+ uint32_t opcode = (instr >> 26) << 26; -+ // Checks if the instruction is a branch. -+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ; -+ return isBranch; -+} -+ -+bool Assembler::IsEmittedConstant(Instr instr) { -+ // Add GetLabelConst function? -+ uint32_t label_constant = instr & ~kImm16Mask; -+ return label_constant == 0; // Emitted label const in reg-exp engine. -+} -+ -+bool Assembler::IsJ(Instr instr) { -+ uint32_t opcode = (instr >> 26) << 26; -+ // Checks if the instruction is a jump. 
-+ return opcode == JIRL; -+} -+ -+bool Assembler::IsLu12i_w(Instr instr) { -+ uint32_t opcode = (instr >> 25) << 25; -+ return opcode == LU12I_W; -+} -+ -+bool Assembler::IsOri(Instr instr) { -+ uint32_t opcode = (instr >> 22) << 22; -+ return opcode == ORI; -+} -+ -+bool Assembler::IsLu32i_d(Instr instr) { -+ uint32_t opcode = (instr >> 25) << 25; -+ return opcode == LU32I_D; -+} -+ -+bool Assembler::IsLu52i_d(Instr instr) { -+ uint32_t opcode = (instr >> 22) << 22; -+ return opcode == LU52I_D; -+} -+ -+bool Assembler::IsMov(Instr instr, Register rd, Register rj) { -+ // Checks if the instruction is a OR with zero_reg argument (aka MOV). -+ Instr instr1 = -+ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code(); -+ return instr == instr1; -+} -+ -+bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) { -+ DCHECK(is_int20(si20)); -+ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code(); -+ return instr == instr1; -+} -+ -+bool Assembler::IsNop(Instr instr, unsigned int type) { -+ // See Assembler::nop(type). -+ DCHECK_LT(type, 32); -+ // Traditional loongisa nop == andi(zero_reg, zero_reg, 0) -+ // When marking non-zero type, use andi(zero_reg, t7, type) -+ // to avoid use of ssnop and ehb special encodings of the -+ // andi instruction. -+ -+ Register nop_rt_reg = (type == 0) ? zero_reg : t7; -+ Instr instr1 = ANDI | ((type & kImm12Mask) << kRkShift) | -+ (nop_rt_reg.code() << kRjShift); -+ -+ return instr == instr1; -+} -+ -+static inline int32_t GetOffsetOfBranch(Instr instr, -+ Assembler::OffsetSize bits) { -+ int32_t result = 0; -+ if (bits == 16) { -+ result = (instr << 6) >> 16; -+ } else if (bits == 21) { -+ uint32_t low16 = instr << 6; -+ low16 = low16 >> 16; -+ low16 &= 0xffff; -+ int32_t hi5 = (instr << 27) >> 11; -+ result = hi5 | low16; -+ } else { -+ uint32_t low16 = instr << 6; -+ low16 = low16 >> 16; -+ low16 &= 0xffff; -+ int32_t hi10 = (instr << 22) >> 6; -+ result = hi10 | low16; -+ DCHECK_EQ(bits, 26); -+ } -+ return result << 2; -+} -+ -+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) { -+ if (Assembler::IsB(instr)) { -+ return Assembler::OffsetSize::kOffset26; -+ } else if (Assembler::IsBz(instr)) { -+ return Assembler::OffsetSize::kOffset21; -+ } else { -+ DCHECK(Assembler::IsBranch(instr)); -+ return Assembler::OffsetSize::kOffset16; -+ } -+} -+ -+static inline int32_t AddBranchOffset(int pos, Instr instr) { -+ Assembler::OffsetSize bits = OffsetSizeInBits(instr); -+ -+ int32_t imm = GetOffsetOfBranch(instr, bits); -+ -+ if (imm == kEndOfChain) { -+ // EndOfChain sentinel is returned directly, not relative to pc or pos. -+ return kEndOfChain; -+ } else { -+ // Handle the case that next branch position is 0. -+ // TODO: Define -4 as a constant -+ int32_t offset = pos + Assembler::kBranchPCOffset + imm; -+ return offset == 0 ? -4 : offset; -+ } -+} -+ -+int Assembler::target_at(int pos, bool is_internal) { -+ if (is_internal) { -+ int64_t* p = reinterpret_cast(buffer_start_ + pos); -+ int64_t address = *p; -+ if (address == kEndOfJumpChain) { -+ return kEndOfChain; -+ } else { -+ int64_t instr_address = reinterpret_cast(p); -+ DCHECK(instr_address - address < INT_MAX); -+ int delta = static_cast(instr_address - address); -+ DCHECK(pos > delta); -+ return pos - delta; -+ } -+ } -+ Instr instr = instr_at(pos); -+ -+ // TODO remove after remove label_at_put? -+ if ((instr & ~kImm16Mask) == 0) { -+ // Emitted label constant, not part of a branch. 
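
GetOffsetOfBranch above recovers a signed offset from a bit-field by shifting it up to the top of the word and arithmetically shifting back down, as the comment in target_at notes. A self-contained check of that trick for the 16-bit case (the field position at bit 10 is taken from the code above; everything else is illustrative):

    #include <cassert>
    #include <cstdint>

    int32_t ExtractOffset16(uint32_t instr) {
      // Field occupies bits [25:10]; (instr << 6) >> 16 sign-extends it.
      int32_t words = static_cast<int32_t>(instr << 6) >> 16;
      return words * 4;  // branch offsets address words, not bytes
    }

    int main() {
      uint32_t instr = (0xFFFFu & static_cast<uint32_t>(-3)) << 10;  // -3 words
      assert(ExtractOffset16(instr) == -12);
      return 0;
    }
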
-+ if (instr == 0) { -+ return kEndOfChain; -+ } else { -+ int32_t imm18 = ((instr & static_cast(kImm16Mask)) << 16) >> 14; -+ return (imm18 + pos); -+ } -+ } -+ -+ // Check we have a branch or jump instruction. -+ DCHECK(IsBranch(instr) || IsJ(instr) || IsLu12i_w(instr) || -+ IsPcAddi(instr, t8, 16)); -+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming -+ // the compiler uses arithmetic shifts for signed integers. -+ if (IsBranch(instr)) { -+ return AddBranchOffset(pos, instr); -+ } else if (IsPcAddi(instr, t8, 16)) { -+ // see BranchLong(Label* L) and BranchAndLinkLong ?? -+ int32_t imm32; -+ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); -+ Instr instr_ori = instr_at(pos + 2 * kInstrSize); -+ DCHECK(IsLu12i_w(instr_lu12i_w)); -+ // DCHECK(IsOri(instr_ori)); -+ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12; -+ imm32 |= ((instr_ori >> 10) & static_cast(kImm12Mask)); -+ if (imm32 == kEndOfJumpChain) { -+ // EndOfChain sentinel is returned directly, not relative to pc or pos. -+ return kEndOfChain; -+ } -+ return pos + imm32; -+ } else if (IsLu12i_w(instr)) { -+ abort(); -+ // TODO no used?? -+ /* Instr instr_lui = instr_at(pos + 0 * kInstrSize); -+ Instr instr_ori = instr_at(pos + 1 * kInstrSize); -+ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize); -+ DCHECK(IsOri(instr_ori)); -+ DCHECK(IsOri(instr_ori2)); -+ -+ // TODO(plind) create named constants for shift values. -+ int64_t imm = static_cast(instr_lui & kImm16Mask) << 48; -+ imm |= static_cast(instr_ori & kImm16Mask) << 32; -+ imm |= static_cast(instr_ori2 & kImm16Mask) << 16; -+ // Sign extend address; -+ imm >>= 16; -+ -+ if (imm == kEndOfJumpChain) { -+ // EndOfChain sentinel is returned directly, not relative to pc or -+ pos. return kEndOfChain; } else { uint64_t instr_address = -+ reinterpret_cast(buffer_start_ + pos); DCHECK(instr_address - -+ imm < INT_MAX); int delta = static_cast(instr_address - imm); -+ DCHECK(pos > delta); -+ return pos - delta; -+ }*/ -+ } else { -+ DCHECK(IsJ(instr)); -+ // TODO not used??? -+ abort(); -+ } -+} -+ -+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos, -+ Instr instr) { -+ int32_t bits = OffsetSizeInBits(instr); -+ int32_t imm = target_pos - pos; -+ DCHECK_EQ(imm & 3, 0); -+ imm >>= 2; -+ -+ DCHECK(is_intn(imm, bits)); -+ -+ if (bits == 16) { -+ const int32_t mask = ((1 << 16) - 1) << 10; -+ instr &= ~mask; -+ return instr | ((imm << 10) & mask); -+ } else if (bits == 21) { -+ const int32_t mask = 0x3fffc1f; -+ instr &= ~mask; -+ uint32_t low16 = (imm & kImm16Mask) << 10; -+ int32_t hi5 = (imm >> 16) & 0x1f; -+ return instr | low16 | hi5; -+ } else { -+ DCHECK_EQ(bits, 26); -+ const int32_t mask = 0x3ffffff; -+ instr &= ~mask; -+ uint32_t low16 = (imm & kImm16Mask) << 10; -+ int32_t hi10 = (imm >> 16) & 0x3ff; -+ return instr | low16 | hi10; -+ } -+} -+ -+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) { -+ if (is_internal) { -+ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; -+ *reinterpret_cast(buffer_start_ + pos) = imm; -+ return; -+ } -+ Instr instr = instr_at(pos); -+ if ((instr & ~kImm16Mask) == 0) { -+ DCHECK(target_pos == kEndOfChain || target_pos >= 0); -+ // Emitted label constant, not part of a branch. -+ // Make label relative to Code pointer of generated Code object. 
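
SetBranchOffset above scatters the word offset across two fields for the 21-bit forms: the low 16 bits land at bit 10, the high 5 bits at bit 0. A round-trip sketch (masks and shifts mirror the code above; the decode reuses the arithmetic-shift idiom from GetOffsetOfBranch):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t imm = -17500;  // a signed 21-bit word offset
      assert(imm >= -(1 << 20) && imm < (1 << 20));
      uint32_t low16 = (imm & 0xFFFF) << 10;
      uint32_t hi5 = (imm >> 16) & 0x1F;
      uint32_t encoded = low16 | hi5;
      // Decode: low half back from bit 10, high half sign-extended from bit 0.
      int32_t l = (encoded >> 10) & 0xFFFF;
      int32_t h = static_cast<int32_t>(encoded << 27) >> 11;
      assert((h | l) == imm);
      return 0;
    }
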
-+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); -+ return; -+ } -+ -+ if (IsBranch(instr)) { -+ instr = SetBranchOffset(pos, target_pos, instr); -+ instr_at_put(pos, instr); -+ } else if (0 == 1 /*IsLui(instr)*/) { -+ /* if (IsPcAddi(instr, t8, 16)) { -+ Instr instr_lui = instr_at(pos + 0 * kInstrSize); -+ Instr instr_ori = instr_at(pos + 2 * kInstrSize); -+ DCHECK(IsLui(instr_lui)); -+ DCHECK(IsOri(instr_ori)); -+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); -+ DCHECK_EQ(imm & 3, 0); -+ if (is_int16(imm + Assembler::kLongBranchPCOffset - -+ Assembler::kBranchPCOffset)) { -+ // Optimize by converting to regular branch and link with 16-bit -+ // offset. -+ Instr instr_b = REGIMM | BGEZAL; // Branch and link. -+ instr_b = SetBranchOffset(pos, target_pos, instr_b); -+ // Correct ra register to point to one instruction after jalr from -+ // TurboAssembler::BranchAndLinkLong. -+ Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift -+ | kOptimizedBranchAndLinkLongReturnOffset; -+ -+ instr_at_put(pos, instr_b); -+ instr_at_put(pos + 1 * kInstrSize, instr_a); -+ } else { -+ instr_lui &= ~kImm16Mask; -+ instr_ori &= ~kImm16Mask; -+ -+ instr_at_put(pos + 0 * kInstrSize, -+ instr_lui | ((imm >> kLuiShift) & kImm16Mask)); -+ instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask)); -+ } -+ } else { -+ Instr instr_lui = instr_at(pos + 0 * kInstrSize); -+ Instr instr_ori = instr_at(pos + 1 * kInstrSize); -+ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize); -+ DCHECK(IsOri(instr_ori)); -+ DCHECK(IsOri(instr_ori2)); -+ -+ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; -+ DCHECK_EQ(imm & 3, 0); -+ -+ instr_lui &= ~kImm16Mask; -+ instr_ori &= ~kImm16Mask; -+ instr_ori2 &= ~kImm16Mask; -+ -+ instr_at_put(pos + 0 * kInstrSize, -+ instr_lui | ((imm >> 32) & kImm16Mask)); -+ instr_at_put(pos + 1 * kInstrSize, -+ instr_ori | ((imm >> 16) & kImm16Mask)); -+ instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask)); -+ }*/ -+ } else if (IsPcAddi(instr, t8, 16)) { -+ abort(); /* -+ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); -+ Instr instr_ori = instr_at(pos + 2 * kInstrSize); -+ DCHECK(IsLu12i_w(instr_lu12i_w)); -+ //DCHECK(IsOri(instr_ori)); -+ -+ int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset); -+ -+ if (is_int21(imm_short)) { -+ // Optimize by converting to regular branch with 21-bit -+ // offset -+ Instr instr_b = B; -+ instr_b = SetBranchOffset(pos, target_pos, instr_b); -+ -+ instr_at_put(pos, instr_b); -+ } else { -+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); -+ DCHECK_EQ(imm & 3, 0); -+ -+ instr_lu12i_w &= 0xfe00001fu; // opcode:7 | bit20 | rd:5 -+ instr_ori &= 0xffc003ffu; // opcode:10 | bit12 | rj:5 | rd:5 -+ -+ instr_at_put(pos + 1 * kInstrSize, -+ instr_lu12i_w | (((imm >> 12) & 0xfffff) << 5)); -+ instr_at_put(pos + 2 * kInstrSize, instr_ori | -+ ((imm & 0xfff) << 10)); -+ }*/ -+ } else if (IsJ(instr)) { -+ /* -+ int32_t imm28 = target_pos - pos; -+ DCHECK_EQ(imm28 & 3, 0); -+ -+ uint32_t imm26 = static_cast(imm28 >> 2); -+ DCHECK(is_uint26(imm26)); -+ // Place 26-bit signed offset with markings. -+ // When code is committed it will be resolved to j/jal. -+ int32_t mark = IsJ(instr) ? 
kJRawMark : kJalRawMark; -+ instr_at_put(pos, mark | (imm26 & kImm26Mask));*/ -+ abort(); -+ } else { -+ /* int32_t imm28 = target_pos - pos; -+ DCHECK_EQ(imm28 & 3, 0); -+ -+ uint32_t imm26 = static_cast(imm28 >> 2); -+ DCHECK(is_uint26(imm26)); -+ // Place raw 26-bit signed offset. -+ // When code is committed it will be resolved to j/jal. -+ instr &= ~kImm26Mask; -+ instr_at_put(pos, instr | (imm26 & kImm26Mask));*/ -+ abort(); -+ } -+} -+ -+void Assembler::print(const Label* L) { -+ if (L->is_unused()) { -+ PrintF("unused label\n"); -+ } else if (L->is_bound()) { -+ PrintF("bound label to %d\n", L->pos()); -+ } else if (L->is_linked()) { -+ Label l; -+ l.link_to(L->pos()); -+ PrintF("unbound label"); -+ while (l.is_linked()) { -+ PrintF("@ %d ", l.pos()); -+ Instr instr = instr_at(l.pos()); -+ if ((instr & ~kImm16Mask) == 0) { -+ PrintF("value\n"); -+ } else { -+ PrintF("%d\n", instr); -+ } -+ next(&l, is_internal_reference(&l)); -+ } -+ } else { -+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_); -+ } -+} -+ -+void Assembler::bind_to(Label* L, int pos) { -+ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. -+ int trampoline_pos = kInvalidSlotPos; -+ bool is_internal = false; -+ if (L->is_linked() && !trampoline_emitted_) { -+ unbound_labels_count_--; -+ if (!is_internal_reference(L)) { -+ next_buffer_check_ += kTrampolineSlotsSize; -+ } -+ } -+ -+ while (L->is_linked()) { -+ int fixup_pos = L->pos(); -+ int dist = pos - fixup_pos; -+ is_internal = is_internal_reference(L); -+ next(L, is_internal); // Call next before overwriting link with target at -+ // fixup_pos. -+ Instr instr = instr_at(fixup_pos); -+ if (is_internal) { -+ target_at_put(fixup_pos, pos, is_internal); -+ } else { -+ if (IsBranch(instr)) { -+ int branch_offset = BranchOffset(instr); -+ if (dist > branch_offset) { -+ if (trampoline_pos == kInvalidSlotPos) { -+ trampoline_pos = get_trampoline_entry(fixup_pos); -+ CHECK_NE(trampoline_pos, kInvalidSlotPos); -+ } -+ CHECK((trampoline_pos - fixup_pos) <= branch_offset); -+ target_at_put(fixup_pos, trampoline_pos, false); -+ fixup_pos = trampoline_pos; -+ } -+ target_at_put(fixup_pos, pos, false); -+ } else { -+ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) || -+ IsPcAddi(instr, t8, 8)); -+ target_at_put(fixup_pos, pos, false); -+ } -+ } -+ } -+ L->bind_to(pos); -+ -+ // Keep track of the last bound label so we don't eliminate any instructions -+ // before a bound label. -+ if (pos > last_bound_pos_) last_bound_pos_ = pos; -+} -+ -+void Assembler::bind(Label* L) { -+ DCHECK(!L->is_bound()); // Label can only be bound once. 
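
bind_to and next above walk a link chain threaded through the unresolved instructions themselves: each forward reference stores the position of the previous one, and binding patches every slot in turn. A simplified model of that walk (the real code uses 0 as the kEndOfChain sentinel plus a -4 marker to disambiguate position 0; -1 is used below only to keep the sketch short):

    #include <cassert>
    #include <vector>

    int main() {
      const int kEnd = -1;
      std::vector<int> slots(10, kEnd);
      // Three forward references at positions 2, 5, 8 thread the chain:
      int head = kEnd;
      for (int pos : {2, 5, 8}) { slots[pos] = head; head = pos; }
      // Binding the label at position 9 walks and patches each slot:
      std::vector<int> order;
      for (int p = head; p != kEnd;) {
        int next = slots[p];
        slots[p] = 9;  // patch the "instruction" to the bound target
        order.push_back(p);
        p = next;
      }
      assert((order == std::vector<int>{8, 5, 2}));
      return 0;
    }
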
-+ bind_to(L, pc_offset()); -+} -+ -+void Assembler::next(Label* L, bool is_internal) { -+ DCHECK(L->is_linked()); -+ int link = target_at(L->pos(), is_internal); -+ if (link == kEndOfChain) { -+ L->Unuse(); -+ } else if (link == -4) { -+ // Next position is pc_offset == 0 -+ L->link_to(0); -+ } else { -+ DCHECK_GE(link, 0); -+ L->link_to(link); -+ } -+} -+ -+bool Assembler::is_near_c(Label* L) { -+ DCHECK(L->is_bound()); -+ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize; -+} -+ -+bool Assembler::is_near(Label* L, OffsetSize bits) { -+ DCHECK(L->is_bound()); -+ return ((pc_offset() - L->pos()) < -+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize); -+} -+ -+bool Assembler::is_near_a(Label* L) { -+ DCHECK(L->is_bound()); -+ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize; -+} -+ -+int Assembler::BranchOffset(Instr instr) { -+ int bits = OffsetSize::kOffset16; -+ -+ uint32_t opcode = (instr >> 26) << 26; -+ switch (opcode) { -+ case B: -+ case BL: -+ bits = OffsetSize::kOffset26; -+ break; -+ case BNEZ: -+ case BEQZ: -+ case BCZ: -+ bits = OffsetSize::kOffset21; -+ break; -+ case BNE: -+ case BEQ: -+ case BLT: -+ case BGE: -+ case BLTU: -+ case BGEU: -+ case JIRL: -+ bits = OffsetSize::kOffset16; -+ break; -+ default: -+ break; -+ } -+ -+ return (1 << (bits + 2 - 1)) - 1; -+} -+ -+// We have to use a temporary register for things that can be relocated even -+// if they can be encoded in the LA's 16 bits of immediate-offset instruction -+// space. There is no guarantee that the relocated location can be similarly -+// encoded. -+bool Assembler::MustUseReg(RelocInfo::Mode rmode) { -+ return !RelocInfo::IsNone(rmode); -+} -+ -+void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid()); -+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | -+ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16); -+ emit(instr); -+} -+ -+void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(BCZ == opcode && is_int21(si21)); -+ DCHECK(cj >= 0 && cj <= 7); -+ int32_t sc = (isEq ? 
cj : cj + 8); -+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) | -+ ((si21 & 0x1fffff) >> 16); -+ emit(instr); -+} -+ -+void Assembler::GenB(Opcode opcode, int32_t si26) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK((B == opcode || BL == opcode) && is_int26(si26)); -+ Instr instr = -+ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16); -+ emit(instr); -+} -+ -+void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(is_int16(si16)); -+ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) | -+ (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, -+ FPURegister fj, CFRegister cd) { -+ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D); -+ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) | -+ (fj.code() << kFjShift) | cd; -+ emit(instr); -+} -+ -+void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk, -+ FPURegister fj, FPURegister rd) { -+ DCHECK((opcode == FSEL)); -+ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) | -+ (fj.code() << kFjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rj, Register rd, -+ bool rjrd) { -+ DCHECK(rjrd); -+ Instr instr = 0; -+ instr = opcode | (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) { -+ Instr instr = opcode | (fj.code() << kFjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) { -+ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) || -+ (opcode == MOVGR2FRH_W)); -+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) { -+ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) || -+ (opcode == MOVFRH2GR_S)); -+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) { -+ DCHECK((opcode == MOVGR2FCSR)); -+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) { -+ DCHECK((opcode == MOVFCSR2GR)); -+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) { -+ DCHECK((opcode == MOVFR2CF)); -+ Instr instr = opcode | (fj.code() << kFjShift) | cd; -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) { -+ DCHECK((opcode == MOVCF2FR)); -+ Instr instr = opcode | cj << kFjShift | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) { -+ DCHECK((opcode == MOVGR2CF)); -+ Instr instr = opcode | (rj.code() << kRjShift) | cd; -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) { -+ DCHECK((opcode == MOVCF2GR)); -+ Instr instr = opcode | cj << kFjShift | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, -+ Register rd) { -+ Instr instr = -+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void 
Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj, -+ FPURegister fd) { -+ Instr instr = -+ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk, -+ FPURegister fj, FPURegister fd) { -+ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) | -+ (fj.code() << kFjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, -+ FPURegister fd) { -+ Instr instr = -+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj, -+ Register rd) { -+ DCHECK(is_uint3(bit3)); -+ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) | -+ (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj, -+ Register rd) { -+ DCHECK(is_uint6(bit6m) && is_uint6(bit6l)); -+ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift | -+ (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) { -+ // DCHECK(is_uint20(bit20) || is_int20(bit20)); -+ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t bit15) { -+ DCHECK(is_uint15(bit15)); -+ Instr instr = opcode | (bit15 & 0x7fff); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd, -+ int32_t value_bits) { -+ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 || -+ value_bits == 16); -+ uint32_t imm = value & 0x3f; -+ if (value_bits == 12) { -+ imm = value & kImm12Mask; -+ } else if (value_bits == 14) { -+ imm = value & 0x3fff; -+ } else if (value_bits == 16) { -+ imm = value & kImm16Mask; -+ } -+ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code(); -+ emit(instr); -+} -+ -+void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj, -+ FPURegister fd) { -+ DCHECK(is_int12(bit12)); -+ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) | -+ (rj.code() << kRjShift) | fd.code(); -+ emit(instr); -+} -+ -+// Returns the next free trampoline entry. -+int32_t Assembler::get_trampoline_entry(int32_t pos) { -+ int32_t trampoline_entry = kInvalidSlotPos; -+ if (!internal_trampoline_exception_) { -+ if (trampoline_.start() > pos) { -+ trampoline_entry = trampoline_.take_slot(); -+ } -+ -+ if (kInvalidSlotPos == trampoline_entry) { -+ internal_trampoline_exception_ = true; -+ } -+ } -+ return trampoline_entry; -+} -+ -+uint64_t Assembler::jump_address(Label* L) { -+ int64_t target_pos; -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); // L's link. -+ L->link_to(pc_offset()); -+ } else { -+ L->link_to(pc_offset()); -+ return kEndOfJumpChain; -+ } -+ } -+ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; -+ DCHECK_EQ(imm & 3, 0); -+ -+ return imm; -+} -+ -+uint64_t Assembler::branch_long_offset(Label* L) { -+ int64_t target_pos; -+ -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); // L's link. 
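
The GenImm encoders above guard every operand with is_uintN/is_intN-style DCHECKs. Simplified stand-ins for those helpers (V8's real versions live elsewhere; these are illustrative only) showing the operand ranges involved:

    #include <cassert>
    #include <cstdint>

    bool is_intn(int64_t x, unsigned n) {
      return -(int64_t{1} << (n - 1)) <= x && x < (int64_t{1} << (n - 1));
    }
    bool is_uintn(int64_t x, unsigned n) {
      return 0 <= x && x < (int64_t{1} << n);
    }

    int main() {
      assert(is_intn(-2048, 12) && !is_intn(2048, 12));    // si12 operands
      assert(is_uintn(4095, 12) && !is_uintn(4096, 12));   // ui12 operands
      assert(is_intn(-32768, 16) && !is_intn(32768, 16));  // si16 operands
      return 0;
    }
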
-+ L->link_to(pc_offset()); -+ } else { -+ L->link_to(pc_offset()); -+ return kEndOfJumpChain; -+ } -+ } -+ int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset); -+ DCHECK_EQ(offset & 3, 0); -+ -+ return static_cast(offset); -+} -+ -+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) { -+ int32_t target_pos; -+ -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); -+ L->link_to(pc_offset()); -+ } else { -+ L->link_to(pc_offset()); -+ if (!trampoline_emitted_) { -+ unbound_labels_count_++; -+ next_buffer_check_ -= kTrampolineSlotsSize; -+ } -+ return kEndOfChain; -+ } -+ } -+ -+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); -+ DCHECK(is_intn(offset, bits + 2)); -+ DCHECK_EQ(offset & 3, 0); -+ -+ return offset; -+} -+ -+void Assembler::label_at_put(Label* L, int at_offset) { -+ int target_pos; -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); // L's link. -+ int32_t imm18 = target_pos - at_offset; -+ DCHECK_EQ(imm18 & 3, 0); -+ int32_t imm16 = imm18 >> 2; -+ DCHECK(is_int16(imm16)); -+ instr_at_put(at_offset, (imm16 & kImm16Mask)); -+ } else { -+ target_pos = kEndOfChain; -+ instr_at_put(at_offset, 0); -+ if (!trampoline_emitted_) { -+ unbound_labels_count_++; -+ next_buffer_check_ -= kTrampolineSlotsSize; -+ } -+ } -+ L->link_to(at_offset); -+ } -+ // TODO PushBackTrack() -+} -+ -+//------- Branch and jump instructions -------- -+ -+void Assembler::b(int32_t offset) { GenB(B, offset); } -+ -+void Assembler::bl(int32_t offset) { GenB(BL, offset); } -+ -+void Assembler::beq(Register rj, Register rd, int32_t offset) { -+ GenBJ(BEQ, rj, rd, offset); -+} -+ -+void Assembler::bne(Register rj, Register rd, int32_t offset) { -+ GenBJ(BNE, rj, rd, offset); -+} -+ -+void Assembler::blt(Register rj, Register rd, int32_t offset) { -+ GenBJ(BLT, rj, rd, offset); -+} -+ -+void Assembler::bge(Register rj, Register rd, int32_t offset) { -+ GenBJ(BGE, rj, rd, offset); -+} -+ -+void Assembler::bltu(Register rj, Register rd, int32_t offset) { -+ GenBJ(BLTU, rj, rd, offset); -+} -+ -+void Assembler::bgeu(Register rj, Register rd, int32_t offset) { -+ GenBJ(BGEU, rj, rd, offset); -+} -+ -+void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); } -+void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); } -+ -+void Assembler::jirl(Register rd, Register rj, int32_t offset) { -+ GenBJ(JIRL, rj, rd, offset); -+} -+ -+void Assembler::bceqz(CFRegister cj, int32_t si21) { -+ GenB(BCZ, cj, si21, true); -+} -+ -+void Assembler::bcnez(CFRegister cj, int32_t si21) { -+ GenB(BCZ, cj, si21, false); -+} -+ -+// -------Data-processing-instructions--------- -+ -+// Arithmetic. 
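
branch_offset_helper above computes target_pos - (pc_offset() + kBranchPCOffset) and the emitters then scale the byte offset down to words before encoding. In miniature (kBranchPCOffset's real value is not visible in this hunk; 0 is assumed here purely for the demo):

    #include <cassert>

    int main() {
      const int kBranchPCOffset = 0;         // assumption for illustration
      int pc_offset = 100, target_pos = 64;  // a backward branch
      int offset = target_pos - (pc_offset + kBranchPCOffset);
      assert(offset == -36 && (offset & 3) == 0);  // must be word-aligned
      int encoded_words = offset / 4;        // the `>> 2` scaling, sign-safe
      assert(encoded_words == -9);
      return 0;
    }
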
-+void Assembler::add_w(Register rd, Register rj, Register rk) { -+ GenRegister(ADD_W, rk, rj, rd); -+} -+ -+void Assembler::add_d(Register rd, Register rj, Register rk) { -+ GenRegister(ADD_D, rk, rj, rd); -+} -+ -+void Assembler::sub_w(Register rd, Register rj, Register rk) { -+ GenRegister(SUB_W, rk, rj, rd); -+} -+ -+void Assembler::sub_d(Register rd, Register rj, Register rk) { -+ GenRegister(SUB_D, rk, rj, rd); -+} -+ -+void Assembler::addi_w(Register rd, Register rj, int32_t si12) { -+ GenImm(ADDI_W, si12, rj, rd, 12); -+} -+ -+void Assembler::addi_d(Register rd, Register rj, int32_t si12) { -+ GenImm(ADDI_D, si12, rj, rd, 12); -+} -+ -+void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) { -+ GenImm(ADDU16I_D, si16, rj, rd, 16); -+} -+ -+void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) { -+ DCHECK(is_uint2(sa2 - 1)); -+ GenImm(ALSL_W, sa2 - 1, rk, rj, rd); -+} -+ -+void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) { -+ DCHECK(is_uint2(sa2 - 1)); -+ GenImm(ALSL_WU, sa2 + 3, rk, rj, rd); -+} -+ -+void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) { -+ DCHECK(is_uint2(sa2 - 1)); -+ GenImm(ALSL_D, sa2 - 1, rk, rj, rd); -+} -+ -+void Assembler::lu12i_w(Register rd, int32_t si20) { -+ GenImm(LU12I_W, si20, rd); -+} -+ -+void Assembler::lu32i_d(Register rd, int32_t si20) { -+ GenImm(LU32I_D, si20, rd); -+} -+ -+void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) { -+ GenImm(LU52I_D, si12, rj, rd, 12); -+} -+ -+void Assembler::slt(Register rd, Register rj, Register rk) { -+ GenRegister(SLT, rk, rj, rd); -+} -+ -+void Assembler::sltu(Register rd, Register rj, Register rk) { -+ GenRegister(SLTU, rk, rj, rd); -+} -+ -+void Assembler::slti(Register rd, Register rj, int32_t si12) { -+ GenImm(SLTI, si12, rj, rd, 12); -+} -+ -+void Assembler::sltui(Register rd, Register rj, int32_t si12) { -+ GenImm(SLTUI, si12, rj, rd, 12); -+} -+ -+void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); } -+ -+void Assembler::pcaddu12i(Register rd, int32_t si20) { -+ GenImm(PCADDU12I, si20, rd); -+} -+ -+void Assembler::pcaddu18i(Register rd, int32_t si20) { -+ GenImm(PCADDU18I, si20, rd); -+} -+ -+void Assembler::pcalau12i(Register rd, int32_t si20) { -+ GenImm(PCALAU12I, si20, rd); -+} -+ -+void Assembler::and_(Register rd, Register rj, Register rk) { -+ GenRegister(AND, rk, rj, rd); -+} -+ -+void Assembler::or_(Register rd, Register rj, Register rk) { -+ GenRegister(OR, rk, rj, rd); -+} -+ -+void Assembler::xor_(Register rd, Register rj, Register rk) { -+ GenRegister(XOR, rk, rj, rd); -+} -+ -+void Assembler::nor(Register rd, Register rj, Register rk) { -+ GenRegister(NOR, rk, rj, rd); -+} -+ -+void Assembler::andn(Register rd, Register rj, Register rk) { -+ GenRegister(ANDN, rk, rj, rd); -+} -+ -+void Assembler::orn(Register rd, Register rj, Register rk) { -+ GenRegister(ORN, rk, rj, rd); -+} -+ -+void Assembler::andi(Register rd, Register rj, int32_t ui12) { -+ GenImm(ANDI, ui12, rj, rd, 12); -+} -+ -+void Assembler::ori(Register rd, Register rj, int32_t ui12) { -+ GenImm(ORI, ui12, rj, rd, 12); -+} -+ -+void Assembler::xori(Register rd, Register rj, int32_t ui12) { -+ GenImm(XORI, ui12, rj, rd, 12); -+} -+ -+void Assembler::mul_w(Register rd, Register rj, Register rk) { -+ GenRegister(MUL_W, rk, rj, rd); -+} -+ -+void Assembler::mulh_w(Register rd, Register rj, Register rk) { -+ GenRegister(MULH_W, rk, rj, rd); -+} -+ -+void Assembler::mulh_wu(Register rd, Register rj, Register rk) { 
-+ GenRegister(MULH_WU, rk, rj, rd); -+} -+ -+void Assembler::mul_d(Register rd, Register rj, Register rk) { -+ GenRegister(MUL_D, rk, rj, rd); -+} -+ -+void Assembler::mulh_d(Register rd, Register rj, Register rk) { -+ GenRegister(MULH_D, rk, rj, rd); -+} -+ -+void Assembler::mulh_du(Register rd, Register rj, Register rk) { -+ GenRegister(MULH_DU, rk, rj, rd); -+} -+ -+void Assembler::mulw_d_w(Register rd, Register rj, Register rk) { -+ GenRegister(MULW_D_W, rk, rj, rd); -+} -+ -+void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) { -+ GenRegister(MULW_D_WU, rk, rj, rd); -+} -+ -+void Assembler::div_w(Register rd, Register rj, Register rk) { -+ GenRegister(DIV_W, rk, rj, rd); -+} -+ -+void Assembler::mod_w(Register rd, Register rj, Register rk) { -+ GenRegister(MOD_W, rk, rj, rd); -+} -+ -+void Assembler::div_wu(Register rd, Register rj, Register rk) { -+ GenRegister(DIV_WU, rk, rj, rd); -+} -+ -+void Assembler::mod_wu(Register rd, Register rj, Register rk) { -+ GenRegister(MOD_WU, rk, rj, rd); -+} -+ -+void Assembler::div_d(Register rd, Register rj, Register rk) { -+ GenRegister(DIV_D, rk, rj, rd); -+} -+ -+void Assembler::mod_d(Register rd, Register rj, Register rk) { -+ GenRegister(MOD_D, rk, rj, rd); -+} -+ -+void Assembler::div_du(Register rd, Register rj, Register rk) { -+ GenRegister(DIV_DU, rk, rj, rd); -+} -+ -+void Assembler::mod_du(Register rd, Register rj, Register rk) { -+ GenRegister(MOD_DU, rk, rj, rd); -+} -+ -+// Shifts. -+void Assembler::sll_w(Register rd, Register rj, Register rk) { -+ GenRegister(SLL_W, rk, rj, rd); -+} -+ -+void Assembler::srl_w(Register rd, Register rj, Register rk) { -+ GenRegister(SRL_W, rk, rj, rd); -+} -+ -+void Assembler::sra_w(Register rd, Register rj, Register rk) { -+ GenRegister(SRA_W, rk, rj, rd); -+} -+ -+void Assembler::rotr_w(Register rd, Register rj, Register rk) { -+ GenRegister(ROTR_W, rk, rj, rd); -+} -+ -+void Assembler::slli_w(Register rd, Register rj, int32_t ui5) { -+ DCHECK(is_uint5(ui5)); -+ GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6); -+} -+ -+void Assembler::srli_w(Register rd, Register rj, int32_t ui5) { -+ DCHECK(is_uint5(ui5)); -+ GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6); -+} -+ -+void Assembler::srai_w(Register rd, Register rj, int32_t ui5) { -+ DCHECK(is_uint5(ui5)); -+ GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6); -+} -+ -+void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) { -+ DCHECK(is_uint5(ui5)); -+ GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6); -+} -+ -+void Assembler::sll_d(Register rd, Register rj, Register rk) { -+ GenRegister(SLL_D, rk, rj, rd); -+} -+ -+void Assembler::srl_d(Register rd, Register rj, Register rk) { -+ GenRegister(SRL_D, rk, rj, rd); -+} -+ -+void Assembler::sra_d(Register rd, Register rj, Register rk) { -+ GenRegister(SRA_D, rk, rj, rd); -+} -+ -+void Assembler::rotr_d(Register rd, Register rj, Register rk) { -+ GenRegister(ROTR_D, rk, rj, rd); -+} -+ -+void Assembler::slli_d(Register rd, Register rj, int32_t ui6) { -+ GenImm(SLLI_D, ui6, rj, rd, 6); -+} -+ -+void Assembler::srli_d(Register rd, Register rj, int32_t ui6) { -+ GenImm(SRLI_D, ui6, rj, rd, 6); -+} -+ -+void Assembler::srai_d(Register rd, Register rj, int32_t ui6) { -+ GenImm(SRAI_D, ui6, rj, rd, 6); -+} -+ -+void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) { -+ GenImm(ROTRI_D, ui6, rj, rd, 6); -+} -+ -+// Bit twiddling. 
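
Note the asymmetry in the shift emitters above: the 64-bit forms pass ui6 straight into the 6-bit field, while the 32-bit forms emit ui5 + 0x20, forcing bit 5 on. As encoded here, that appears to be what distinguishes the 32-bit shift forms from the 64-bit ones; a quick check of the arithmetic:

    #include <cassert>

    int main() {
      int ui5 = 17;            // 32-bit shift amount, 0..31
      int field = ui5 + 0x20;  // slli_w packs this into the 6-bit field
      assert((field & 0x1F) == 17 && ((field >> 5) & 1) == 1);
      int ui6 = 45;            // 64-bit shift amount, 0..63, used as-is
      assert((ui6 & 0x3F) == 45);
      return 0;
    }
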
-+void Assembler::ext_w_b(Register rd, Register rj) { -+ GenRegister(EXT_W_B, rj, rd); -+} -+ -+void Assembler::ext_w_h(Register rd, Register rj) { -+ GenRegister(EXT_W_H, rj, rd); -+} -+ -+void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); } -+ -+void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); } -+ -+void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); } -+ -+void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); } -+ -+void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); } -+ -+void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); } -+ -+void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); } -+ -+void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); } -+ -+void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) { -+ DCHECK(is_uint2(sa2)); -+ GenImm(BYTEPICK_W, sa2, rk, rj, rd); -+} -+ -+void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) { -+ GenImm(BYTEPICK_D, sa3, rk, rj, rd); -+} -+ -+void Assembler::revb_2h(Register rd, Register rj) { -+ GenRegister(REVB_2H, rj, rd); -+} -+ -+void Assembler::revb_4h(Register rd, Register rj) { -+ GenRegister(REVB_4H, rj, rd); -+} -+ -+void Assembler::revb_2w(Register rd, Register rj) { -+ GenRegister(REVB_2W, rj, rd); -+} -+ -+void Assembler::revb_d(Register rd, Register rj) { -+ GenRegister(REVB_D, rj, rd); -+} -+ -+void Assembler::revh_2w(Register rd, Register rj) { -+ GenRegister(REVH_2W, rj, rd); -+} -+ -+void Assembler::revh_d(Register rd, Register rj) { -+ GenRegister(REVH_D, rj, rd); -+} -+ -+void Assembler::bitrev_4b(Register rd, Register rj) { -+ GenRegister(BITREV_4B, rj, rd); -+} -+ -+void Assembler::bitrev_8b(Register rd, Register rj) { -+ GenRegister(BITREV_8B, rj, rd); -+} -+ -+void Assembler::bitrev_w(Register rd, Register rj) { -+ GenRegister(BITREV_W, rj, rd); -+} -+ -+void Assembler::bitrev_d(Register rd, Register rj) { -+ GenRegister(BITREV_D, rj, rd); -+} -+ -+void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw, -+ int32_t lsbw) { -+ DCHECK(is_uint5(msbw) && is_uint5(lsbw)); -+ GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd); -+} -+ -+void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd, -+ int32_t lsbd) { -+ GenImm(BSTRINS_D, msbd, lsbd, rj, rd); -+} -+ -+void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw, -+ int32_t lsbw) { -+ DCHECK(is_uint5(msbw) && is_uint5(lsbw)); -+ GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd); -+} -+ -+void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd, -+ int32_t lsbd) { -+ GenImm(BSTRPICK_D, msbd, lsbd, rj, rd); -+} -+ -+void Assembler::maskeqz(Register rd, Register rj, Register rk) { -+ GenRegister(MASKEQZ, rk, rj, rd); -+} -+ -+void Assembler::masknez(Register rd, Register rj, Register rk) { -+ GenRegister(MASKNEZ, rk, rj, rd); -+} -+ -+// Memory-instructions -+void Assembler::ld_b(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_B, si12, rj, rd, 12); -+} -+ -+void Assembler::ld_h(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_H, si12, rj, rd, 12); -+} -+ -+void Assembler::ld_w(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_W, si12, rj, rd, 12); -+} -+ -+void Assembler::ld_d(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_D, si12, rj, rd, 12); -+} -+ -+void Assembler::ld_bu(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_BU, si12, rj, rd, 12); -+} -+ -+void 
Assembler::ld_hu(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_HU, si12, rj, rd, 12); -+} -+ -+void Assembler::ld_wu(Register rd, Register rj, int32_t si12) { -+ GenImm(LD_WU, si12, rj, rd, 12); -+} -+ -+void Assembler::st_b(Register rd, Register rj, int32_t si12) { -+ GenImm(ST_B, si12, rj, rd, 12); -+} -+ -+void Assembler::st_h(Register rd, Register rj, int32_t si12) { -+ GenImm(ST_H, si12, rj, rd, 12); -+} -+ -+void Assembler::st_w(Register rd, Register rj, int32_t si12) { -+ GenImm(ST_W, si12, rj, rd, 12); -+} -+ -+void Assembler::st_d(Register rd, Register rj, int32_t si12) { -+ GenImm(ST_D, si12, rj, rd, 12); -+} -+ -+void Assembler::ldx_b(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_B, rk, rj, rd); -+} -+ -+void Assembler::ldx_h(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_H, rk, rj, rd); -+} -+ -+void Assembler::ldx_w(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_W, rk, rj, rd); -+} -+ -+void Assembler::ldx_d(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_D, rk, rj, rd); -+} -+ -+void Assembler::ldx_bu(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_BU, rk, rj, rd); -+} -+ -+void Assembler::ldx_hu(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_HU, rk, rj, rd); -+} -+ -+void Assembler::ldx_wu(Register rd, Register rj, Register rk) { -+ GenRegister(LDX_WU, rk, rj, rd); -+} -+ -+void Assembler::stx_b(Register rd, Register rj, Register rk) { -+ GenRegister(STX_B, rk, rj, rd); -+} -+ -+void Assembler::stx_h(Register rd, Register rj, Register rk) { -+ GenRegister(STX_H, rk, rj, rd); -+} -+ -+void Assembler::stx_w(Register rd, Register rj, Register rk) { -+ GenRegister(STX_W, rk, rj, rd); -+} -+ -+void Assembler::stx_d(Register rd, Register rj, Register rk) { -+ GenRegister(STX_D, rk, rj, rd); -+} -+ -+void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(LDPTR_W, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(LDPTR_D, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::stptr_w(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(STPTR_W, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::stptr_d(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(STPTR_D, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::amswap_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMSWAP_W, rk, rj, rd); -+} -+ -+void Assembler::amswap_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMSWAP_D, rk, rj, rd); -+} -+ -+void Assembler::amadd_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMADD_W, rk, rj, rd); -+} -+ -+void Assembler::amadd_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMADD_D, rk, rj, rd); -+} -+ -+void Assembler::amand_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMAND_W, rk, rj, rd); -+} -+ -+void Assembler::amand_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMAND_D, rk, rj, rd); -+} -+ -+void Assembler::amor_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMOR_W, rk, rj, rd); -+} -+ -+void Assembler::amor_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMOR_D, rk, rj, rd); -+} -+ -+void Assembler::amxor_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMXOR_W, rk, rj, rd); -+} -+ -+void Assembler::amxor_d(Register rd, Register rk, Register 
rj) { -+ GenRegister(AMXOR_D, rk, rj, rd); -+} -+ -+void Assembler::ammax_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_W, rk, rj, rd); -+} -+ -+void Assembler::ammax_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_D, rk, rj, rd); -+} -+ -+void Assembler::ammin_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_W, rk, rj, rd); -+} -+ -+void Assembler::ammin_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_D, rk, rj, rd); -+} -+ -+void Assembler::ammax_wu(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_WU, rk, rj, rd); -+} -+ -+void Assembler::ammax_du(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_DU, rk, rj, rd); -+} -+ -+void Assembler::ammin_wu(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_WU, rk, rj, rd); -+} -+ -+void Assembler::ammin_du(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_DU, rk, rj, rd); -+} -+ -+void Assembler::amswap_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMSWAP_DB_W, rk, rj, rd); -+} -+ -+void Assembler::amswap_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMSWAP_DB_D, rk, rj, rd); -+} -+ -+void Assembler::amadd_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMADD_DB_W, rk, rj, rd); -+} -+ -+void Assembler::amadd_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMADD_DB_D, rk, rj, rd); -+} -+ -+void Assembler::amand_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMAND_DB_W, rk, rj, rd); -+} -+ -+void Assembler::amand_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMAND_DB_D, rk, rj, rd); -+} -+ -+void Assembler::amor_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMOR_DB_W, rk, rj, rd); -+} -+ -+void Assembler::amor_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMOR_DB_D, rk, rj, rd); -+} -+ -+void Assembler::amxor_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMXOR_DB_W, rk, rj, rd); -+} -+ -+void Assembler::amxor_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMXOR_DB_D, rk, rj, rd); -+} -+ -+void Assembler::ammax_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_DB_W, rk, rj, rd); -+} -+ -+void Assembler::ammax_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_DB_D, rk, rj, rd); -+} -+ -+void Assembler::ammin_db_w(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_DB_W, rk, rj, rd); -+} -+ -+void Assembler::ammin_db_d(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_DB_D, rk, rj, rd); -+} -+ -+void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_DB_WU, rk, rj, rd); -+} -+ -+void Assembler::ammax_db_du(Register rd, Register rk, Register rj) { -+ GenRegister(AMMAX_DB_DU, rk, rj, rd); -+} -+ -+void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_DB_WU, rk, rj, rd); -+} -+ -+void Assembler::ammin_db_du(Register rd, Register rk, Register rj) { -+ GenRegister(AMMIN_DB_DU, rk, rj, rd); -+} -+ -+void Assembler::ll_w(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(LL_W, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::ll_d(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(LL_D, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::sc_w(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(SC_W, si14 >> 2, rj, rd, 14); -+} -+ -+void 
Assembler::sc_d(Register rd, Register rj, int32_t si14) { -+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0)); -+ GenImm(SC_D, si14 >> 2, rj, rd, 14); -+} -+ -+void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); } -+ -+void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); } -+ -+// Break / Trap instructions. -+void Assembler::break_(uint32_t code, bool break_as_stop) { -+ DCHECK( -+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || -+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode))); -+ GenImm(BREAK, code); -+} -+ -+void Assembler::stop(uint32_t code) { -+ DCHECK_GT(code, kMaxWatchpointCode); -+ DCHECK_LE(code, kMaxStopCode); -+#if defined(V8_HOST_ARCH_LOONG64) -+ break_(0x4321); -+#else // V8_HOST_ARCH_LOONG64 -+ break_(code, true); -+#endif -+} -+ -+void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FADD_S, fk, fj, fd); -+} -+ -+void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FADD_D, fk, fj, fd); -+} -+ -+void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FSUB_S, fk, fj, fd); -+} -+ -+void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FSUB_D, fk, fj, fd); -+} -+ -+void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMUL_S, fk, fj, fd); -+} -+ -+void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMUL_D, fk, fj, fd); -+} -+ -+void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FDIV_S, fk, fj, fd); -+} -+ -+void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FDIV_D, fk, fj, fd); -+} -+ -+void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FMADD_S, fa, fk, fj, fd); -+} -+ -+void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FMADD_D, fa, fk, fj, fd); -+} -+ -+void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FMSUB_S, fa, fk, fj, fd); -+} -+ -+void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FMSUB_D, fa, fk, fj, fd); -+} -+ -+void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FNMADD_S, fa, fk, fj, fd); -+} -+ -+void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FNMADD_D, fa, fk, fj, fd); -+} -+ -+void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FNMSUB_S, fa, fk, fj, fd); -+} -+ -+void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, -+ FPURegister fa) { -+ GenRegister(FNMSUB_D, fa, fk, fj, fd); -+} -+ -+void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMAX_S, fk, fj, fd); -+} -+ -+void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMAX_D, fk, fj, fd); -+} -+ -+void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMIN_S, fk, fj, fd); -+} -+ -+void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMIN_D, fk, fj, fd); -+} -+ -+void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMAXA_S, fk, fj, fd); -+} -+ -+void Assembler::fmaxa_d(FPURegister fd, FPURegister 
fj, FPURegister fk) { -+ GenRegister(FMAXA_D, fk, fj, fd); -+} -+ -+void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMINA_S, fk, fj, fd); -+} -+ -+void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FMINA_D, fk, fj, fd); -+} -+ -+void Assembler::fabs_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FABS_S, fj, fd); -+} -+ -+void Assembler::fabs_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FABS_D, fj, fd); -+} -+ -+void Assembler::fneg_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FNEG_S, fj, fd); -+} -+ -+void Assembler::fneg_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FNEG_D, fj, fd); -+} -+ -+void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FSQRT_S, fj, fd); -+} -+ -+void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FSQRT_D, fj, fd); -+} -+ -+void Assembler::frecip_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FRECIP_S, fj, fd); -+} -+ -+void Assembler::frecip_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FRECIP_D, fj, fd); -+} -+ -+void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FRSQRT_S, fj, fd); -+} -+ -+void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FRSQRT_D, fj, fd); -+} -+ -+void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FSCALEB_S, fk, fj, fd); -+} -+ -+void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FSCALEB_D, fk, fj, fd); -+} -+ -+void Assembler::flogb_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FLOGB_S, fj, fd); -+} -+ -+void Assembler::flogb_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FLOGB_D, fj, fd); -+} -+ -+void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FCOPYSIGN_S, fk, fj, fd); -+} -+ -+void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) { -+ GenRegister(FCOPYSIGN_D, fk, fj, fd); -+} -+ -+void Assembler::fclass_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FCLASS_S, fj, fd); -+} -+ -+void Assembler::fclass_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FCLASS_D, fj, fd); -+} -+ -+void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, -+ CFRegister cd) { -+ GenCmp(FCMP_COND_S, cc, fk, fj, cd); -+} -+ -+void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, -+ CFRegister cd) { -+ GenCmp(FCMP_COND_D, cc, fk, fj, cd); -+} -+ -+void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FCVT_S_D, fj, fd); -+} -+ -+void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FCVT_D_S, fj, fd); -+} -+ -+void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) { -+ GenRegister(FFINT_S_W, fj, fd); -+} -+ -+void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) { -+ GenRegister(FFINT_S_L, fj, fd); -+} -+ -+void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) { -+ GenRegister(FFINT_D_W, fj, fd); -+} -+ -+void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) { -+ GenRegister(FFINT_D_L, fj, fd); -+} -+ -+void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINT_W_S, fj, fd); -+} -+ -+void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINT_W_D, fj, fd); -+} -+ -+void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINT_L_S, fj, fd); -+} -+ -+void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINT_L_D, fj, fd); -+} -+ -+void 
Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRM_W_S, fj, fd); -+} -+ -+void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRM_W_D, fj, fd); -+} -+ -+void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRM_L_S, fj, fd); -+} -+ -+void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRM_L_D, fj, fd); -+} -+ -+void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRP_W_S, fj, fd); -+} -+ -+void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRP_W_D, fj, fd); -+} -+ -+void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRP_L_S, fj, fd); -+} -+ -+void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRP_L_D, fj, fd); -+} -+ -+void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRZ_W_S, fj, fd); -+} -+ -+void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRZ_W_D, fj, fd); -+} -+ -+void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRZ_L_S, fj, fd); -+} -+ -+void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRZ_L_D, fj, fd); -+} -+ -+void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRNE_W_S, fj, fd); -+} -+ -+void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRNE_W_D, fj, fd); -+} -+ -+void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRNE_L_S, fj, fd); -+} -+ -+void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FTINTRNE_L_D, fj, fd); -+} -+ -+void Assembler::frint_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FRINT_S, fj, fd); -+} -+ -+void Assembler::frint_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FRINT_D, fj, fd); -+} -+ -+void Assembler::fmov_s(FPURegister fd, FPURegister fj) { -+ GenRegister(FMOV_S, fj, fd); -+} -+ -+void Assembler::fmov_d(FPURegister fd, FPURegister fj) { -+ GenRegister(FMOV_D, fj, fd); -+} -+ -+void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj, -+ FPURegister fk) { -+ GenSel(FSEL, ca, fk, fj, fd); -+} -+ -+void Assembler::movgr2fr_w(FPURegister fd, Register rj) { -+ GenRegister(MOVGR2FR_W, rj, fd); -+} -+ -+void Assembler::movgr2fr_d(FPURegister fd, Register rj) { -+ GenRegister(MOVGR2FR_D, rj, fd); -+} -+ -+void Assembler::movgr2frh_w(FPURegister fd, Register rj) { -+ GenRegister(MOVGR2FRH_W, rj, fd); -+} -+ -+void Assembler::movfr2gr_s(Register rd, FPURegister fj) { -+ GenRegister(MOVFR2GR_S, fj, rd); -+} -+ -+void Assembler::movfr2gr_d(Register rd, FPURegister fj) { -+ GenRegister(MOVFR2GR_D, fj, rd); -+} -+ -+void Assembler::movfrh2gr_s(Register rd, FPURegister fj) { -+ GenRegister(MOVFRH2GR_S, fj, rd); -+} -+ -+void Assembler::movgr2fcsr(Register rj) { GenRegister(MOVGR2FCSR, rj, FCSR); } -+ -+void Assembler::movfcsr2gr(Register rd) { GenRegister(MOVFCSR2GR, FCSR, rd); } -+ -+void Assembler::movfr2cf(CFRegister cd, FPURegister fj) { -+ GenRegister(MOVFR2CF, fj, cd); -+} -+ -+void Assembler::movcf2fr(FPURegister fd, CFRegister cj) { -+ GenRegister(MOVCF2FR, cj, fd); -+} -+ -+void Assembler::movgr2cf(CFRegister cd, Register rj) { -+ GenRegister(MOVGR2CF, rj, cd); -+} -+ -+void Assembler::movcf2gr(Register rd, CFRegister cj) { -+ GenRegister(MOVCF2GR, cj, rd); -+} -+ -+void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) { -+ GenImm(FLD_S, si12, rj, fd); -+} -+ 
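
The four ftintr* families above select the IEEE-754 rounding direction at conversion time: rm toward minus infinity, rp toward plus infinity, rz toward zero, rne to nearest with ties to even. A host-side analogue using <cmath>, assuming the default round-to-nearest floating-point environment:

    #include <cassert>
    #include <cmath>

    int main() {
      double x = -2.5;
      assert(std::floor(x) == -3.0);      // ftintrm_*: toward -infinity
      assert(std::ceil(x) == -2.0);       // ftintrp_*: toward +infinity
      assert(std::trunc(x) == -2.0);      // ftintrz_*: toward zero
      assert(std::nearbyint(x) == -2.0);  // ftintrne_*: ties go to even
      return 0;
    }
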
-+void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) { -+ GenImm(FLD_D, si12, rj, fd); -+} -+ -+void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) { -+ GenImm(FST_S, si12, rj, fd); -+} -+ -+void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) { -+ GenImm(FST_D, si12, rj, fd); -+} -+ -+void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) { -+ GenRegister(FLDX_S, rk, rj, fd); -+} -+ -+void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) { -+ GenRegister(FLDX_D, rk, rj, fd); -+} -+ -+void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) { -+ GenRegister(FSTX_S, rk, rj, fd); -+} -+ -+void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) { -+ GenRegister(FSTX_D, rk, rj, fd); -+} -+ -+// ------------Memory-instructions------------- -+ -+/*void Assembler::AdjustBaseAndOffset(MemOperand* src, -+ OffsetAccessType access_type, -+ int second_access_add_to_offset) { -+ // TODO should be optimized. -+ // This method is used to adjust the base register and offset pair -+ // for a load/store when the offset doesn't fit into int12_t. -+ -+ bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0; -+ bool two_accesses = static_cast(access_type) || !doubleword_aligned; -+ DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7. -+ -+ // is_int12 must be passed a signed value, hence the static cast below. -+ if (is_int12(src->offset()) && -+ (!two_accesses || is_int12(static_cast( -+ src->offset() + second_access_add_to_offset)))) { -+ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified -+ // value) fits into int16_t. -+ return; -+ } -+ -+ DCHECK(src->rm() != -+ at); // Must not overwrite the register 'base' while loading 'offset'. -+ -+#ifdef DEBUG -+ // Remember the "(mis)alignment" of 'offset', it will be checked at the end. -+ uint32_t misalignment = src->offset() & (kDoubleSize - 1); -+#endif -+ -+ // Do not load the whole 32-bit 'offset' if it can be represented as -+ // a sum of two 16-bit signed offsets. This can save an instruction or two. -+ // To simplify matters, only do this for a symmetric range of offsets from -+ // about -64KB to about +64KB, allowing further addition of 4 when accessing -+ // 64-bit variables with two 32-bit accesses. -+ constexpr int32_t kMinOffsetForSimpleAdjustment = -+ 0x7FF8; // Max int16_t that's a multiple of 8. -+ constexpr int32_t kMaxOffsetForSimpleAdjustment = -+ 2 * kMinOffsetForSimpleAdjustment; -+ -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) { -+ daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment); -+ src->offset_ -= kMinOffsetForSimpleAdjustment; -+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() && -+ src->offset() < 0) { -+ daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment); -+ src->offset_ += kMinOffsetForSimpleAdjustment; -+ } else if (kArchVariant == kMips64r6) { -+ // On r6 take advantage of the daui instruction, e.g.: -+ // daui at, base, offset_high -+ // [dahi at, 1] // When `offset` is close to +2GB. -+ // lw reg_lo, offset_low(at) -+ // [lw reg_hi, (offset_low+4)(at)] // If misaligned 64-bit load. 
-+ // or when offset_low+4 overflows int16_t: -+ // daui at, base, offset_high -+ // daddiu at, at, 8 -+ // lw reg_lo, (offset_low-8)(at) -+ // lw reg_hi, (offset_low-4)(at) -+ int16_t offset_low = static_cast(src->offset()); -+ int32_t offset_low32 = offset_low; -+ int16_t offset_high = static_cast(src->offset() >> 16); -+ bool increment_hi16 = offset_low < 0; -+ bool overflow_hi16 = false; -+ -+ if (increment_hi16) { -+ offset_high++; -+ overflow_hi16 = (offset_high == -32768); -+ } -+ daui(scratch, src->rm(), static_cast(offset_high)); -+ -+ if (overflow_hi16) { -+ dahi(scratch, 1); -+ } -+ -+ if (two_accesses && !is_int16(static_cast( -+ offset_low32 + second_access_add_to_offset))) { -+ // Avoid overflow in the 16-bit offset of the load/store instruction when -+ // adding 4. -+ daddiu(scratch, scratch, kDoubleSize); -+ offset_low32 -= kDoubleSize; -+ } -+ -+ src->offset_ = offset_low32; -+ } else { -+ // Do not load the whole 32-bit 'offset' if it can be represented as -+ // a sum of three 16-bit signed offsets. This can save an instruction. -+ // To simplify matters, only do this for a symmetric range of offsets from -+ // about -96KB to about +96KB, allowing further addition of 4 when accessing -+ // 64-bit variables with two 32-bit accesses. -+ constexpr int32_t kMinOffsetForMediumAdjustment = -+ 2 * kMinOffsetForSimpleAdjustment; -+ constexpr int32_t kMaxOffsetForMediumAdjustment = -+ 3 * kMinOffsetForSimpleAdjustment; -+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) { -+ daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2); -+ daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2); -+ src->offset_ -= kMinOffsetForMediumAdjustment; -+ } else if (-kMaxOffsetForMediumAdjustment <= src->offset() && -+ src->offset() < 0) { -+ daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2); -+ daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2); -+ src->offset_ += kMinOffsetForMediumAdjustment; -+ } else { -+ // Now that all shorter options have been exhausted, load the full 32-bit -+ // offset. -+ int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize); -+ lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask); -+ ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset. -+ daddu(scratch, scratch, src->rm()); -+ src->offset_ -= loaded_offset; -+ } -+ } -+ src->rm_ = scratch; -+ -+ DCHECK(is_int16(src->offset())); -+ if (two_accesses) { -+ DCHECK(is_int16( -+ static_cast(src->offset() + second_access_add_to_offset))); -+ } -+ DCHECK(misalignment == (src->offset() & (kDoubleSize - 1))); -+}*/ -+ -+void Assembler::AdjustBaseAndOffset(MemOperand* src) { -+ // is_int12 must be passed a signed value, hence the static cast below. 
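
The replacement AdjustBaseAndOffset that begins here (and continues below) bails out when the offset already fits a signed 12-bit immediate, and otherwise materializes it into a scratch register as an upper-20/lower-12 pair, matching the lu12i_w/ori sequence in the continuation. A round-trip sketch of that split:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t offset = 0x12ABC;  // does not fit in a signed 12-bit field
      assert(!(offset >= -2048 && offset <= 2047));
      uint32_t hi20 = (static_cast<uint32_t>(offset) >> 12) & 0xFFFFF;  // lu12i_w
      uint32_t lo12 = static_cast<uint32_t>(offset) & 0xFFF;            // ori
      int32_t rebuilt = static_cast<int32_t>((hi20 << 12) | lo12);
      assert(rebuilt == offset);
      return 0;
    }
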
-+ if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) { -+ return; -+ } -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ if (is_uint12(static_cast(src->offset()))) { -+ ori(scratch, zero_reg, src->offset() & kImm12Mask); -+ } else { -+ lu12i_w(scratch, src->offset() >> 12 & 0xfffff); -+ if (src->offset() & kImm12Mask) { -+ ori(scratch, scratch, src->offset() & kImm12Mask); -+ } -+ } -+ src->index_ = scratch; -+ src->offset_ = 0; -+ // TODO can be optimized, for example 2 * [int12_min, int12_max] -+ // addi_d scratch base, offset/2 only on instr -+ // base = scratch -+ // offset = offset - offset / 2 -+} -+ -+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc, -+ intptr_t pc_delta) { -+ if (RelocInfo::IsInternalReference(rmode)) { -+ int64_t* p = reinterpret_cast(pc); -+ if (*p == kEndOfJumpChain) { -+ return 0; // Number of instructions patched. -+ } -+ *p += pc_delta; -+ return 2; // Number of instructions patched. -+ } -+ abort(); -+ /* Instr instr = instr_at(pc); -+ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode)); -+ if (IsLui(instr)) { -+ Instr instr_lui = instr_at(pc + 0 * kInstrSize); -+ Instr instr_ori = instr_at(pc + 1 * kInstrSize); -+ Instr instr_ori2 = instr_at(pc + 3 * kInstrSize); -+ DCHECK(IsOri(instr_ori)); -+ DCHECK(IsOri(instr_ori2)); -+ // TODO(plind): symbolic names for the shifts. -+ int64_t imm = (instr_lui & static_cast(kImm16Mask)) << 48; -+ imm |= (instr_ori & static_cast(kImm16Mask)) << 32; -+ imm |= (instr_ori2 & static_cast(kImm16Mask)) << 16; -+ // Sign extend address. -+ imm >>= 16; -+ -+ if (imm == kEndOfJumpChain) { -+ return 0; // Number of instructions patched. -+ } -+ imm += pc_delta; -+ DCHECK_EQ(imm & 3, 0); -+ -+ instr_lui &= ~kImm16Mask; -+ instr_ori &= ~kImm16Mask; -+ instr_ori2 &= ~kImm16Mask; -+ -+ instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask)); -+ instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask)); -+ instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask)); -+ return 4; // Number of instructions patched. -+ } else if (IsJ(instr) || IsJal(instr)) { -+ // Regular j/jal relocation. -+ uint32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; -+ imm28 += pc_delta; -+ imm28 &= kImm28Mask; -+ instr &= ~kImm26Mask; -+ DCHECK_EQ(imm28 & 3, 0); -+ uint32_t imm26 = static_cast(imm28 >> 2); -+ instr_at_put(pc, instr | (imm26 & kImm26Mask)); -+ return 1; // Number of instructions patched. -+ } else { -+ DCHECK(((instr & kJumpRawMask) == kJRawMark) || -+ ((instr & kJumpRawMask) == kJalRawMark)); -+ // Unbox raw offset and emit j/jal. -+ int32_t imm28 = (instr & static_cast(kImm26Mask)) << 2; -+ // Sign extend 28-bit offset to 32-bit. -+ imm28 = (imm28 << 4) >> 4; -+ uint64_t target = -+ static_cast(imm28) + reinterpret_cast(pc); -+ target &= kImm28Mask; -+ DCHECK_EQ(imm28 & 3, 0); -+ uint32_t imm26 = static_cast(target >> 2); -+ // Check markings whether to emit j or jal. -+ uint32_t unbox = (instr & kJRawMark) ? J : JAL; -+ instr_at_put(pc, unbox | (imm26 & kImm26Mask)); -+ return 1; // Number of instructions patched. -+ }*/ -+} -+ -+void Assembler::GrowBuffer() { -+ // Compute new buffer size. -+ int old_size = buffer_->size(); -+ int new_size = std::min(2 * old_size, old_size + 1 * MB); -+ -+ // Some internal data structures overflow for very large buffers, -+ // they must ensure that kMaximalBufferSize is not too large. 
-+ if (new_size > kMaximalBufferSize) { -+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer"); -+ } -+ -+ // Set up new buffer. -+ std::unique_ptr new_buffer = buffer_->Grow(new_size); -+ DCHECK_EQ(new_size, new_buffer->size()); -+ byte* new_start = new_buffer->start(); -+ -+ // Copy the data. -+ intptr_t pc_delta = new_start - buffer_start_; -+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size); -+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos(); -+ MemMove(new_start, buffer_start_, pc_offset()); -+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(), -+ reloc_size); -+ -+ // Switch buffers. -+ buffer_ = std::move(new_buffer); -+ buffer_start_ = new_start; -+ pc_ += pc_delta; -+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, -+ reloc_info_writer.last_pc() + pc_delta); -+ -+ // Relocate runtime entries. -+ Vector instructions{buffer_start_, pc_offset()}; -+ Vector reloc_info{reloc_info_writer.pos(), reloc_size}; -+ for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) { -+ RelocInfo::Mode rmode = it.rinfo()->rmode(); -+ if (rmode == RelocInfo::INTERNAL_REFERENCE) { -+ RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta); -+ } -+ } -+ DCHECK(!overflow()); -+} -+ -+void Assembler::db(uint8_t data) { -+ if (!is_buffer_growth_blocked()) { -+ CheckBuffer(); -+ } -+ EmitHelper(data); -+} -+ -+void Assembler::dd(uint32_t data) { -+ if (!is_buffer_growth_blocked()) { -+ CheckBuffer(); -+ } -+ EmitHelper(data); -+} -+ -+void Assembler::dq(uint64_t data) { -+ if (!is_buffer_growth_blocked()) { -+ CheckBuffer(); -+ } -+ EmitHelper(data); -+} -+ -+void Assembler::dd(Label* label) { -+ if (!is_buffer_growth_blocked()) { -+ CheckBuffer(); -+ } -+ uint64_t data; -+ if (label->is_bound()) { -+ data = reinterpret_cast(buffer_start_ + label->pos()); -+ } else { -+ data = jump_address(label); -+ unbound_labels_count_++; -+ internal_reference_positions_.insert(label->pos()); -+ } -+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); -+ EmitHelper(data); -+} -+ -+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { -+ if (!ShouldRecordRelocInfo(rmode)) return; -+ // We do not try to reuse pool constants. -+ RelocInfo rinfo(reinterpret_cast
(pc_), rmode, data, Code()); -+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here. -+ reloc_info_writer.Write(&rinfo); -+} -+ -+void Assembler::BlockTrampolinePoolFor(int instructions) { -+ CheckTrampolinePoolQuick(instructions); -+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); -+} -+ -+void Assembler::CheckTrampolinePool() { -+ // Some small sequences of instructions must not be broken up by the -+ // insertion of a trampoline pool; such sequences are protected by setting -+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, -+ // which are both checked here. Also, recursive calls to CheckTrampolinePool -+ // are blocked by trampoline_pool_blocked_nesting_. -+ if ((trampoline_pool_blocked_nesting_ > 0) || -+ (pc_offset() < no_trampoline_pool_before_)) { -+ // Emission is currently blocked; make sure we try again as soon as -+ // possible. -+ if (trampoline_pool_blocked_nesting_ > 0) { -+ next_buffer_check_ = pc_offset() + kInstrSize; -+ } else { -+ next_buffer_check_ = no_trampoline_pool_before_; -+ } -+ return; -+ } -+ -+ DCHECK(!trampoline_emitted_); -+ DCHECK_GE(unbound_labels_count_, 0); -+ if (unbound_labels_count_ > 0) { -+ // First we emit jump (2 instructions), then we emit trampoline pool. -+ { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Label after_pool; -+ b(&after_pool); -+ nop(); // TODO remove this -+ -+ int pool_start = pc_offset(); -+ for (int i = 0; i < unbound_labels_count_; i++) { -+ { -+ b(&after_pool); -+ nop(); // TODO remove this -+ } -+ } -+ nop(); -+ bind(&after_pool); -+ trampoline_ = Trampoline(pool_start, unbound_labels_count_); -+ -+ trampoline_emitted_ = true; -+ // As we are only going to emit trampoline once, we need to prevent any -+ // further emission. -+ next_buffer_check_ = kMaxInt; -+ } -+ } else { -+ // Number of branches to unbound label at this point is zero, so we can -+ // move next buffer check to maximum. -+ next_buffer_check_ = -+ pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16; -+ } -+ return; -+} -+ -+Address Assembler::target_address_at(Address pc) { -+ Instr instr0 = instr_at(pc); -+ Instr instr1 = instr_at(pc + 1 * kInstrSize); -+ Instr instr2 = instr_at(pc + 2 * kInstrSize); -+ -+ // Interpret 4 instructions for address generated by li: See listing in -+ // Assembler::set_target_address_at() just below. -+ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2)))); -+ -+ // Assemble the 48 bit value. -+ uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32; -+ uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12; -+ uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff); -+ int64_t addr = static_cast(hi20 | mid20 | low12); -+ -+ // Sign extend to get canonical address. -+ addr = (addr << 16) >> 16; -+ // printf("add : 0x%lx 0x%lx 0x%lx 0x%lx\n", addr, hi20, mid20, low12); -+ return static_cast
(addr); -+} -+ -+// On loong64, a target address is stored in a 3-instruction sequence: -+// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask); -+// 1: ori(rd, rd, j.imm64_ & kImm12Mask); -+// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask); -+// -+// Patching the address must replace all the lui & ori instructions, -+// and flush the i-cache. -+// -+// There is an optimization below, which emits a nop when the address -+// fits in just 16 bits. This is unlikely to help, and should be benchmarked, -+// and possibly removed. -+void Assembler::set_target_value_at(Address pc, uint64_t target, -+ ICacheFlushMode icache_flush_mode) { -+ // There is an optimization where only 3 instructions are used to load address -+ // in code on LOONG64 because only 48-bits of address is effectively used. -+ // It relies on fact the upper [63:48] bits are not used for virtual address -+ // translation and they have to be set according to value of bit 47 in order -+ // get canonical address. -+#ifdef DEBUG -+ // Check we have the result from a li macro-instruction. -+ Instr instr0 = instr_at(pc); -+ Instr instr1 = instr_at(pc + kInstrSize); -+ Instr instr2 = instr_at(pc + kInstrSize * 2); -+ DCHECK(IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)); -+#endif -+ -+ Instr instr = instr_at(pc); -+ uint32_t rd_code = GetRd(instr); -+ uint32_t* p = reinterpret_cast(pc); -+ -+ // Must use 3 instructions to insure patchable code. -+ // lu12i_w rd, middle-20. -+ // ori rd, rd, low-12. -+ // li32i_d rd, high-20. -+ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code; -+ *(p + 1) = -+ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code; -+ *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code; -+ -+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) { -+ FlushInstructionCache(pc, 3 * kInstrSize); -+ } -+} -+ -+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) -+ : available_(assembler->GetScratchRegisterList()), -+ old_available_(*available_) {} -+ -+UseScratchRegisterScope::~UseScratchRegisterScope() { -+ *available_ = old_available_; -+} -+ -+Register UseScratchRegisterScope::Acquire() { -+ DCHECK_NOT_NULL(available_); -+ DCHECK_NE(*available_, 0); -+ int index = static_cast(base::bits::CountTrailingZeros32(*available_)); -+ *available_ &= ~(1UL << index); -+ -+ return Register::from_code(index); -+} -+ -+bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; } -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h -new file mode 100644 -index 00000000..b3804242 ---- /dev/null -+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h -@@ -0,0 +1,1118 @@ -+// Copyright (c) 1994-2006 Sun Microsystems Inc. -+// All Rights Reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// - Redistributions of source code must retain the above copyright notice, -+// this list of conditions and the following disclaimer. -+// -+// - Redistribution in binary form must reproduce the above copyright -+// notice, this list of conditions and the following disclaimer in the -+// documentation and/or other materials provided with the distribution. 
-+// -+// - Neither the name of Sun Microsystems or the names of contributors may -+// be used to endorse or promote products derived from this software without -+// specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// The original source code covered by the above license above has been -+// modified significantly by Google Inc. -+// Copyright 2012 the V8 project authors. All rights reserved. -+ -+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ -+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ -+ -+#include -+#include -+#include -+ -+#include "src/codegen/assembler.h" -+#include "src/codegen/external-reference.h" -+#include "src/codegen/loong64/constants-loong64.h" -+#include "src/codegen/loong64/register-loong64.h" -+#include "src/codegen/label.h" -+#include "src/objects/contexts.h" -+#include "src/objects/smi.h" -+ -+namespace v8 { -+namespace internal { -+ -+class SafepointTableBuilder; -+ -+// ----------------------------------------------------------------------------- -+// Machine instruction Operands. -+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize; -+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1; -+// Class Operand represents a shifter operand in data processing instructions. -+class Operand { -+ public: -+ // Immediate. -+ V8_INLINE explicit Operand(int64_t immediate, -+ RelocInfo::Mode rmode = RelocInfo::NONE) -+ : rm_(no_reg), rmode_(rmode) { -+ value_.immediate = immediate; -+ } -+ V8_INLINE explicit Operand(const ExternalReference& f) -+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) { -+ value_.immediate = static_cast(f.address()); -+ } -+ V8_INLINE explicit Operand(const char* s); -+ explicit Operand(Handle handle); -+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) { -+ value_.immediate = static_cast(value.ptr()); -+ } -+ -+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber. -+ static Operand EmbeddedStringConstant(const StringConstantBase* str); -+ -+ // Register. -+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {} -+ -+ // Return true if this is a register operand. 
-+ V8_INLINE bool is_reg() const; -+ -+ inline int64_t immediate() const; -+ -+ bool IsImmediate() const { return !rm_.is_valid(); } -+ -+ HeapObjectRequest heap_object_request() const { -+ DCHECK(IsHeapObjectRequest()); -+ return value_.heap_object_request; -+ } -+ -+ bool IsHeapObjectRequest() const { -+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); -+ DCHECK_IMPLIES(is_heap_object_request_, -+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || -+ rmode_ == RelocInfo::CODE_TARGET); -+ return is_heap_object_request_; -+ } -+ -+ Register rm() const { return rm_; } -+ -+ RelocInfo::Mode rmode() const { return rmode_; } -+ -+ private: -+ Register rm_; -+ union Value { -+ Value() {} -+ HeapObjectRequest heap_object_request; // if is_heap_object_request_ -+ int64_t immediate; // otherwise -+ } value_; // valid if rm_ == no_reg -+ bool is_heap_object_request_ = false; -+ RelocInfo::Mode rmode_; -+ -+ friend class Assembler; -+ friend class MacroAssembler; -+}; -+ -+// Class MemOperand represents a memory operand in load and store instructions. -+// 1: base_reg + off_imm( si12 | si14<<2) -+// 2: base_reg + offset_reg -+class V8_EXPORT_PRIVATE MemOperand { -+ public: -+ explicit MemOperand(Register rj, int32_t offset = 0); -+ explicit MemOperand(Register rj, Register offset = no_reg); -+ Register base() const { return base_; } -+ Register index() const { return index_; } -+ int32_t offset() const { return offset_; } -+ -+ bool hasIndexReg() const { return index_ != no_reg; } -+ -+ private: -+ Register base_; // base -+ Register index_; // index -+ int32_t offset_; // offset -+ -+ friend class Assembler; -+}; -+ -+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { -+ public: -+ // Create an assembler. Instructions and relocation information are emitted -+ // into a buffer, with the instructions starting from the beginning and the -+ // relocation information starting from the end of the buffer. See CodeDesc -+ // for a detailed comment on the layout (globals.h). -+ // -+ // If the provided buffer is nullptr, the assembler allocates and grows its -+ // own buffer. Otherwise it takes ownership of the provided buffer. -+ explicit Assembler(const AssemblerOptions&, -+ std::unique_ptr = {}); -+ -+ virtual ~Assembler() {} -+ -+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc. -+ static constexpr int kNoHandlerTable = 0; -+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; -+ void GetCode(Isolate* isolate, CodeDesc* desc, -+ SafepointTableBuilder* safepoint_table_builder, -+ int handler_table_offset); -+ -+ // Convenience wrapper for code without safepoint or handler tables. -+ void GetCode(Isolate* isolate, CodeDesc* desc) { -+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); -+ } -+ -+ // Unused on this architecture. -+ void MaybeEmitOutOfLineConstantPool() {} -+ -+ // Label operations & relative jumps (PPUM Appendix D). -+ // -+ // Takes a branch opcode (cc) and a label (L) and generates -+ // either a backward branch or a forward branch and links it -+ // to the label fixup chain. Usage: -+ // -+ // Label L; // unbound label -+ // j(cc, &L); // forward branch to unbound label -+ // bind(&L); // bind label to the current pc -+ // j(cc, &L); // backward branch to bound label -+ // bind(&L); // illegal: a label may be bound only once -+ // -+ // Note: The same Label can be used for forward and backward branches -+ // but it may be bound only once. -+ void bind(Label* L); // Binds an unbound label L to current code position. 
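-+
-+  // Branch offsets used below are byte distances but are encoded in units
-+  // of instructions, i.e. shifted right by 2 before emission. Sketch with
-+  // an assumed distance: a label 0x100 bytes ahead is encoded as
-+  //   shifted_branch_offset(L) == 0x100 >> 2 == 0x40
-+  // in a 16-, 21- or 26-bit offset field, depending on the instruction.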
-+
-+  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
-+
-+  // Determines if Label is bound and near enough so that a branch
-+  // instruction can be used to reach it, instead of a jump instruction.
-+  // c means conditional branch, a means always branch.
-+  bool is_near_c(Label* L);
-+  bool is_near(Label* L, OffsetSize bits);
-+  bool is_near_a(Label* L);
-+
-+  int BranchOffset(Instr instr);
-+
-+  // Returns the branch offset to the given label from the current code
-+  // position. Links the label to the current position if it is still unbound.
-+  // Manages the jump elimination optimization if the second parameter is true.
-+  int32_t branch_offset_helper(Label* L, OffsetSize bits);
-+  inline int32_t branch_offset(Label* L) {
-+    return branch_offset_helper(L, OffsetSize::kOffset16);
-+  }
-+  inline int32_t branch_offset21(Label* L) {
-+    return branch_offset_helper(L, OffsetSize::kOffset21);
-+  }
-+  inline int32_t branch_offset26(Label* L) {
-+    return branch_offset_helper(L, OffsetSize::kOffset26);
-+  }
-+  inline int32_t shifted_branch_offset(Label* L) {
-+    return branch_offset(L) >> 2;
-+  }
-+  inline int32_t shifted_branch_offset21(Label* L) {
-+    return branch_offset21(L) >> 2;
-+  }
-+  inline int32_t shifted_branch_offset26(Label* L) {
-+    return branch_offset26(L) >> 2;
-+  }
-+  uint64_t jump_address(Label* L);
-+  uint64_t jump_offset(Label* L);
-+  uint64_t branch_long_offset(Label* L);
-+
-+  // Puts a label's target address at the given position.
-+  // The high 8 bits are set to zero.
-+  void label_at_put(Label* L, int at_offset);
-+
-+  // Read/Modify the code target address in the branch/call instruction at pc.
-+  // The isolate argument is unused (and may be nullptr) when skipping flushing.
-+  static Address target_address_at(Address pc);
-+  V8_INLINE static void set_target_address_at(
-+      Address pc, Address target,
-+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-+    set_target_value_at(pc, target, icache_flush_mode);
-+  }
-+  // On LOONG64 there is no Constant Pool, so we skip that parameter.
-+  V8_INLINE static Address target_address_at(Address pc,
-+                                             Address constant_pool) {
-+    return target_address_at(pc);
-+  }
-+  V8_INLINE static void set_target_address_at(
-+      Address pc, Address constant_pool, Address target,
-+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-+    set_target_address_at(pc, target, icache_flush_mode);
-+  }
-+
-+  static void set_target_value_at(
-+      Address pc, uint64_t target,
-+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-+
-+  static void JumpLabelToJumpRegister(Address pc);
-+
-+  // This sets the branch destination (which gets loaded at the call address).
-+  // This is for calls and branches within generated code. The serializer
-+  // has already deserialized the lui/ori instructions etc.
-+  inline static void deserialization_set_special_target_at(
-+      Address instruction_payload, Code code, Address target);
-+
-+  // Get the size of the special target encoded at 'instruction_payload'.
-+  inline static int deserialization_special_target_size(
-+      Address instruction_payload);
-+
-+  // This sets the internal reference at the pc.
-+  inline static void deserialization_set_target_internal_reference_at(
-+      Address pc, Address target,
-+      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
-+
-+  // Here we are patching the address in the LUI/ORI instruction pair.
-+  // These values are used in the serialization process and must be zero for
-+  // the LA platform, as Code, Embedded Object or External-reference pointers
-+  // are split across two consecutive instructions and don't exist separately
-+  // in the code, so the serializer should not step forwards in memory after
-+  // a target is resolved and written.
-+  static constexpr int kSpecialTargetSize = 0;
-+
-+  // Number of consecutive instructions used to store a 32-bit/64-bit
-+  // constant. This constant is used in RelocInfo::target_address_address()
-+  // to tell the serializer the address of the instruction that follows the
-+  // LUI/ORI instruction pair.
-+  // TODO check this
-+  static constexpr int kInstructionsFor64BitConstant = 4;
-+
-+  // Difference between address of current opcode and target address offset.
-+  static constexpr int kBranchPCOffset = 0;
-+
-+  // Difference between address of current opcode and target address offset,
-+  // when we are generating a sequence of instructions for long relative PC
-+  // branches.
-+  static constexpr int kLongBranchPCOffset = 0;  // 3 * kInstrSize;
-+
-+  // Max offset for instructions with 16-bit offset field.
-+  static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1;
-+
-+  // Max offset for instructions with 21-bit offset field.
-+  static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1;
-+
-+  // Max offset for compact branch instructions with 26-bit offset field.
-+  static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1;
-+
-+  static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
-+
-+  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
-+
-+  // ---------------------------------------------------------------------------
-+  // Code generation.
-+
-+  // Insert the smallest number of nop instructions
-+  // possible to align the pc offset to a multiple
-+  // of m. m must be a power of 2 (>= 4).
-+  void Align(int m);
-+  // Insert the smallest number of zero bytes possible to align the pc offset
-+  // to a multiple of m. m must be a power of 2 (>= 2).
-+  void DataAlign(int m);
-+  // Aligns code to something that's optimal for a jump target for the
-+  // platform.
-+  void CodeTargetAlign();
-+
-+  // Different nop operations are used by the code generator to detect certain
-+  // states of the generated code.
-+  enum NopMarkerTypes {
-+    NON_MARKING_NOP = 0,
-+    DEBUG_BREAK_NOP,
-+    // IC markers.
-+    PROPERTY_ACCESS_INLINED,
-+    PROPERTY_ACCESS_INLINED_CONTEXT,
-+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
-+    // Helper values.
-+    LAST_CODE_MARKER,
-+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
-+  };
-+
-+  // Type == 0 is the default non-marking nop. For LoongISA this is an
-+  // andi(zero_reg, zero_reg, 0). We use rt_reg == t7 for non-zero
-+  // marking, to avoid conflict with ssnop and ehb instructions.
-+  void nop(unsigned int type = 0) {
-+    DCHECK_LT(type, 32);
-+    Register nop_rt_reg = (type == 0) ? zero_reg : t7;
-+    andi(zero_reg, nop_rt_reg, type);
-+  }
-+
-+  // --------Branch-and-jump-instructions----------
-+  // We don't use likely variant of instructions.
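-+  // Minimal usage sketch (hypothetical register choices), following the
-+  // Label protocol documented above bind():
-+  //   Label done;
-+  //   beq(a0, a1, &done);    // forward branch, 16-bit shifted offset
-+  //   addi_d(a2, a2, 1);
-+  //   bind(&done);
-+  // beqz/bnez use the 21-bit offset field and b/bl the 26-bit one, matching
-+  // the shifted_branch_offset21/26 helpers above.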
-+ void b(int32_t offset); -+ inline void b(Label* L) { b(shifted_branch_offset26(L)); } -+ void bl(int32_t offset); -+ inline void bl(Label* L) { bl(shifted_branch_offset26(L)); } -+ -+ void beq(Register rj, Register rd, int32_t offset); -+ inline void beq(Register rj, Register rd, Label* L) { -+ beq(rj, rd, shifted_branch_offset(L)); -+ } -+ void bne(Register rj, Register rd, int32_t offset); -+ inline void bne(Register rj, Register rd, Label* L) { -+ bne(rj, rd, shifted_branch_offset(L)); -+ } -+ void blt(Register rj, Register rd, int32_t offset); -+ inline void blt(Register rj, Register rd, Label* L) { -+ blt(rj, rd, shifted_branch_offset(L)); -+ } -+ void bge(Register rj, Register rd, int32_t offset); -+ inline void bge(Register rj, Register rd, Label* L) { -+ bge(rj, rd, shifted_branch_offset(L)); -+ } -+ void bltu(Register rj, Register rd, int32_t offset); -+ inline void bltu(Register rj, Register rd, Label* L) { -+ bltu(rj, rd, shifted_branch_offset(L)); -+ } -+ void bgeu(Register rj, Register rd, int32_t offset); -+ inline void bgeu(Register rj, Register rd, Label* L) { -+ bgeu(rj, rd, shifted_branch_offset(L)); -+ } -+ void beqz(Register rj, int32_t offset); -+ inline void beqz(Register rj, Label* L) { -+ beqz(rj, shifted_branch_offset21(L)); -+ } -+ void bnez(Register rj, int32_t offset); -+ inline void bnez(Register rj, Label* L) { -+ bnez(rj, shifted_branch_offset21(L)); -+ } -+ -+ void jirl(Register rd, Register rj, int32_t offset); -+ -+ void bceqz(CFRegister cj, int32_t si21); -+ inline void bceqz(CFRegister cj, Label* L) { -+ bceqz(cj, shifted_branch_offset21(L)); -+ } -+ void bcnez(CFRegister cj, int32_t si21); -+ inline void bcnez(CFRegister cj, Label* L) { -+ bcnez(cj, shifted_branch_offset21(L)); -+ } -+ -+ // -------Data-processing-instructions--------- -+ -+ // Arithmetic. 
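-+  // The _w forms are 32-bit operations (results kept sign-extended to 64
-+  // bits, as is usual for LA64) while the _d forms are full 64-bit ops.
-+  // Immediate variants take a signed 12-bit value, e.g. (assumed operands):
-+  //   addi_d(a0, a0, 1);        // a0 += 1
-+  //   addi_w(a1, a1, -2048);    // smallest si12; larger constants must
-+  //                             // first be built with lu12i_w/ori.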
-+ void add_w(Register rd, Register rj, Register rk); -+ void add_d(Register rd, Register rj, Register rk); -+ void sub_w(Register rd, Register rj, Register rk); -+ void sub_d(Register rd, Register rj, Register rk); -+ -+ void addi_w(Register rd, Register rj, int32_t si12); -+ void addi_d(Register rd, Register rj, int32_t si12); -+ -+ void addu16i_d(Register rd, Register rj, int32_t si16); -+ -+ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2); -+ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2); -+ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2); -+ -+ void lu12i_w(Register rd, int32_t si20); -+ void lu32i_d(Register rd, int32_t si20); -+ void lu52i_d(Register rd, Register rj, int32_t si12); -+ -+ void slt(Register rd, Register rj, Register rk); -+ void sltu(Register rd, Register rj, Register rk); -+ void slti(Register rd, Register rj, int32_t si12); -+ void sltui(Register rd, Register rj, int32_t si12); -+ -+ void pcaddi(Register rd, int32_t si20); -+ void pcaddu12i(Register rd, int32_t si20); -+ void pcaddu18i(Register rd, int32_t si20); -+ void pcalau12i(Register rd, int32_t si20); -+ -+ void and_(Register rd, Register rj, Register rk); -+ void or_(Register rd, Register rj, Register rk); -+ void xor_(Register rd, Register rj, Register rk); -+ void nor(Register rd, Register rj, Register rk); -+ void andn(Register rd, Register rj, Register rk); -+ void orn(Register rd, Register rj, Register rk); -+ -+ void andi(Register rd, Register rj, int32_t ui12); -+ void ori(Register rd, Register rj, int32_t ui12); -+ void xori(Register rd, Register rj, int32_t ui12); -+ -+ void mul_w(Register rd, Register rj, Register rk); -+ void mulh_w(Register rd, Register rj, Register rk); -+ void mulh_wu(Register rd, Register rj, Register rk); -+ void mul_d(Register rd, Register rj, Register rk); -+ void mulh_d(Register rd, Register rj, Register rk); -+ void mulh_du(Register rd, Register rj, Register rk); -+ -+ void mulw_d_w(Register rd, Register rj, Register rk); -+ void mulw_d_wu(Register rd, Register rj, Register rk); -+ -+ void div_w(Register rd, Register rj, Register rk); -+ void mod_w(Register rd, Register rj, Register rk); -+ void div_wu(Register rd, Register rj, Register rk); -+ void mod_wu(Register rd, Register rj, Register rk); -+ void div_d(Register rd, Register rj, Register rk); -+ void mod_d(Register rd, Register rj, Register rk); -+ void div_du(Register rd, Register rj, Register rk); -+ void mod_du(Register rd, Register rj, Register rk); -+ -+ // Shifts. -+ void sll_w(Register rd, Register rj, Register rk); -+ void srl_w(Register rd, Register rj, Register rk); -+ void sra_w(Register rd, Register rj, Register rk); -+ void rotr_w(Register rd, Register rj, Register rk); -+ -+ void slli_w(Register rd, Register rj, int32_t ui5); -+ void srli_w(Register rd, Register rj, int32_t ui5); -+ void srai_w(Register rd, Register rj, int32_t ui5); -+ void rotri_w(Register rd, Register rj, int32_t ui5); -+ -+ void sll_d(Register rd, Register rj, Register rk); -+ void srl_d(Register rd, Register rj, Register rk); -+ void sra_d(Register rd, Register rj, Register rk); -+ void rotr_d(Register rd, Register rj, Register rk); -+ -+ void slli_d(Register rd, Register rj, int32_t ui6); -+ void srli_d(Register rd, Register rj, int32_t ui6); -+ void srai_d(Register rd, Register rj, int32_t ui6); -+ void rotri_d(Register rd, Register rj, int32_t ui6); -+ -+ // Bit twiddling. 
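-+  // The bstr* bit-field ops below take msb/lsb bit indices with msb >= lsb,
-+  // e.g. (illustrative operands):
-+  //   bstrpick_d(a0, a1, 15, 8);  // a0 = (a1 >> 8) & 0xff, zero-extended
-+  //   bstrins_d(a0, a1, 15, 8);   // copy low 8 bits of a1 into a0[15:8]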
-+ void ext_w_b(Register rd, Register rj); -+ void ext_w_h(Register rd, Register rj); -+ -+ void clo_w(Register rd, Register rj); -+ void clz_w(Register rd, Register rj); -+ void cto_w(Register rd, Register rj); -+ void ctz_w(Register rd, Register rj); -+ void clo_d(Register rd, Register rj); -+ void clz_d(Register rd, Register rj); -+ void cto_d(Register rd, Register rj); -+ void ctz_d(Register rd, Register rj); -+ -+ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2); -+ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3); -+ -+ void revb_2h(Register rd, Register rj); -+ void revb_4h(Register rd, Register rj); -+ void revb_2w(Register rd, Register rj); -+ void revb_d(Register rd, Register rj); -+ -+ void revh_2w(Register rd, Register rj); -+ void revh_d(Register rd, Register rj); -+ -+ void bitrev_4b(Register rd, Register rj); -+ void bitrev_8b(Register rd, Register rj); -+ -+ void bitrev_w(Register rd, Register rj); -+ void bitrev_d(Register rd, Register rj); -+ -+ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); -+ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); -+ -+ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); -+ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); -+ -+ void maskeqz(Register rd, Register rj, Register rk); -+ void masknez(Register rd, Register rj, Register rk); -+ -+ // Memory-instructions -+ void ld_b(Register rd, Register rj, int32_t si12); -+ void ld_h(Register rd, Register rj, int32_t si12); -+ void ld_w(Register rd, Register rj, int32_t si12); -+ void ld_d(Register rd, Register rj, int32_t si12); -+ void ld_bu(Register rd, Register rj, int32_t si12); -+ void ld_hu(Register rd, Register rj, int32_t si12); -+ void ld_wu(Register rd, Register rj, int32_t si12); -+ void st_b(Register rd, Register rj, int32_t si12); -+ void st_h(Register rd, Register rj, int32_t si12); -+ void st_w(Register rd, Register rj, int32_t si12); -+ void st_d(Register rd, Register rj, int32_t si12); -+ -+ void ldx_b(Register rd, Register rj, Register rk); -+ void ldx_h(Register rd, Register rj, Register rk); -+ void ldx_w(Register rd, Register rj, Register rk); -+ void ldx_d(Register rd, Register rj, Register rk); -+ void ldx_bu(Register rd, Register rj, Register rk); -+ void ldx_hu(Register rd, Register rj, Register rk); -+ void ldx_wu(Register rd, Register rj, Register rk); -+ void stx_b(Register rd, Register rj, Register rk); -+ void stx_h(Register rd, Register rj, Register rk); -+ void stx_w(Register rd, Register rj, Register rk); -+ void stx_d(Register rd, Register rj, Register rk); -+ -+ void ldptr_w(Register rd, Register rj, int32_t si14); -+ void ldptr_d(Register rd, Register rj, int32_t si14); -+ void stptr_w(Register rd, Register rj, int32_t si14); -+ void stptr_d(Register rd, Register rj, int32_t si14); -+ -+ void amswap_w(Register rd, Register rk, Register rj); -+ void amswap_d(Register rd, Register rk, Register rj); -+ void amadd_w(Register rd, Register rk, Register rj); -+ void amadd_d(Register rd, Register rk, Register rj); -+ void amand_w(Register rd, Register rk, Register rj); -+ void amand_d(Register rd, Register rk, Register rj); -+ void amor_w(Register rd, Register rk, Register rj); -+ void amor_d(Register rd, Register rk, Register rj); -+ void amxor_w(Register rd, Register rk, Register rj); -+ void amxor_d(Register rd, Register rk, Register rj); -+ void ammax_w(Register rd, Register rk, Register rj); -+ void ammax_d(Register rd, Register rk, Register 
rj); -+ void ammin_w(Register rd, Register rk, Register rj); -+ void ammin_d(Register rd, Register rk, Register rj); -+ void ammax_wu(Register rd, Register rk, Register rj); -+ void ammax_du(Register rd, Register rk, Register rj); -+ void ammin_wu(Register rd, Register rk, Register rj); -+ void ammin_du(Register rd, Register rk, Register rj); -+ -+ void amswap_db_w(Register rd, Register rk, Register rj); -+ void amswap_db_d(Register rd, Register rk, Register rj); -+ void amadd_db_w(Register rd, Register rk, Register rj); -+ void amadd_db_d(Register rd, Register rk, Register rj); -+ void amand_db_w(Register rd, Register rk, Register rj); -+ void amand_db_d(Register rd, Register rk, Register rj); -+ void amor_db_w(Register rd, Register rk, Register rj); -+ void amor_db_d(Register rd, Register rk, Register rj); -+ void amxor_db_w(Register rd, Register rk, Register rj); -+ void amxor_db_d(Register rd, Register rk, Register rj); -+ void ammax_db_w(Register rd, Register rk, Register rj); -+ void ammax_db_d(Register rd, Register rk, Register rj); -+ void ammin_db_w(Register rd, Register rk, Register rj); -+ void ammin_db_d(Register rd, Register rk, Register rj); -+ void ammax_db_wu(Register rd, Register rk, Register rj); -+ void ammax_db_du(Register rd, Register rk, Register rj); -+ void ammin_db_wu(Register rd, Register rk, Register rj); -+ void ammin_db_du(Register rd, Register rk, Register rj); -+ -+ void ll_w(Register rd, Register rj, int32_t si14); -+ void ll_d(Register rd, Register rj, int32_t si14); -+ void sc_w(Register rd, Register rj, int32_t si14); -+ void sc_d(Register rd, Register rj, int32_t si14); -+ -+ void dbar(int32_t hint); -+ void ibar(int32_t hint); -+ -+ // Break / Trap instructions. -+ void break_(uint32_t code, bool break_as_stop = false); -+ void stop(uint32_t code = kMaxStopCode); -+ -+ // Arithmetic. 
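-+  // Single-precision ops carry an _s suffix, double-precision _d. The fused
-+  // forms round once, e.g. (assumed registers):
-+  //   fmadd_d(f0, f1, f2, f3);   // f0 = f1 * f2 + f3
-+  // Comparisons (fcmp_cond_*) set a condition-flag register (CFRegister)
-+  // that the bceqz/bcnez branches above consume.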
-+ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ -+ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); -+ -+ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ -+ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ -+ void fabs_s(FPURegister fd, FPURegister fj); -+ void fabs_d(FPURegister fd, FPURegister fj); -+ void fneg_s(FPURegister fd, FPURegister fj); -+ void fneg_d(FPURegister fd, FPURegister fj); -+ -+ void fsqrt_s(FPURegister fd, FPURegister fj); -+ void fsqrt_d(FPURegister fd, FPURegister fj); -+ void frecip_s(FPURegister fd, FPURegister fj); -+ void frecip_d(FPURegister fd, FPURegister fj); -+ void frsqrt_s(FPURegister fd, FPURegister fj); -+ void frsqrt_d(FPURegister fd, FPURegister fj); -+ -+ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ void flogb_s(FPURegister fd, FPURegister fj); -+ void flogb_d(FPURegister fd, FPURegister fj); -+ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk); -+ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk); -+ -+ void fclass_s(FPURegister fd, FPURegister fj); -+ void fclass_d(FPURegister fd, FPURegister fj); -+ -+ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, -+ CFRegister cd); -+ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, -+ CFRegister cd); -+ -+ void fcvt_s_d(FPURegister fd, FPURegister fj); -+ void fcvt_d_s(FPURegister fd, FPURegister fj); -+ -+ void ffint_s_w(FPURegister fd, FPURegister fj); -+ void ffint_s_l(FPURegister fd, FPURegister fj); -+ void ffint_d_w(FPURegister fd, FPURegister fj); -+ void ffint_d_l(FPURegister fd, FPURegister fj); -+ void ftint_w_s(FPURegister fd, FPURegister fj); -+ void ftint_w_d(FPURegister fd, FPURegister fj); -+ void ftint_l_s(FPURegister fd, FPURegister fj); -+ void ftint_l_d(FPURegister fd, FPURegister fj); -+ -+ void ftintrm_w_s(FPURegister fd, FPURegister fj); -+ void ftintrm_w_d(FPURegister fd, FPURegister fj); -+ void ftintrm_l_s(FPURegister fd, FPURegister fj); -+ void ftintrm_l_d(FPURegister fd, 
FPURegister fj); -+ void ftintrp_w_s(FPURegister fd, FPURegister fj); -+ void ftintrp_w_d(FPURegister fd, FPURegister fj); -+ void ftintrp_l_s(FPURegister fd, FPURegister fj); -+ void ftintrp_l_d(FPURegister fd, FPURegister fj); -+ void ftintrz_w_s(FPURegister fd, FPURegister fj); -+ void ftintrz_w_d(FPURegister fd, FPURegister fj); -+ void ftintrz_l_s(FPURegister fd, FPURegister fj); -+ void ftintrz_l_d(FPURegister fd, FPURegister fj); -+ void ftintrne_w_s(FPURegister fd, FPURegister fj); -+ void ftintrne_w_d(FPURegister fd, FPURegister fj); -+ void ftintrne_l_s(FPURegister fd, FPURegister fj); -+ void ftintrne_l_d(FPURegister fd, FPURegister fj); -+ -+ void frint_s(FPURegister fd, FPURegister fj); -+ void frint_d(FPURegister fd, FPURegister fj); -+ -+ void fmov_s(FPURegister fd, FPURegister fj); -+ void fmov_d(FPURegister fd, FPURegister fj); -+ -+ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk); -+ -+ void movgr2fr_w(FPURegister fd, Register rj); -+ void movgr2fr_d(FPURegister fd, Register rj); -+ void movgr2frh_w(FPURegister fd, Register rj); -+ -+ void movfr2gr_s(Register rd, FPURegister fj); -+ void movfr2gr_d(Register rd, FPURegister fj); -+ void movfrh2gr_s(Register rd, FPURegister fj); -+ -+ void movgr2fcsr(Register rj); -+ void movfcsr2gr(Register rd); -+ -+ void movfr2cf(CFRegister cd, FPURegister fj); -+ void movcf2fr(FPURegister fd, CFRegister cj); -+ -+ void movgr2cf(CFRegister cd, Register rj); -+ void movcf2gr(Register rd, CFRegister cj); -+ -+ void fld_s(FPURegister fd, Register rj, int32_t si12); -+ void fld_d(FPURegister fd, Register rj, int32_t si12); -+ void fst_s(FPURegister fd, Register rj, int32_t si12); -+ void fst_d(FPURegister fd, Register rj, int32_t si12); -+ -+ void fldx_s(FPURegister fd, Register rj, Register rk); -+ void fldx_d(FPURegister fd, Register rj, Register rk); -+ void fstx_s(FPURegister fd, Register rj, Register rk); -+ void fstx_d(FPURegister fd, Register rj, Register rk); -+ -+ // Check the code size generated from label to here. -+ int SizeOfCodeGeneratedSince(Label* label) { -+ return pc_offset() - label->pos(); -+ } -+ -+ // Check the number of instructions generated from label to here. -+ int InstructionsGeneratedSince(Label* label) { -+ return SizeOfCodeGeneratedSince(label) / kInstrSize; -+ } -+ -+ // Class for scoping postponing the trampoline pool generation. -+ class BlockTrampolinePoolScope { -+ public: -+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { -+ assem_->StartBlockTrampolinePool(); -+ } -+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } -+ -+ private: -+ Assembler* assem_; -+ -+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); -+ }; -+ -+ // Class for postponing the assembly buffer growth. Typically used for -+ // sequences of instructions that must be emitted as a unit, before -+ // buffer growth (and relocation) can occur. -+ // This blocking scope is not nestable. -+ class BlockGrowBufferScope { -+ public: -+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { -+ assem_->StartBlockGrowBuffer(); -+ } -+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } -+ -+ private: -+ Assembler* assem_; -+ -+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); -+ }; -+ -+ // Record a deoptimization reason that can be used by a log or cpu profiler. -+ // Use --trace-deopt to enable. 
-+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, -+ int id); -+ -+ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, -+ intptr_t pc_delta); -+ -+ // Writes a single byte or word of data in the code stream. Used for -+ // inline tables, e.g., jump-tables. -+ void db(uint8_t data); -+ void dd(uint32_t data); -+ void dq(uint64_t data); -+ void dp(uintptr_t data) { dq(data); } -+ void dd(Label* label); -+ -+ // Postpone the generation of the trampoline pool for the specified number of -+ // instructions. -+ void BlockTrampolinePoolFor(int instructions); -+ -+ // Check if there is less than kGap bytes available in the buffer. -+ // If this is the case, we need to grow the buffer before emitting -+ // an instruction or relocation information. -+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } -+ -+ // Get the number of bytes available in the buffer. -+ inline intptr_t available_space() const { -+ return reloc_info_writer.pos() - pc_; -+ } -+ -+ // Read/patch instructions. -+ static Instr instr_at(Address pc) { return *reinterpret_cast(pc); } -+ static void instr_at_put(Address pc, Instr instr) { -+ *reinterpret_cast(pc) = instr; -+ } -+ Instr instr_at(int pos) { -+ return *reinterpret_cast(buffer_start_ + pos); -+ } -+ void instr_at_put(int pos, Instr instr) { -+ *reinterpret_cast(buffer_start_ + pos) = instr; -+ } -+ -+ // Check if an instruction is a branch of some kind. -+ static bool IsBranch(Instr instr); -+ static bool IsB(Instr instr); -+ static bool IsBz(Instr instr); -+ static bool IsNal(Instr instr); -+ -+ static bool IsBeq(Instr instr); -+ static bool IsBne(Instr instr); -+ -+ static bool IsJump(Instr instr); -+ static bool IsMov(Instr instr, Register rd, Register rs); -+ static bool IsPcAddi(Instr instr, Register rd, int32_t si20); -+ -+ static bool IsJ(Instr instr); -+ static bool IsLu12i_w(Instr instr); -+ static bool IsOri(Instr instr); -+ static bool IsLu32i_d(Instr instr); -+ static bool IsLu52i_d(Instr instr); -+ -+ static bool IsNop(Instr instr, unsigned int type); -+ static bool IsPop(Instr instr); -+ static bool IsPush(Instr instr); -+ // static bool IsLwRegFpOffset(Instr instr); -+ // static bool IsSwRegFpOffset(Instr instr); -+ // static bool IsLwRegFpNegOffset(Instr instr); -+ // static bool IsSwRegFpNegOffset(Instr instr); -+ -+ static Register GetRjReg(Instr instr); -+ static Register GetRkReg(Instr instr); -+ static Register GetRdReg(Instr instr); -+ -+ static uint32_t GetRj(Instr instr); -+ static uint32_t GetRjField(Instr instr); -+ static uint32_t GetRk(Instr instr); -+ static uint32_t GetRkField(Instr instr); -+ static uint32_t GetRd(Instr instr); -+ static uint32_t GetRdField(Instr instr); -+ static uint32_t GetSa2(Instr instr); -+ static uint32_t GetSa3(Instr instr); -+ static uint32_t GetSa2Field(Instr instr); -+ static uint32_t GetSa3Field(Instr instr); -+ static uint32_t GetOpcodeField(Instr instr); -+ static uint32_t GetFunction(Instr instr); -+ static uint32_t GetFunctionField(Instr instr); -+ static uint32_t GetImmediate16(Instr instr); -+ static uint32_t GetLabelConst(Instr instr); -+ -+ static bool IsAddImmediate(Instr instr); -+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset); -+ -+ static bool IsAndImmediate(Instr instr); -+ static bool IsEmittedConstant(Instr instr); -+ -+ void CheckTrampolinePool(); -+ -+ inline int UnboundLabelsCount() { return unbound_labels_count_; } -+ -+ protected: -+ // Helper function for memory load/store. 
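-+  // Post-condition sketch (mirrors the implementation in the .cc file):
-+  // either the offset already fits in si12, or it has been moved into a
-+  // scratch index register and zeroed, i.e. afterwards
-+  //   src->hasIndexReg() || is_int12(src->offset())
-+  // holds, so callers can always pick the si12 or register-indexed form.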
-+ void AdjustBaseAndOffset(MemOperand* src); -+ -+ inline static void set_target_internal_reference_encoded_at(Address pc, -+ Address target); -+ -+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } -+ -+ // Decode branch instruction at pos and return branch target pos. -+ int target_at(int pos, bool is_internal); -+ -+ // Patch branch instruction at pos to branch to given branch target pos. -+ void target_at_put(int pos, int target_pos, bool is_internal); -+ -+ // Say if we need to relocate with this mode. -+ bool MustUseReg(RelocInfo::Mode rmode); -+ -+ // Record reloc info for current pc_. -+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); -+ -+ // Block the emission of the trampoline pool before pc_offset. -+ void BlockTrampolinePoolBefore(int pc_offset) { -+ if (no_trampoline_pool_before_ < pc_offset) -+ no_trampoline_pool_before_ = pc_offset; -+ } -+ -+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } -+ -+ void EndBlockTrampolinePool() { -+ trampoline_pool_blocked_nesting_--; -+ if (trampoline_pool_blocked_nesting_ == 0) { -+ CheckTrampolinePoolQuick(1); -+ } -+ } -+ -+ bool is_trampoline_pool_blocked() const { -+ return trampoline_pool_blocked_nesting_ > 0; -+ } -+ -+ bool has_exception() const { return internal_trampoline_exception_; } -+ -+ bool is_trampoline_emitted() const { return trampoline_emitted_; } -+ -+ // Temporarily block automatic assembly buffer growth. -+ void StartBlockGrowBuffer() { -+ DCHECK(!block_buffer_growth_); -+ block_buffer_growth_ = true; -+ } -+ -+ void EndBlockGrowBuffer() { -+ DCHECK(block_buffer_growth_); -+ block_buffer_growth_ = false; -+ } -+ -+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; } -+ -+ void CheckTrampolinePoolQuick(int extra_instructions = 0) { -+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { -+ CheckTrampolinePool(); -+ } -+ } -+ -+ private: -+ // Avoid overflows for displacements etc. -+ static const int kMaximalBufferSize = 512 * MB; -+ -+ // Buffer size and constant pool distance are checked together at regular -+ // intervals of kBufferCheckInterval emitted bytes. -+ static constexpr int kBufferCheckInterval = 1 * KB / 2; -+ -+ // Code generation. -+ // The relocation writer's position is at least kGap bytes below the end of -+ // the generated instructions. This is so that multi-instruction sequences do -+ // not have to check for overflow. The same is true for writes of large -+ // relocation info entries. -+ static constexpr int kGap = 64; -+ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); -+ -+ // Repeated checking whether the trampoline pool should be emitted is rather -+ // expensive. By default we only check again once a number of instructions -+ // has been generated. -+ static constexpr int kCheckConstIntervalInst = 32; -+ static constexpr int kCheckConstInterval = -+ kCheckConstIntervalInst * kInstrSize; -+ -+ int next_buffer_check_; // pc offset of next buffer check. -+ -+ // Emission of the trampoline pool may be blocked in some code sequences. -+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. -+ int no_trampoline_pool_before_; // Block emission before this pc offset. -+ -+ // Keep track of the last emitted pool to guarantee a maximal distance. -+ int last_trampoline_pool_end_; // pc offset of the end of the last pool. -+ -+ // Automatic growth of the assembly buffer may be blocked for some sequences. -+ bool block_buffer_growth_; // Block growth when true. 
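-+
-+  // Worked example of the kGap invariant above: the relocation writer is
-+  // kept at least kGap = 64 bytes beyond pc_, so a straight-line sequence
-+  // of up to 64 / kInstrSize = 16 instructions can be emitted after a
-+  // single overflow() check without re-checking in between.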
-+ -+ // Relocation information generation. -+ // Each relocation is encoded as a variable size value. -+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; -+ RelocInfoWriter reloc_info_writer; -+ -+ // The bound position, before this we cannot do instruction elimination. -+ int last_bound_pos_; -+ -+ // Code emission. -+ inline void CheckBuffer(); -+ void GrowBuffer(); -+ inline void emit(Instr x); -+ inline void emit(uint64_t x); -+ // inline void CheckForEmitInForbiddenSlot(); -+ template -+ inline void EmitHelper(T x); -+ inline void EmitHelper(Instr x); -+ -+ void GenB(Opcode opcode, Register rj, int32_t si21); // opcode:6 -+ void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq); -+ void GenB(Opcode opcode, int32_t si26); -+ void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16); -+ void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj, -+ CFRegister cd); -+ void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj, -+ FPURegister rd); -+ -+ void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true); -+ void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd); -+ void GenRegister(Opcode opcode, Register rj, FPURegister fd); -+ void GenRegister(Opcode opcode, FPURegister fj, Register rd); -+ void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd); -+ void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd); -+ void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd); -+ void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd); -+ void GenRegister(Opcode opcode, Register rj, CFRegister cd); -+ void GenRegister(Opcode opcode, CFRegister cj, Register rd); -+ -+ void GenRegister(Opcode opcode, Register rk, Register rj, Register rd); -+ void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj, -+ FPURegister fd); -+ -+ void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk, -+ FPURegister fj, FPURegister fd); -+ void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd); -+ -+ void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj, -+ Register rd); -+ void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj, -+ Register rd); -+ void GenImm(Opcode opcode, int32_t bit20, Register rd); -+ void GenImm(Opcode opcode, int32_t bit15); -+ void GenImm(Opcode opcode, int32_t value, Register rj, Register rd, -+ int32_t value_bits); // 6 | 12 | 14 | 16 -+ void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd); -+ -+ // Labels. -+ void print(const Label* L); -+ void bind_to(Label* L, int pos); -+ void next(Label* L, bool is_internal); -+ -+ // One trampoline consists of: -+ // - space for trampoline slots, -+ // - space for labels. -+ // -+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. -+ // Space for trampoline slots precedes space for labels. Each label is of one -+ // instruction size, so total amount for labels is equal to -+ // label_count * kInstrSize. -+ class Trampoline { -+ public: -+ Trampoline() { -+ start_ = 0; -+ next_slot_ = 0; -+ free_slot_count_ = 0; -+ end_ = 0; -+ } -+ Trampoline(int start, int slot_count) { -+ start_ = start; -+ next_slot_ = start; -+ free_slot_count_ = slot_count; -+ end_ = start + slot_count * kTrampolineSlotsSize; -+ } -+ int start() { return start_; } -+ int end() { return end_; } -+ int take_slot() { -+ int trampoline_slot = kInvalidSlotPos; -+ if (free_slot_count_ <= 0) { -+ // We have run out of space on trampolines. 
-+ // Make sure we fail in debug mode, so we become aware of each case -+ // when this happens. -+ DCHECK(0); -+ // Internal exception will be caught. -+ } else { -+ trampoline_slot = next_slot_; -+ free_slot_count_--; -+ next_slot_ += kTrampolineSlotsSize; -+ } -+ return trampoline_slot; -+ } -+ -+ private: -+ int start_; -+ int end_; -+ int next_slot_; -+ int free_slot_count_; -+ }; -+ -+ int32_t get_trampoline_entry(int32_t pos); -+ int unbound_labels_count_; -+ // After trampoline is emitted, long branches are used in generated code for -+ // the forward branches whose target offsets could be beyond reach of branch -+ // instruction. We use this information to trigger different mode of -+ // branch instruction generation, where we use jump instructions rather -+ // than regular branch instructions. -+ bool trampoline_emitted_; -+ static constexpr int kInvalidSlotPos = -1; -+ -+ // Internal reference positions, required for unbounded internal reference -+ // labels. -+ std::set internal_reference_positions_; -+ bool is_internal_reference(Label* L) { -+ return internal_reference_positions_.find(L->pos()) != -+ internal_reference_positions_.end(); -+ } -+ -+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; } -+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; } -+ bool prev_instr_compact_branch_ = false; -+ -+ Trampoline trampoline_; -+ bool internal_trampoline_exception_; -+ -+ RegList scratch_register_list_; -+ -+ private: -+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); -+ -+ int WriteCodeComments(); -+ -+ friend class RegExpMacroAssemblerMIPS; -+ friend class RelocInfo; -+ friend class BlockTrampolinePoolScope; -+ friend class EnsureSpace; -+}; -+ -+class EnsureSpace { -+ public: -+ explicit inline EnsureSpace(Assembler* assembler); -+}; -+ -+class V8_EXPORT_PRIVATE UseScratchRegisterScope { -+ public: -+ explicit UseScratchRegisterScope(Assembler* assembler); -+ ~UseScratchRegisterScope(); -+ -+ Register Acquire(); -+ bool hasAvailable() const; -+ -+ private: -+ RegList* available_; -+ RegList old_available_; -+}; -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ -diff --git a/deps/v8/src/codegen/loong64/constants-loong64.cc b/deps/v8/src/codegen/loong64/constants-loong64.cc -new file mode 100644 -index 00000000..3ae0f473 ---- /dev/null -+++ b/deps/v8/src/codegen/loong64/constants-loong64.cc -@@ -0,0 +1,100 @@ -+// Copyright 2011 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/codegen/loong64/constants-loong64.h" -+ -+namespace v8 { -+namespace internal { -+ -+// ----------------------------------------------------------------------------- -+// Registers. -+ -+// These register names are defined in a way to match the native disassembler -+// formatting. See for example the command "objdump -d ". -+const char* Registers::names_[kNumSimuRegisters] = { -+ "zero_reg", "ra", "gp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6", -+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "tp", -+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"}; -+ -+// List of alias names which can be used when referring to MIPS registers. 
-+const Registers::RegisterAlias Registers::aliases_[] = {
-+    {0, "zero"}, {23, "cp"}, {kInvalidRegister, nullptr}};
-+
-+const char* Registers::Name(int reg) {
-+  const char* result;
-+  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
-+    result = names_[reg];
-+  } else {
-+    result = "noreg";
-+  }
-+  return result;
-+}
-+
-+int Registers::Number(const char* name) {
-+  // Look through the canonical names.
-+  for (int i = 0; i < kNumSimuRegisters; i++) {
-+    if (strcmp(names_[i], name) == 0) {
-+      return i;
-+    }
-+  }
-+
-+  // Look through the alias names.
-+  int i = 0;
-+  while (aliases_[i].reg != kInvalidRegister) {
-+    if (strcmp(aliases_[i].name, name) == 0) {
-+      return aliases_[i].reg;
-+    }
-+    i++;
-+  }
-+
-+  // No register with the requested name found.
-+  return kInvalidRegister;
-+}
-+
-+const char* FPURegisters::names_[kNumFPURegisters] = {
-+    "f0",  "f1",  "f2",  "f3",  "f4",  "f5",  "f6",  "f7",  "f8",  "f9",  "f10",
-+    "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
-+    "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
-+
-+// List of alias names which can be used when referring to LoongArch FPU
-+// registers.
-+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
-+    {kInvalidRegister, nullptr}};
-+
-+const char* FPURegisters::Name(int creg) {
-+  const char* result;
-+  if ((0 <= creg) && (creg < kNumFPURegisters)) {
-+    result = names_[creg];
-+  } else {
-+    result = "nocreg";
-+  }
-+  return result;
-+}
-+
-+int FPURegisters::Number(const char* name) {
-+  // Look through the canonical names.
-+  for (int i = 0; i < kNumFPURegisters; i++) {
-+    if (strcmp(names_[i], name) == 0) {
-+      return i;
-+    }
-+  }
-+
-+  // Look through the alias names.
-+  int i = 0;
-+  while (aliases_[i].creg != kInvalidRegister) {
-+    if (strcmp(aliases_[i].name, name) == 0) {
-+      return aliases_[i].creg;
-+    }
-+    i++;
-+  }
-+
-+  // No FPU register with the requested name found.
-+  return kInvalidFPURegister;
-+}
-+
-+}  // namespace internal
-+}  // namespace v8
-+
-+#endif  // V8_TARGET_ARCH_LOONG64
-diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
-new file mode 100644
-index 00000000..e94ec5dd
---- /dev/null
-+++ b/deps/v8/src/codegen/loong64/constants-loong64.h
-@@ -0,0 +1,1340 @@
-+// Copyright 2012 the V8 project authors. All rights reserved.
-+// Use of this source code is governed by a BSD-style license that can be
-+// found in the LICENSE file.
-+
-+#ifndef V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
-+#define V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
-+
-+#include "src/base/logging.h"
-+#include "src/base/macros.h"
-+#include "src/common/globals.h"
-+
-+// UNIMPLEMENTED_ macro for LOONGISA.
-+#ifdef DEBUG
-+#define UNIMPLEMENTED_LOONGISA()                                           \
-+  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
-+                       __FILE__, __LINE__, __func__)
-+#else
-+#define UNIMPLEMENTED_LOONGISA()
-+#endif
-+
-+#define UNSUPPORTED_LOONGISA() \
-+  v8::internal::PrintF("Unsupported instruction.\n")
-+
-+const uint32_t kLeastSignificantByteInInt32Offset = 0;
-+const uint32_t kLessSignificantWordInDoublewordOffset = 0;
-+
-+#ifndef __STDC_FORMAT_MACROS
-+#define __STDC_FORMAT_MACROS
-+#endif
-+#include <inttypes.h>
-+
-+// Defines constants and accessor classes to assemble, disassemble and
-+// simulate LOONG64 instructions.
-+
-+namespace v8 {
-+namespace internal {
-+
-+constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
-+
-+// -----------------------------------------------------------------------------
-+// Registers and FPURegisters.
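-+//
-+// The Registers/FPURegisters helpers declared further below map between
-+// names and numbers. Expected behaviour, using the tables defined in
-+// constants-loong64.cc above (illustrative sketch):
-+//   Registers::Number("a0") == 4                       // canonical name
-+//   Registers::Number("zero") == 0                     // via aliases_
-+//   Registers::Number("nosuchreg") == kInvalidRegister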
-+
-+// Number of general purpose registers.
-+const int kNumRegisters = 32;
-+const int kInvalidRegister = -1;
-+
-+// Number of registers with pc.
-+const int kNumSimuRegisters = 33;
-+
-+// In the simulator, the PC register is simulated as the 33rd register.
-+const int kPCRegister = 32;
-+
-+// Number of coprocessor registers.
-+const int kNumFPURegisters = 32;
-+const int kInvalidFPURegister = -1;
-+
-+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-+// TODO fcsr0 fcsr1 fcsr2 fcsr3
-+const int kFCSRRegister = 0;
-+const int kInvalidFPUControlRegister = -1;
-+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
-+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
-+const uint64_t kFPU64InvalidResult =
-+    static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
-+const int64_t kFPU64InvalidResultNegative =
-+    static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
-+
-+// FCSR constants.
-+// TODO
-+const uint32_t kFCSRInexactFlagBit = 16;
-+const uint32_t kFCSRUnderflowFlagBit = 17;
-+const uint32_t kFCSROverflowFlagBit = 18;
-+const uint32_t kFCSRDivideByZeroFlagBit = 19;
-+const uint32_t kFCSRInvalidOpFlagBit = 20;
-+
-+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
-+
-+const uint32_t kFCSRFlagMask =
-+    kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
-+    kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
-+
-+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
-+
-+// Actual value of root register is offset from the root array's start
-+// to take advantage of negative displacement values.
-+// TODO(sigurds): Choose best value.
-+constexpr int kRootRegisterBias = 256;
-+
-+// Helper functions for converting between register numbers and names.
-+class Registers {
-+ public:
-+  // Return the name of the register.
-+  static const char* Name(int reg);
-+
-+  // Lookup the register number for the name provided.
-+  static int Number(const char* name);
-+
-+  struct RegisterAlias {
-+    int reg;
-+    const char* name;
-+  };
-+
-+  static const int64_t kMaxValue = 0x7fffffffffffffffl;
-+  static const int64_t kMinValue = 0x8000000000000000l;
-+
-+ private:
-+  static const char* names_[kNumSimuRegisters];
-+  static const RegisterAlias aliases_[];
-+};
-+
-+// Helper functions for converting between register numbers and names.
-+class FPURegisters {
-+ public:
-+  // Return the name of the register.
-+  static const char* Name(int reg);
-+
-+  // Lookup the register number for the name provided.
-+  static int Number(const char* name);
-+
-+  struct RegisterAlias {
-+    int creg;
-+    const char* name;
-+  };
-+
-+ private:
-+  static const char* names_[kNumFPURegisters];
-+  static const RegisterAlias aliases_[];
-+};
-+
-+// -----------------------------------------------------------------------------
-+// Instructions encoding constants.
-+
-+// On LoongISA all instructions are 32 bits.
-+using Instr = int32_t;
-+
-+// Special Software Interrupt codes when used in the presence of the LOONG64
-+// simulator.
-+enum SoftwareInterruptCodes {
-+  // Transition to C code.
-+ call_rt_redirected = 0x7fff -+}; -+ -+// On LOONG64 Simulator breakpoints can have different codes: -+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, -+// the simulator will run through them and print the registers. -+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() -+// instructions (see Assembler::stop()). -+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the -+// debugger. -+const uint32_t kMaxWatchpointCode = 31; -+const uint32_t kMaxStopCode = 127; -+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); -+ -+// ----- Fields offset and length. -+const int kRjShift = 5; -+const int kRjBits = 5; -+const int kRkShift = 10; -+const int kRkBits = 5; -+const int kRdShift = 0; -+const int kRdBits = 5; -+const int kSaShift = 15; -+const int kSa2Bits = 2; -+const int kSa3Bits = 3; -+const int kCdShift = 0; -+const int kCdBits = 3; -+const int kCjShift = 5; -+const int kCjBits = 3; -+const int kCodeShift = 0; -+const int kCodeBits = 15; -+const int kCondShift = 15; -+const int kCondBits = 5; -+const int kUi5Shift = 10; -+const int kUi5Bits = 5; -+const int kUi6Shift = 10; -+const int kUi6Bits = 6; -+const int kUi12Shift = 10; -+const int kUi12Bits = 12; -+const int kSi12Shift = 10; -+const int kSi12Bits = 12; -+const int kSi14Shift = 10; -+const int kSi14Bits = 14; -+const int kSi16Shift = 10; -+const int kSi16Bits = 16; -+const int kSi20Shift = 5; -+const int kSi20Bits = 20; -+const int kMsbwShift = 16; -+const int kMsbwBits = 5; -+const int kLsbwShift = 10; -+const int kLsbwBits = 5; -+const int kMsbdShift = 16; -+const int kMsbdBits = 6; -+const int kLsbdShift = 10; -+const int kLsbdBits = 6; -+const int kFdShift = 0; -+const int kFdBits = 5; -+const int kFjShift = 5; -+const int kFjBits = 5; -+const int kFkShift = 10; -+const int kFkBits = 5; -+const int kFaShift = 15; -+const int kFaBits = 5; -+const int kCaShift = 15; -+const int kCaBits = 3; -+const int kHint15Shift = 0; -+const int kHint15Bits = 15; -+const int kHint5Shift = 0; -+const int kHint5Bits = 5; -+const int kOffsLowShift = 10; -+const int kOffsLowBits = 16; -+const int kOffs26HighShift = 0; -+const int kOffs26HighBits = 10; -+const int kOffs21HighShift = 0; -+const int kOffs21HighBits = 5; -+const int kImm12Shift = 0; -+const int kImm12Bits = 12; -+const int kImm16Shift = 0; -+const int kImm16Bits = 16; -+const int kImm26Shift = 0; -+const int kImm26Bits = 26; -+const int kImm28Shift = 0; -+const int kImm28Bits = 28; -+const int kImm32Shift = 0; -+const int kImm32Bits = 32; -+ -+// ----- Miscellaneous useful masks. -+// Instruction bit masks. -+const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift; -+const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift; -+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; -+const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift; -+const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift; -+// Misc masks. 
-+const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values -+const int kLoMaskOf32 = 0xffff; -+const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values -+const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48; -+const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32; -+const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16; -+ -+const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift; -+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; -+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; -+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; -+ -+// ----- LOONG64 Opcodes and Function Fields. -+enum Opcode : uint32_t { -+ BEQZ = 0x10U << 26, -+ BNEZ = 0x11U << 26, -+ BCZ = 0x12U << 26, // BCEQZ & BCNEZ -+ JIRL = 0x13U << 26, -+ B = 0x14U << 26, -+ BL = 0x15U << 26, -+ BEQ = 0x16U << 26, -+ BNE = 0x17U << 26, -+ BLT = 0x18U << 26, -+ BGE = 0x19U << 26, -+ BLTU = 0x1aU << 26, -+ BGEU = 0x1bU << 26, -+ -+ ADDU16I_D = 0x4U << 26, -+ -+ LU12I_W = 0xaU << 25, -+ LU32I_D = 0xbU << 25, -+ PCADDI = 0xcU << 25, -+ PCALAU12I = 0xdU << 25, -+ PCADDU12I = 0xeU << 25, -+ PCADDU18I = 0xfU << 25, -+ -+ LL_W = 0x20U << 24, -+ SC_W = 0x21U << 24, -+ LL_D = 0x22U << 24, -+ SC_D = 0x23U << 24, -+ LDPTR_W = 0x24U << 24, -+ STPTR_W = 0x25U << 24, -+ LDPTR_D = 0x26U << 24, -+ STPTR_D = 0x27U << 24, -+ -+ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W -+ BSTRINS_W = BSTR_W, -+ BSTRPICK_W = BSTR_W, -+ BSTRINS_D = 0x2U << 22, -+ BSTRPICK_D = 0x3U << 22, -+ -+ SLTI = 0x8U << 22, -+ SLTUI = 0x9U << 22, -+ ADDI_W = 0xaU << 22, -+ ADDI_D = 0xbU << 22, -+ LU52I_D = 0xcU << 22, -+ ANDI = 0xdU << 22, -+ ORI = 0xeU << 22, -+ XORI = 0xfU << 22, -+ -+ LD_B = 0xa0U << 22, -+ LD_H = 0xa1U << 22, -+ LD_W = 0xa2U << 22, -+ LD_D = 0xa3U << 22, -+ ST_B = 0xa4U << 22, -+ ST_H = 0xa5U << 22, -+ ST_W = 0xa6U << 22, -+ ST_D = 0xa7U << 22, -+ LD_BU = 0xa8U << 22, -+ LD_HU = 0xa9U << 22, -+ LD_WU = 0xaaU << 22, -+ FLD_S = 0xacU << 22, -+ FST_S = 0xadU << 22, -+ FLD_D = 0xaeU << 22, -+ FST_D = 0xafU << 22, -+ -+ FMADD_S = 0x81U << 20, -+ FMADD_D = 0x82U << 20, -+ FMSUB_S = 0x85U << 20, -+ FMSUB_D = 0x86U << 20, -+ FNMADD_S = 0x89U << 20, -+ FNMADD_D = 0x8aU << 20, -+ FNMSUB_S = 0x8dU << 20, -+ FNMSUB_D = 0x8eU << 20, -+ FCMP_COND_S = 0xc1U << 20, -+ FCMP_COND_D = 0xc2U << 20, -+ -+ BYTEPICK_D = 0x3U << 18, -+ BYTEPICK_W = 0x2U << 18, -+ -+ FSEL = 0x340U << 18, -+ -+ ALSL = 0x1U << 18, -+ ALSL_W = ALSL, -+ ALSL_WU = ALSL, -+ -+ ALSL_D = 0xbU << 18, -+ -+ SLLI_W = 0x40U << 16, -+ SRLI_W = 0x44U << 16, -+ SRAI_W = 0x48U << 16, -+ ROTRI_W = 0x4cU << 16, -+ -+ SLLI_D = 0x41U << 16, -+ SRLI_D = 0x45U << 16, -+ SRAI_D = 0x49U << 16, -+ ROTRI_D = 0x4dU << 16, -+ -+ SLLI = 0x10U << 18, -+ SRLI = 0x11U << 18, -+ SRAI = 0x12U << 18, -+ ROTRI = 0x13U << 18, -+ -+ ADD_W = 0x20U << 15, -+ ADD_D = 0x21U << 15, -+ SUB_W = 0x22U << 15, -+ SUB_D = 0x23U << 15, -+ SLT = 0x24U << 15, -+ SLTU = 0x25U << 15, -+ MASKNEZ = 0x26U << 15, -+ MASKEQZ = 0x27U << 15, -+ NOR = 0x28U << 15, -+ AND = 0x29U << 15, -+ OR = 0x2aU << 15, -+ XOR = 0x2bU << 15, -+ ORN = 0x2cU << 15, -+ ANDN = 0x2dU << 15, -+ SLL_W = 0x2eU << 15, -+ SRL_W = 0x2fU << 15, -+ SRA_W = 0x30U << 15, -+ SLL_D = 0x31U << 15, -+ SRL_D = 0x32U << 15, -+ SRA_D = 0x33U << 15, -+ ROTR_W = 0x36U << 15, -+ ROTR_D = 0x37U << 15, -+ MUL_W = 0x38U << 15, -+ MULH_W = 0x39U << 15, -+ MULH_WU = 0x3aU << 15, -+ MUL_D = 0x3bU << 15, -+ MULH_D = 0x3cU << 15, -+ MULH_DU = 0x3dU << 15, -+ MULW_D_W = 0x3eU << 15, -+ MULW_D_WU = 0x3fU << 15, -+ 
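// How the opcode constants above combine with the register fields defined
// earlier in this header: a 3-register ALU op is just opcode | rk | rj | rd.
// A minimal standalone sketch, not V8 code; the shift/width values and the
// ADD_W encoding are copied from this header, while EncodeRRR/Field and the
// main() driver are invented for illustration.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    const int kRdShift = 0, kRdBits = 5;
    const int kRjShift = 5, kRjBits = 5;
    const int kRkShift = 10, kRkBits = 5;
    const uint32_t kAddW = 0x20u << 15;  // ADD_W from the enum above

    // Compose opcode | rk | rj | rd, as the assembler does.
    uint32_t EncodeRRR(uint32_t op, int rd, int rj, int rk) {
      return op | (uint32_t)rk << kRkShift | (uint32_t)rj << kRjShift |
             (uint32_t)rd << kRdShift;
    }

    // Shift-and-mask extraction, mirroring the RdValue()/RjValue()/RkValue()
    // accessors defined later in this header.
    int Field(uint32_t instr, int shift, int bits) {
      return (instr >> shift) & ((1u << bits) - 1);
    }

    int main() {
      // add_w a0, a1, a2 -- a0/a1/a2 are registers 4/5/6 in the names_ table.
      uint32_t instr = EncodeRRR(kAddW, 4, 5, 6);
      assert(instr == 0x001018a4u);
      assert(Field(instr, kRdShift, kRdBits) == 4);
      assert(Field(instr, kRjShift, kRjBits) == 5);
      assert(Field(instr, kRkShift, kRkBits) == 6);
      printf("add_w a0, a1, a2 -> 0x%08x\n", instr);
    }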
-+ DIV_W = 0x40U << 15, -+ MOD_W = 0x41U << 15, -+ DIV_WU = 0x42U << 15, -+ MOD_WU = 0x43U << 15, -+ DIV_D = 0x44U << 15, -+ MOD_D = 0x45U << 15, -+ DIV_DU = 0x46U << 15, -+ MOD_DU = 0x47U << 15, -+ -+ BREAK = 0x54U << 15, -+ -+ FADD_S = 0x201U << 15, -+ FADD_D = 0x202U << 15, -+ FSUB_S = 0x205U << 15, -+ FSUB_D = 0x206U << 15, -+ FMUL_S = 0x209U << 15, -+ FMUL_D = 0x20aU << 15, -+ FDIV_S = 0x20dU << 15, -+ FDIV_D = 0x20eU << 15, -+ FMAX_S = 0x211U << 15, -+ FMAX_D = 0x212U << 15, -+ FMIN_S = 0x215U << 15, -+ FMIN_D = 0x216U << 15, -+ FMAXA_S = 0x219U << 15, -+ FMAXA_D = 0x21aU << 15, -+ FMINA_S = 0x21dU << 15, -+ FMINA_D = 0x21eU << 15, -+ FSCALEB_S = 0x221U << 15, -+ FSCALEB_D = 0x222U << 15, -+ FCOPYSIGN_S = 0x225U << 15, -+ FCOPYSIGN_D = 0x226U << 15, -+ -+ LDX_B = 0x7000U << 15, -+ LDX_H = 0x7008U << 15, -+ LDX_W = 0x7010U << 15, -+ LDX_D = 0x7018U << 15, -+ STX_B = 0x7020U << 15, -+ STX_H = 0x7028U << 15, -+ STX_W = 0x7030U << 15, -+ STX_D = 0x7038U << 15, -+ LDX_BU = 0x7040U << 15, -+ LDX_HU = 0x7048U << 15, -+ LDX_WU = 0x7050U << 15, -+ FLDX_S = 0x7060U << 15, -+ FLDX_D = 0x7068U << 15, -+ FSTX_S = 0x7070U << 15, -+ FSTX_D = 0x7078U << 15, -+ -+ AMSWAP_W = 0x70c0U << 15, -+ AMSWAP_D = 0x70c1U << 15, -+ AMADD_W = 0x70c2U << 15, -+ AMADD_D = 0x70c3U << 15, -+ AMAND_W = 0x70c4U << 15, -+ AMAND_D = 0x70c5U << 15, -+ AMOR_W = 0x70c6U << 15, -+ AMOR_D = 0x70c7U << 15, -+ AMXOR_W = 0x70c8U << 15, -+ AMXOR_D = 0x70c9U << 15, -+ AMMAX_W = 0x70caU << 15, -+ AMMAX_D = 0x70cbU << 15, -+ AMMIN_W = 0x70ccU << 15, -+ AMMIN_D = 0x70cdU << 15, -+ AMMAX_WU = 0x70ceU << 15, -+ AMMAX_DU = 0x70cfU << 15, -+ AMMIN_WU = 0x70d0U << 15, -+ AMMIN_DU = 0x70d1U << 15, -+ AMSWAP_DB_W = 0x70d2U << 15, -+ AMSWAP_DB_D = 0x70d3U << 15, -+ AMADD_DB_W = 0x70d4U << 15, -+ AMADD_DB_D = 0x70d5U << 15, -+ AMAND_DB_W = 0x70d6U << 15, -+ AMAND_DB_D = 0x70d7U << 15, -+ AMOR_DB_W = 0x70d8U << 15, -+ AMOR_DB_D = 0x70d9U << 15, -+ AMXOR_DB_W = 0x70daU << 15, -+ AMXOR_DB_D = 0x70dbU << 15, -+ AMMAX_DB_W = 0x70dcU << 15, -+ AMMAX_DB_D = 0x70ddU << 15, -+ AMMIN_DB_W = 0x70deU << 15, -+ AMMIN_DB_D = 0x70dfU << 15, -+ AMMAX_DB_WU = 0x70e0U << 15, -+ AMMAX_DB_DU = 0x70e1U << 15, -+ AMMIN_DB_WU = 0x70e2U << 15, -+ AMMIN_DB_DU = 0x70e3U << 15, -+ -+ DBAR = 0x70e4U << 15, -+ IBAR = 0x70e5U << 15, -+ -+ CLO_W = 0X4U << 10, -+ CLZ_W = 0X5U << 10, -+ CTO_W = 0X6U << 10, -+ CTZ_W = 0X7U << 10, -+ CLO_D = 0X8U << 10, -+ CLZ_D = 0X9U << 10, -+ CTO_D = 0XaU << 10, -+ CTZ_D = 0XbU << 10, -+ REVB_2H = 0XcU << 10, -+ REVB_4H = 0XdU << 10, -+ REVB_2W = 0XeU << 10, -+ REVB_D = 0XfU << 10, -+ REVH_2W = 0X10U << 10, -+ REVH_D = 0X11U << 10, -+ BITREV_4B = 0X12U << 10, -+ BITREV_8B = 0X13U << 10, -+ BITREV_W = 0X14U << 10, -+ BITREV_D = 0X15U << 10, -+ EXT_W_H = 0X16U << 10, -+ EXT_W_B = 0X17U << 10, -+ -+ FABS_S = 0X4501U << 10, -+ FABS_D = 0X4502U << 10, -+ FNEG_S = 0X4505U << 10, -+ FNEG_D = 0X4506U << 10, -+ FLOGB_S = 0X4509U << 10, -+ FLOGB_D = 0X450aU << 10, -+ FCLASS_S = 0X450dU << 10, -+ FCLASS_D = 0X450eU << 10, -+ FSQRT_S = 0X4511U << 10, -+ FSQRT_D = 0X4512U << 10, -+ FRECIP_S = 0X4515U << 10, -+ FRECIP_D = 0X4516U << 10, -+ FRSQRT_S = 0X4519U << 10, -+ FRSQRT_D = 0X451aU << 10, -+ FMOV_S = 0X4525U << 10, -+ FMOV_D = 0X4526U << 10, -+ MOVGR2FR_W = 0X4529U << 10, -+ MOVGR2FR_D = 0X452aU << 10, -+ MOVGR2FRH_W = 0X452bU << 10, -+ MOVFR2GR_S = 0X452dU << 10, -+ MOVFR2GR_D = 0X452eU << 10, -+ MOVFRH2GR_S = 0X452fU << 10, -+ MOVGR2FCSR = 0X4530U << 10, -+ MOVFCSR2GR = 0X4532U << 10, -+ MOVFR2CF = 0X4534U << 10, -+ MOVGR2CF = 0X4536U << 10, -+ 
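// The branch opcodes above (B, BL) carry a 26-bit offset split across two
// fields: offs[15:0] lives in instruction bits 25..10 and offs[25:16] in
// bits 9..0 (see kOffsLowShift and kOffs26HighShift earlier in this header).
// A standalone sketch of the reassembly done by Offs26Value() further down;
// the Bits() helper mirrors InstructionBase::Bits(), and note the result
// comes back as the raw zero-extended field. Not V8 code; the main() packing
// test is illustrative only.

    #include <cassert>
    #include <cstdint>

    const int kOffsLowShift = 10, kOffsLowBits = 16;
    const int kOffs26HighShift = 0, kOffs26HighBits = 10;

    int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((2u << (hi - lo)) - 1);
    }

    int Offs26(uint32_t instr) {
      int low = Bits(instr, kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
      int high = Bits(instr, kOffs26HighShift + kOffs26HighBits - 1,
                      kOffs26HighShift);
      return (high << kOffsLowBits) + low;
    }

    int main() {
      // Pack a 26-bit offset into an otherwise-zero word and read it back.
      uint32_t off = 0x2abcd;
      uint32_t instr = ((off & 0xffffu) << kOffsLowShift) | (off >> 16);
      assert(Offs26(instr) == 0x2abcd);
    }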
-+ FCVT_S_D = 0x4646U << 10, -+ FCVT_D_S = 0x4649U << 10, -+ FTINTRM_W_S = 0x4681U << 10, -+ FTINTRM_W_D = 0x4682U << 10, -+ FTINTRM_L_S = 0x4689U << 10, -+ FTINTRM_L_D = 0x468aU << 10, -+ FTINTRP_W_S = 0x4691U << 10, -+ FTINTRP_W_D = 0x4692U << 10, -+ FTINTRP_L_S = 0x4699U << 10, -+ FTINTRP_L_D = 0x469aU << 10, -+ FTINTRZ_W_S = 0x46a1U << 10, -+ FTINTRZ_W_D = 0x46a2U << 10, -+ FTINTRZ_L_S = 0x46a9U << 10, -+ FTINTRZ_L_D = 0x46aaU << 10, -+ FTINTRNE_W_S = 0x46b1U << 10, -+ FTINTRNE_W_D = 0x46b2U << 10, -+ FTINTRNE_L_S = 0x46b9U << 10, -+ FTINTRNE_L_D = 0x46baU << 10, -+ FTINT_W_S = 0x46c1U << 10, -+ FTINT_W_D = 0x46c2U << 10, -+ FTINT_L_S = 0x46c9U << 10, -+ FTINT_L_D = 0x46caU << 10, -+ FFINT_S_W = 0x4744U << 10, -+ FFINT_S_L = 0x4746U << 10, -+ FFINT_D_W = 0x4748U << 10, -+ FFINT_D_L = 0x474aU << 10, -+ FRINT_S = 0x4791U << 10, -+ FRINT_D = 0x4792U << 10, -+ -+ MOVCF2FR = 0x4535U << 10, -+ MOVCF2GR = 0x4537U << 10 -+}; -+ -+// ----- Emulated conditions. -+// On LOONG64 we use this enum to abstract from conditional branch instructions. -+// The 'U' prefix is used to specify unsigned comparisons. -+enum Condition { -+ // Any value < 0 is considered no_condition. -+ kNoCondition = -1, -+ overflow = 0, -+ no_overflow = 1, -+ Uless = 2, -+ Ugreater_equal = 3, -+ Uless_equal = 4, -+ Ugreater = 5, -+ equal = 6, -+ not_equal = 7, // Unordered or Not Equal. -+ negative = 8, -+ positive = 9, -+ parity_even = 10, -+ parity_odd = 11, -+ less = 12, -+ greater_equal = 13, -+ less_equal = 14, -+ greater = 15, -+ ueq = 16, // Unordered or Equal. -+ ogl = 17, // Ordered and Not Equal. -+ cc_always = 18, -+ -+ // Aliases. -+ carry = Uless, -+ not_carry = Ugreater_equal, -+ zero = equal, -+ eq = equal, -+ not_zero = not_equal, -+ ne = not_equal, -+ nz = not_equal, -+ sign = negative, -+ not_sign = positive, -+ mi = negative, -+ pl = positive, -+ hi = Ugreater, -+ ls = Uless_equal, -+ ge = greater_equal, -+ lt = less, -+ gt = greater, -+ le = less_equal, -+ hs = Ugreater_equal, -+ lo = Uless, -+ al = cc_always, -+ ult = Uless, -+ uge = Ugreater_equal, -+ ule = Uless_equal, -+ ugt = Ugreater, -+ cc_default = kNoCondition -+}; -+ -+// Returns the equivalent of !cc. -+// Negation of the default kNoCondition (-1) results in a non-default -+// no_condition value (-2). As long as tests for no_condition check -+// for condition < 0, this will work as expected. -+inline Condition NegateCondition(Condition cc) { -+ DCHECK(cc != cc_always); -+ return static_cast(cc ^ 1); -+} -+ -+inline Condition NegateFpuCondition(Condition cc) { -+ DCHECK(cc != cc_always); -+ switch (cc) { -+ case ult: -+ return ge; -+ case ugt: -+ return le; -+ case uge: -+ return lt; -+ case ule: -+ return gt; -+ case lt: -+ return uge; -+ case gt: -+ return ule; -+ case ge: -+ return ult; -+ case le: -+ return ugt; -+ case eq: -+ return ne; -+ case ne: -+ return eq; -+ case ueq: -+ return ogl; -+ case ogl: -+ return ueq; -+ default: -+ return cc; -+ } -+} -+ -+// ----- Coprocessor conditions. -+enum FPUCondition { -+ kNoFPUCondition = -1, -+ -+ CAF = 0x00, // False. -+ SAF = 0x01, // False. 
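// Why the Condition enum above interleaves each predicate with its negation:
// flipping the low bit turns one into the other, which is all that
// NegateCondition() has to do. A standalone check of that invariant, with
// the enum values copied from above; the Negate() name and the main() driver
// are illustrative only, not V8 code.

    #include <cassert>

    enum Condition {
      overflow = 0, no_overflow = 1,
      Uless = 2, Ugreater_equal = 3,
      Uless_equal = 4, Ugreater = 5,
      equal = 6, not_equal = 7,
    };

    Condition Negate(Condition cc) { return static_cast<Condition>(cc ^ 1); }

    int main() {
      assert(Negate(equal) == not_equal);
      assert(Negate(Uless) == Ugreater_equal);
      assert(Negate(Uless_equal) == Ugreater);
      assert(Negate(no_overflow) == overflow);
    }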
-+ CLT = 0x02, // Less Than quiet -+ // SLT = 0x03, // Less Than signaling -+ CEQ = 0x04, -+ SEQ = 0x05, -+ CLE = 0x06, -+ SLE = 0x07, -+ CUN = 0x08, -+ SUN = 0x09, -+ CULT = 0x0a, -+ SULT = 0x0b, -+ CUEQ = 0x0c, -+ SUEQ = 0x0d, -+ CULE = 0x0e, -+ SULE = 0x0f, -+ CNE = 0x10, -+ SNE = 0x11, -+ COR = 0x14, -+ SOR = 0x15, -+ CUNE = 0x18, -+ SUNE = 0x19, -+}; -+ -+const uint32_t kFPURoundingModeShift = 8; -+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift; -+ -+// FPU rounding modes. -+enum FPURoundingMode { -+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest. -+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero. -+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity. -+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity. -+ -+ // Aliases. -+ kRoundToNearest = RN, -+ kRoundToZero = RZ, -+ kRoundToPlusInf = RP, -+ kRoundToMinusInf = RM, -+ -+ mode_round = RN, -+ mode_ceil = RP, -+ mode_floor = RM, -+ mode_trunc = RZ -+}; -+ -+enum CheckForInexactConversion { -+ kCheckForInexactConversion, -+ kDontCheckForInexactConversion -+}; -+ -+enum class MaxMinKind : int { kMin = 0, kMax = 1 }; -+ -+// ----------------------------------------------------------------------------- -+// Hints. -+ -+// Branch hints are not used on the LOONG64. They are defined so that they can -+// appear in shared function signatures, but will be ignored in LOONG64 -+// implementations. -+enum Hint { no_hint = 0 }; -+ -+inline Hint NegateHint(Hint hint) { return no_hint; } -+ -+// ----------------------------------------------------------------------------- -+// Specific instructions, constants, and masks. -+// These constants are declared in assembler-mips.cc, as they use named -+// registers and other constants. -+ -+// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r) -+// operations as post-increment of sp. -+extern const Instr kPopInstruction; -+// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp. -+extern const Instr kPushInstruction; -+// St_d(r, MemOperand(sp, 0)) -+extern const Instr kPushRegPattern; -+// Ld_d(r, MemOperand(sp, 0)) -+extern const Instr kPopRegPattern; -+// extern const Instr kLwRegFpOffsetPattern; -+// extern const Instr kSwRegFpOffsetPattern; -+// extern const Instr kLwRegFpNegOffsetPattern; -+// extern const Instr kSwRegFpNegOffsetPattern; -+// A mask for the Rk register for push, pop, lw, sw instructions. -+extern const Instr kRtMask; -+// extern const Instr kLwSwInstrTypeMask; -+// extern const Instr kLwSwInstrArgumentMask; -+// extern const Instr kLwSwOffsetMask; -+ -+// Break 0xfffff, reserved for redirected real time call. -+const Instr rtCallRedirInstr = BREAK | call_rt_redirected; -+// A nop instruction. (Encoding of addi_w 0 0 0). -+const Instr nopInstr = ADDI_W; -+ -+constexpr uint8_t kInstrSize = 4; -+constexpr uint8_t kInstrSizeLog2 = 2; -+ -+class InstructionBase { -+ public: -+ enum { -+ // On Loonisa PC cannot actually be directly accessed. We behave as if PC -+ // was -+ // always the value of the current instruction being executed. -+ kPCReadOffset = 0 -+ }; -+ -+ enum Type { -+ kOp6Type, -+ kOp7Type, -+ kOp8Type, -+ kOp10Type, -+ kOp12Type, -+ kOp14Type, -+ kOp17Type, -+ kOp22Type, -+ kUnsupported = -1 -+ }; -+ -+ // Get the raw instruction bits. -+ inline Instr InstructionBits() const { -+ return *reinterpret_cast(this); -+ } -+ -+ // Set the raw instruction bits to value. 
-+ inline void SetInstructionBits(Instr value) { -+ *reinterpret_cast(this) = value; -+ } -+ -+ // Read one particular bit out of the instruction bits. -+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } -+ -+ // Read a bit field out of the instruction bits. -+ inline int Bits(int hi, int lo) const { -+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); -+ } -+ -+ // Safe to call within InstructionType(). -+ inline int RjFieldRawNoAssert() const { -+ return InstructionBits() & kRjFieldMask; -+ } -+ -+ // Get the encoding type of the instruction. -+ inline Type InstructionType() const; -+ -+ protected: -+ InstructionBase() {} -+}; -+ -+template -+class InstructionGetters : public T { -+ public: -+ inline int RjValue() const { -+ return this->Bits(kRjShift + kRjBits - 1, kRjShift); -+ } -+ -+ inline int RkValue() const { -+ return this->Bits(kRkShift + kRkBits - 1, kRkShift); -+ } -+ -+ inline int RdValue() const { -+ return this->Bits(kRdShift + kRdBits - 1, kRdShift); -+ } -+ -+ inline int Sa2Value() const { -+ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift); -+ } -+ -+ inline int Sa3Value() const { -+ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift); -+ } -+ -+ inline int Ui5Value() const { -+ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift); -+ } -+ -+ inline int Ui6Value() const { -+ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift); -+ } -+ -+ inline int Ui12Value() const { -+ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift); -+ } -+ -+ inline int LsbwValue() const { -+ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift); -+ } -+ -+ inline int MsbwValue() const { -+ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift); -+ } -+ -+ inline int LsbdValue() const { -+ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift); -+ } -+ -+ inline int MsbdValue() const { -+ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift); -+ } -+ -+ inline int CondValue() const { -+ return this->Bits(kCondShift + kCondBits - 1, kCondShift); -+ } -+ -+ inline int Si12Value() const { -+ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift); -+ } -+ -+ inline int Si14Value() const { -+ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift); -+ } -+ -+ inline int Si16Value() const { -+ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift); -+ } -+ -+ inline int Si20Value() const { -+ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift); -+ } -+ -+ inline int FdValue() const { -+ return this->Bits(kFdShift + kFdBits - 1, kFdShift); -+ } -+ -+ inline int FaValue() const { -+ return this->Bits(kFaShift + kFaBits - 1, kFaShift); -+ } -+ -+ inline int FjValue() const { -+ return this->Bits(kFjShift + kFjBits - 1, kFjShift); -+ } -+ -+ inline int FkValue() const { -+ return this->Bits(kFkShift + kFkBits - 1, kFkShift); -+ } -+ -+ inline int CjValue() const { -+ return this->Bits(kCjShift + kCjBits - 1, kCjShift); -+ } -+ -+ inline int CdValue() const { -+ return this->Bits(kCdShift + kCdBits - 1, kCdShift); -+ } -+ -+ inline int CaValue() const { -+ return this->Bits(kCaShift + kCaBits - 1, kCaShift); -+ } -+ -+ inline int CodeValue() const { -+ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift); -+ } -+ -+ inline int Hint5Value() const { -+ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift); -+ } -+ -+ inline int Hint15Value() const { -+ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift); -+ } -+ -+ inline int Offs16Value() const { -+ return this->Bits(kOffsLowShift + kOffsLowBits - 1, 
kOffsLowShift); -+ } -+ -+ inline int Offs21Value() const { -+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); -+ int high = -+ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift); -+ return ((high << kOffsLowBits) + low); -+ } -+ -+ inline int Offs26Value() const { -+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); -+ int high = -+ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift); -+ return ((high << kOffsLowBits) + low); -+ } -+ -+ inline int RjFieldRaw() const { -+ return this->InstructionBits() & kRjFieldMask; -+ } -+ -+ inline int RkFieldRaw() const { -+ return this->InstructionBits() & kRkFieldMask; -+ } -+ -+ inline int RdFieldRaw() const { -+ return this->InstructionBits() & kRdFieldMask; -+ } -+ -+ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); } -+ -+ /*TODO*/ -+ inline int32_t Imm12Value() const { abort(); } -+ -+ inline int32_t Imm14Value() const { abort(); } -+ -+ inline int32_t Imm16Value() const { abort(); } -+ -+ // Say if the instruction 'links'. e.g. jal, bal. -+ bool IsLinkingInstruction() const; -+ // Say if the instruction is a break or a trap. -+ bool IsTrap() const; -+}; -+ -+class Instruction : public InstructionGetters { -+ public: -+ // Instructions are read of out a code stream. The only way to get a -+ // reference to an instruction is to convert a pointer. There is no way -+ // to allocate or create instances of class Instruction. -+ // Use the At(pc) function to create references to Instruction. -+ static Instruction* At(byte* pc) { -+ return reinterpret_cast(pc); -+ } -+ -+ private: -+ // We need to prevent the creation of instances of class Instruction. -+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); -+}; -+ -+// ----------------------------------------------------------------------------- -+// LOONG64 assembly various constants. -+ -+// C/C++ argument slots size. -+const int kCArgSlotCount = 0; -+ -+const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2; -+ -+const int kInvalidStackOffset = -1; -+ -+static const int kNegOffset = 0x00008000; -+ -+InstructionBase::Type InstructionBase::InstructionType() const { -+ InstructionBase::Type kType = kUnsupported; -+ -+ // Check for kOp6Type -+ switch (Bits(31, 26) << 26) { -+ case ADDU16I_D: -+ case BEQZ: -+ case BNEZ: -+ case BCZ: -+ case JIRL: -+ case B: -+ case BL: -+ case BEQ: -+ case BNE: -+ case BLT: -+ case BGE: -+ case BLTU: -+ case BGEU: -+ kType = kOp6Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp7Type -+ switch (Bits(31, 25) << 25) { -+ case LU12I_W: -+ case LU32I_D: -+ case PCADDI: -+ case PCALAU12I: -+ case PCADDU12I: -+ case PCADDU18I: -+ kType = kOp7Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp8Type -+ switch (Bits(31, 24) << 24) { -+ case LDPTR_W: -+ case STPTR_W: -+ case LDPTR_D: -+ case STPTR_D: -+ case LL_W: -+ case SC_W: -+ case LL_D: -+ case SC_D: -+ kType = kOp8Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp10Type -+ switch (Bits(31, 22) << 22) { -+ case BSTR_W: { -+ // If Bit(21) = 0, then the Opcode is not BSTR_W. 
-+ if (Bit(21) == 0) -+ kType = kUnsupported; -+ else -+ kType = kOp10Type; -+ break; -+ } -+ case BSTRINS_D: -+ case BSTRPICK_D: -+ case SLTI: -+ case SLTUI: -+ case ADDI_W: -+ case ADDI_D: -+ case LU52I_D: -+ case ANDI: -+ case ORI: -+ case XORI: -+ case LD_B: -+ case LD_H: -+ case LD_W: -+ case LD_D: -+ case ST_B: -+ case ST_H: -+ case ST_W: -+ case ST_D: -+ case LD_BU: -+ case LD_HU: -+ case LD_WU: -+ case FLD_S: -+ case FST_S: -+ case FLD_D: -+ case FST_D: -+ kType = kOp10Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp12Type -+ switch (Bits(31, 20) << 20) { -+ case FMADD_S: -+ case FMADD_D: -+ case FMSUB_S: -+ case FMSUB_D: -+ case FNMADD_S: -+ case FNMADD_D: -+ case FNMSUB_S: -+ case FNMSUB_D: -+ case FCMP_COND_S: -+ case FCMP_COND_D: -+ case FSEL: -+ kType = kOp12Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp14Type -+ switch (Bits(31, 18) << 18) { -+ case ALSL: -+ case BYTEPICK_W: -+ case BYTEPICK_D: -+ case ALSL_D: -+ case SLLI: -+ case SRLI: -+ case SRAI: -+ case ROTRI: -+ kType = kOp14Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp17Type -+ switch (Bits(31, 15) << 15) { -+ case ADD_W: -+ case ADD_D: -+ case SUB_W: -+ case SUB_D: -+ case SLT: -+ case SLTU: -+ case MASKEQZ: -+ case MASKNEZ: -+ case NOR: -+ case AND: -+ case OR: -+ case XOR: -+ case ORN: -+ case ANDN: -+ case SLL_W: -+ case SRL_W: -+ case SRA_W: -+ case SLL_D: -+ case SRL_D: -+ case SRA_D: -+ case ROTR_D: -+ case ROTR_W: -+ case MUL_W: -+ case MULH_W: -+ case MULH_WU: -+ case MUL_D: -+ case MULH_D: -+ case MULH_DU: -+ case MULW_D_W: -+ case MULW_D_WU: -+ case DIV_W: -+ case MOD_W: -+ case DIV_WU: -+ case MOD_WU: -+ case DIV_D: -+ case MOD_D: -+ case DIV_DU: -+ case MOD_DU: -+ case BREAK: -+ case FADD_S: -+ case FADD_D: -+ case FSUB_S: -+ case FSUB_D: -+ case FMUL_S: -+ case FMUL_D: -+ case FDIV_S: -+ case FDIV_D: -+ case FMAX_S: -+ case FMAX_D: -+ case FMIN_S: -+ case FMIN_D: -+ case FMAXA_S: -+ case FMAXA_D: -+ case FMINA_S: -+ case FMINA_D: -+ case LDX_B: -+ case LDX_H: -+ case LDX_W: -+ case LDX_D: -+ case STX_B: -+ case STX_H: -+ case STX_W: -+ case STX_D: -+ case LDX_BU: -+ case LDX_HU: -+ case LDX_WU: -+ case FLDX_S: -+ case FLDX_D: -+ case FSTX_S: -+ case FSTX_D: -+ case AMSWAP_W: -+ case AMSWAP_D: -+ case AMADD_W: -+ case AMADD_D: -+ case AMAND_W: -+ case AMAND_D: -+ case AMOR_W: -+ case AMOR_D: -+ case AMXOR_W: -+ case AMXOR_D: -+ case AMMAX_W: -+ case AMMAX_D: -+ case AMMIN_W: -+ case AMMIN_D: -+ case AMMAX_WU: -+ case AMMAX_DU: -+ case AMMIN_WU: -+ case AMMIN_DU: -+ case AMSWAP_DB_W: -+ case AMSWAP_DB_D: -+ case AMADD_DB_W: -+ case AMADD_DB_D: -+ case AMAND_DB_W: -+ case AMAND_DB_D: -+ case AMOR_DB_W: -+ case AMOR_DB_D: -+ case AMXOR_DB_W: -+ case AMXOR_DB_D: -+ case AMMAX_DB_W: -+ case AMMAX_DB_D: -+ case AMMIN_DB_W: -+ case AMMIN_DB_D: -+ case AMMAX_DB_WU: -+ case AMMAX_DB_DU: -+ case AMMIN_DB_WU: -+ case AMMIN_DB_DU: -+ case DBAR: -+ case IBAR: -+ case FSCALEB_S: -+ case FSCALEB_D: -+ case FCOPYSIGN_S: -+ case FCOPYSIGN_D: -+ kType = kOp17Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ if (kType == kUnsupported) { -+ // Check for kOp22Type -+ switch (Bits(31, 10) << 10) { -+ case CLZ_W: -+ case CTZ_W: -+ case CLZ_D: -+ case CTZ_D: -+ case REVB_2H: -+ case REVB_4H: -+ case REVB_2W: -+ case REVB_D: -+ case REVH_2W: -+ case REVH_D: -+ case BITREV_4B: -+ case BITREV_8B: -+ case 
BITREV_W: -+ case BITREV_D: -+ case EXT_W_B: -+ case EXT_W_H: -+ case FABS_S: -+ case FABS_D: -+ case FNEG_S: -+ case FNEG_D: -+ case FSQRT_S: -+ case FSQRT_D: -+ case FMOV_S: -+ case FMOV_D: -+ case MOVGR2FR_W: -+ case MOVGR2FR_D: -+ case MOVGR2FRH_W: -+ case MOVFR2GR_S: -+ case MOVFR2GR_D: -+ case MOVFRH2GR_S: -+ case MOVGR2FCSR: -+ case MOVFCSR2GR: -+ case FCVT_S_D: -+ case FCVT_D_S: -+ case FTINTRM_W_S: -+ case FTINTRM_W_D: -+ case FTINTRM_L_S: -+ case FTINTRM_L_D: -+ case FTINTRP_W_S: -+ case FTINTRP_W_D: -+ case FTINTRP_L_S: -+ case FTINTRP_L_D: -+ case FTINTRZ_W_S: -+ case FTINTRZ_W_D: -+ case FTINTRZ_L_S: -+ case FTINTRZ_L_D: -+ case FTINTRNE_W_S: -+ case FTINTRNE_W_D: -+ case FTINTRNE_L_S: -+ case FTINTRNE_L_D: -+ case FTINT_W_S: -+ case FTINT_W_D: -+ case FTINT_L_S: -+ case FTINT_L_D: -+ case FFINT_S_W: -+ case FFINT_S_L: -+ case FFINT_D_W: -+ case FFINT_D_L: -+ case FRINT_S: -+ case FRINT_D: -+ case MOVFR2CF: -+ case MOVCF2FR: -+ case MOVGR2CF: -+ case MOVCF2GR: -+ case FRECIP_S: -+ case FRECIP_D: -+ case FRSQRT_S: -+ case FRSQRT_D: -+ case FCLASS_S: -+ case FCLASS_D: -+ case FLOGB_S: -+ case FLOGB_D: -+ case CLO_W: -+ case CTO_W: -+ case CLO_D: -+ case CTO_D: -+ kType = kOp22Type; -+ break; -+ default: -+ kType = kUnsupported; -+ } -+ } -+ -+ return kType; -+} -+ -+// ----------------------------------------------------------------------------- -+// Instructions. -+ -+template -+bool InstructionGetters
<T>
::IsTrap() const { -+ return true; -+} -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_ -diff --git a/deps/v8/src/codegen/loong64/cpu-loong64.cc b/deps/v8/src/codegen/loong64/cpu-loong64.cc -new file mode 100644 -index 00000000..4b5dc7c9 ---- /dev/null -+++ b/deps/v8/src/codegen/loong64/cpu-loong64.cc -@@ -0,0 +1,38 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+// CPU specific code for loongisa independent of OS goes here. -+ -+#include -+#include -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/codegen/cpu-features.h" -+ -+namespace v8 { -+namespace internal { -+ -+void CpuFeatures::FlushICache(void* start, size_t size) { -+#if !defined(USE_SIMULATOR) -+ // Nothing to do, flushing no instructions. -+ if (size == 0) { -+ return; -+ } -+ -+#if defined(ANDROID) && !defined(__LP64__) -+ // Bionic cacheflush can typically run in userland, avoiding kernel call. -+ char* end = reinterpret_cast(start) + size; -+ cacheflush(reinterpret_cast(start), reinterpret_cast(end), -+ 0); -+#else // ANDROID -+ asm("ibar 0\n"); -+#endif // ANDROID -+#endif // !USE_SIMULATOR. -+} -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc b/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc -new file mode 100644 -index 00000000..579b1b0f ---- /dev/null -+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc -@@ -0,0 +1,356 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/codegen/interface-descriptors.h" -+ -+#include "src/execution/frames.h" -+ -+namespace v8 { -+namespace internal { -+ -+const Register CallInterfaceDescriptor::ContextRegister() { return cp; } -+ -+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( -+ CallInterfaceDescriptorData* data, int register_parameter_count) { -+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; -+ CHECK_LE(static_cast(register_parameter_count), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(register_parameter_count, -+ default_stub_registers); -+} -+ -+// On MIPS it is not allowed to use odd numbered floating point registers -+// (e.g. f1, f3, etc.) for parameters. This can happen if we use -+// DefaultInitializePlatformSpecific to assign float registers for parameters. -+// E.g if fourth parameter goes to float register, f7 would be assigned for -+// parameter (a3 casted to int is 7). 
-+bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) { -+ return reg.code() % 2 == 0; -+} -+ -+void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2, a3}; -+ CHECK_EQ(static_cast(kParameterCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -+} -+ -+void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2}; -+ CHECK_EQ(static_cast(kParameterCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -+} -+ -+void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; -+ CHECK_EQ(static_cast(kParameterCount - kStackArgumentsCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount, -+ default_stub_registers); -+} -+ -+void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2}; -+ CHECK_EQ(static_cast(kParameterCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -+} -+ -+void RecordWriteDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; -+ -+ data->RestrictAllocatableRegisters(default_stub_registers, -+ arraysize(default_stub_registers)); -+ -+ CHECK_LE(static_cast(kParameterCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -+} -+ -+void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4}; -+ -+ data->RestrictAllocatableRegisters(default_stub_registers, -+ arraysize(default_stub_registers)); -+ -+ CHECK_LE(static_cast(kParameterCount), -+ arraysize(default_stub_registers)); -+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -+} -+ -+const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { -+ return a1; -+} -+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; } -+ -+const Register LoadDescriptor::ReceiverRegister() { return a1; } -+const Register LoadDescriptor::NameRegister() { return a2; } -+const Register LoadDescriptor::SlotRegister() { return a0; } -+ -+const Register LoadWithVectorDescriptor::VectorRegister() { return a3; } -+ -+const Register StoreDescriptor::ReceiverRegister() { return a1; } -+const Register StoreDescriptor::NameRegister() { return a2; } -+const Register StoreDescriptor::ValueRegister() { return a0; } -+const Register StoreDescriptor::SlotRegister() { return a4; } -+ -+const Register StoreWithVectorDescriptor::VectorRegister() { return a3; } -+ -+const Register StoreTransitionDescriptor::SlotRegister() { return a4; } -+const Register StoreTransitionDescriptor::VectorRegister() { return a3; } -+const Register StoreTransitionDescriptor::MapRegister() { return a5; } -+ -+const Register ApiGetterDescriptor::HolderRegister() { return a0; } -+const Register ApiGetterDescriptor::CallbackRegister() { return a3; } -+ 
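// The even-register restriction in IsValidFloatParameterRegister() above is
// carried over from the MIPS port: if float parameters were assigned from the
// default sequence, the fourth one would land in f7 (a3's code is 7), which
// this convention disallows. A trivial standalone restatement of the
// predicate, with an illustrative main() driver; not V8 code.

    #include <cassert>

    bool IsValidFloatParameterRegister(int code) { return code % 2 == 0; }

    int main() {
      assert(IsValidFloatParameterRegister(12));   // f12 is acceptable
      assert(!IsValidFloatParameterRegister(7));   // f7 is rejected
    }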
-+const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } -+const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } -+ -+// static -+const Register TypeConversionDescriptor::ArgumentRegister() { return a0; } -+ -+void TypeofDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = {a3}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallTrampolineDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1: target -+ // a0: number of arguments -+ Register registers[] = {a1, a0}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallVarargsDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a0 : number of arguments (on the stack, not including receiver) -+ // a1 : the target to call -+ // a4 : arguments list length (untagged) -+ // a2 : arguments list (FixedArray) -+ Register registers[] = {a1, a0, a4, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallForwardVarargsDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1: the target to call -+ // a0: number of arguments -+ // a2: start index (to support rest parameters) -+ Register registers[] = {a1, a0, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallFunctionTemplateDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1 : function template info -+ // a0 : number of arguments (on the stack, not including receiver) -+ Register registers[] = {a1, a0}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallWithSpreadDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a0 : number of arguments (on the stack, not including receiver) -+ // a1 : the target to call -+ // a2 : the object to spread -+ Register registers[] = {a1, a0, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void CallWithArrayLikeDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1 : the target to call -+ // a2 : the arguments list -+ Register registers[] = {a1, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ConstructVarargsDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a0 : number of arguments (on the stack, not including receiver) -+ // a1 : the target to call -+ // a3 : the new target -+ // a4 : arguments list length (untagged) -+ // a2 : arguments list (FixedArray) -+ Register registers[] = {a1, a3, a0, a4, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1: the target to call -+ // a3: new target -+ // a0: number of arguments -+ // a2: start index (to support rest parameters) -+ Register registers[] = {a1, a3, a0, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ConstructWithSpreadDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a0 : number of arguments (on the stack, not including receiver) -+ // a1 : the target to call -+ // a3 : the new target -+ // a2 : the object to spread -+ Register registers[] = {a1, a3, a0, a2}; -+ 
data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1 : the target to call -+ // a3 : the new target -+ // a2 : the arguments list -+ Register registers[] = {a1, a3, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ConstructStubDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // a1: target -+ // a3: new target -+ // a0: number of arguments -+ // a2: allocation site or undefined -+ Register registers[] = {a1, a3, a0, a2}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void AbortDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = {a0}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void AllocateHeapNumberDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // register state -+ data->InitializePlatformSpecific(0, nullptr); -+} -+ -+void CompareDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = {a1, a0}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void BinaryOpDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = {a1, a0}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ArgumentsAdaptorDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a1, // JSFunction -+ a3, // the new target -+ a0, // actual number of arguments -+ a2, // expected number of arguments -+ }; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ApiCallbackDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a1, // kApiFunctionAddress -+ a2, // kArgc -+ a3, // kCallData -+ a0, // kHolder -+ }; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void InterpreterDispatchDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, -+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a0, // argument count (not including receiver) -+ a2, // address of first argument -+ a1 // the target callable to be call -+ }; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a0, // argument count (not including receiver) -+ a4, // address of the first argument -+ a1, // constructor to call -+ a3, // new target -+ a2, // allocation site feedback if available, undefined otherwise -+ }; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void ResumeGeneratorDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a0, // the value to pass to the generator -+ a1 // the JSGeneratorObject to resume -+ }; -+ 
data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = { -+ a1, // loaded new FP -+ }; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ Register registers[] = {a0, a1}; -+ data->InitializePlatformSpecific(arraysize(registers), registers); -+} -+ -+void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // TODO(v8:8888): Implement on this platform. -+ DefaultInitializePlatformSpecific(data, 4); -+} -+ -+void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // TODO(v8:8888): Implement on this platform. -+ DefaultInitializePlatformSpecific(data, 4); -+} -+ -+void Compare_WithFeedbackDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // TODO(v8:8888): Implement on this platform. -+ DefaultInitializePlatformSpecific(data, 4); -+} -+ -+void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific( -+ CallInterfaceDescriptorData* data) { -+ // TODO(v8:8888): Implement on this platform. -+ DefaultInitializePlatformSpecific(data, 3); -+} -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc -new file mode 100644 -index 00000000..69fd5618 ---- /dev/null -+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc -@@ -0,0 +1,4050 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#include // For LONG_MIN, LONG_MAX. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/base/bits.h" -+#include "src/base/division-by-constant.h" -+#include "src/codegen/assembler-inl.h" -+#include "src/codegen/callable.h" -+#include "src/codegen/code-factory.h" -+#include "src/codegen/external-reference-table.h" -+#include "src/codegen/macro-assembler.h" -+#include "src/codegen/register-configuration.h" -+#include "src/debug/debug.h" -+#include "src/execution/frames-inl.h" -+#include "src/heap/memory-chunk.h" -+#include "src/init/bootstrapper.h" -+#include "src/logging/counters.h" -+#include "src/objects/heap-number.h" -+#include "src/runtime/runtime.h" -+#include "src/snapshot/embedded/embedded-data.h" -+#include "src/snapshot/snapshot.h" -+#include "src/wasm/wasm-code-manager.h" -+ -+// Satisfy cpplint check, but don't include platform-specific header. It is -+// included recursively via macro-assembler.h. 
-+#if 0 -+#include "src/codegen/loong64/macro-assembler-loong64.h" -+#endif -+ -+namespace v8 { -+namespace internal { -+ -+static inline bool IsZero(const Operand& rk) { -+ if (rk.is_reg()) { -+ return rk.rm() == zero_reg; -+ } else { -+ return rk.immediate() == 0; -+ } -+} -+ -+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, -+ Register exclusion1, -+ Register exclusion2, -+ Register exclusion3) const { -+ int bytes = 0; -+ RegList exclusions = 0; -+ if (exclusion1 != no_reg) { -+ exclusions |= exclusion1.bit(); -+ if (exclusion2 != no_reg) { -+ exclusions |= exclusion2.bit(); -+ if (exclusion3 != no_reg) { -+ exclusions |= exclusion3.bit(); -+ } -+ } -+ } -+ -+ RegList list = kJSCallerSaved & ~exclusions; -+ bytes += NumRegs(list) * kPointerSize; -+ -+ if (fp_mode == kSaveFPRegs) { -+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; -+ } -+ -+ return bytes; -+} -+ -+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, -+ Register exclusion2, Register exclusion3) { -+ int bytes = 0; -+ RegList exclusions = 0; -+ if (exclusion1 != no_reg) { -+ exclusions |= exclusion1.bit(); -+ if (exclusion2 != no_reg) { -+ exclusions |= exclusion2.bit(); -+ if (exclusion3 != no_reg) { -+ exclusions |= exclusion3.bit(); -+ } -+ } -+ } -+ -+ RegList list = kJSCallerSaved & ~exclusions; -+ MultiPush(list); -+ bytes += NumRegs(list) * kPointerSize; -+ -+ if (fp_mode == kSaveFPRegs) { -+ MultiPushFPU(kCallerSavedFPU); -+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; -+ } -+ -+ return bytes; -+} -+ -+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, -+ Register exclusion2, Register exclusion3) { -+ int bytes = 0; -+ if (fp_mode == kSaveFPRegs) { -+ MultiPopFPU(kCallerSavedFPU); -+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; -+ } -+ -+ RegList exclusions = 0; -+ if (exclusion1 != no_reg) { -+ exclusions |= exclusion1.bit(); -+ if (exclusion2 != no_reg) { -+ exclusions |= exclusion2.bit(); -+ if (exclusion3 != no_reg) { -+ exclusions |= exclusion3.bit(); -+ } -+ } -+ } -+ -+ RegList list = kJSCallerSaved & ~exclusions; -+ MultiPop(list); -+ bytes += NumRegs(list) * kPointerSize; -+ -+ return bytes; -+} -+ -+void TurboAssembler::LoadRoot(Register destination, RootIndex index) { -+ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); -+} -+ -+void TurboAssembler::PushCommonFrame(Register marker_reg) { -+ if (marker_reg.is_valid()) { -+ Push(ra, fp, marker_reg); -+ Add_d(fp, sp, Operand(kPointerSize)); -+ } else { -+ Push(ra, fp); -+ mov(fp, sp); -+ } -+} -+ -+void TurboAssembler::PushStandardFrame(Register function_reg) { -+ int offset = -StandardFrameConstants::kContextOffset; -+ if (function_reg.is_valid()) { -+ Push(ra, fp, cp, function_reg); -+ offset += kPointerSize; -+ } else { -+ Push(ra, fp, cp); -+ } -+ Add_d(fp, sp, Operand(offset)); -+} -+ -+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { -+ // The registers are pushed starting with the highest encoding, -+ // which means that lowest encodings are closest to the stack pointer. -+ return kSafepointRegisterStackIndexMap[reg_code]; -+} -+ -+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) -+// The register 'object' contains a heap object pointer. The heap object -+// tag is shifted away. 
-+void MacroAssembler::RecordWriteField(Register object, int offset, -+ Register value, Register dst, -+ RAStatus ra_status, -+ SaveFPRegsMode save_fp, -+ RememberedSetAction remembered_set_action, -+ SmiCheck smi_check) { -+ DCHECK(!AreAliased(value, dst, t8, object)); -+ // First, check if a write barrier is even needed. The tests below -+ // catch stores of Smis. -+ Label done; -+ -+ // Skip barrier if writing a smi. -+ if (smi_check == INLINE_SMI_CHECK) { -+ JumpIfSmi(value, &done); -+ } -+ -+ // Although the object register is tagged, the offset is relative to the start -+ // of the object, so so offset must be a multiple of kPointerSize. -+ DCHECK(IsAligned(offset, kPointerSize)); -+ -+ Add_d(dst, object, Operand(offset - kHeapObjectTag)); -+ if (emit_debug_code()) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Label ok; -+ And(t8, dst, Operand(kPointerSize - 1)); -+ Branch(&ok, eq, t8, Operand(zero_reg)); -+ stop(); -+ bind(&ok); -+ } -+ -+ RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action, -+ OMIT_SMI_CHECK); -+ -+ bind(&done); -+ -+ // Clobber clobbered input registers when running with the debug-code flag -+ // turned on to provoke errors. -+ if (emit_debug_code()) { -+ li(value, Operand(bit_cast(kZapValue + 4))); -+ li(dst, Operand(bit_cast(kZapValue + 8))); -+ } -+} -+ -+void TurboAssembler::SaveRegisters(RegList registers) { -+ DCHECK_GT(NumRegs(registers), 0); -+ RegList regs = 0; -+ for (int i = 0; i < Register::kNumRegisters; ++i) { -+ if ((registers >> i) & 1u) { -+ regs |= Register::from_code(i).bit(); -+ } -+ } -+ MultiPush(regs); -+} -+ -+void TurboAssembler::RestoreRegisters(RegList registers) { -+ DCHECK_GT(NumRegs(registers), 0); -+ RegList regs = 0; -+ for (int i = 0; i < Register::kNumRegisters; ++i) { -+ if ((registers >> i) & 1u) { -+ regs |= Register::from_code(i).bit(); -+ } -+ } -+ MultiPop(regs); -+} -+ -+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address, -+ SaveFPRegsMode fp_mode) { -+ EphemeronKeyBarrierDescriptor descriptor; -+ RegList registers = descriptor.allocatable_registers(); -+ -+ SaveRegisters(registers); -+ -+ Register object_parameter( -+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject)); -+ Register slot_parameter(descriptor.GetRegisterParameter( -+ EphemeronKeyBarrierDescriptor::kSlotAddress)); -+ Register fp_mode_parameter( -+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode)); -+ -+ Push(object); -+ Push(address); -+ -+ Pop(slot_parameter); -+ Pop(object_parameter); -+ -+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); -+ Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier), -+ RelocInfo::CODE_TARGET); -+ RestoreRegisters(registers); -+} -+ -+void TurboAssembler::CallRecordWriteStub( -+ Register object, Register address, -+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { -+ CallRecordWriteStub( -+ object, address, remembered_set_action, fp_mode, -+ isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), -+ kNullAddress); -+} -+ -+void TurboAssembler::CallRecordWriteStub( -+ Register object, Register address, -+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, -+ Address wasm_target) { -+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode, -+ Handle::null(), wasm_target); -+} -+ -+void TurboAssembler::CallRecordWriteStub( -+ Register object, Register address, -+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, -+ Handle 
code_target, Address wasm_target) { -+ DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); -+ // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, -+ // i.e. always emit remember set and save FP registers in RecordWriteStub. If -+ // large performance regression is observed, we should use these values to -+ // avoid unnecessary work. -+ -+ RecordWriteDescriptor descriptor; -+ RegList registers = descriptor.allocatable_registers(); -+ -+ SaveRegisters(registers); -+ Register object_parameter( -+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject)); -+ Register slot_parameter( -+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot)); -+ Register remembered_set_parameter( -+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet)); -+ Register fp_mode_parameter( -+ descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode)); -+ -+ Push(object); -+ Push(address); -+ -+ Pop(slot_parameter); -+ Pop(object_parameter); -+ -+ Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); -+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); -+ if (code_target.is_null()) { -+ Call(wasm_target, RelocInfo::WASM_STUB_CALL); -+ } else { -+ Call(code_target, RelocInfo::CODE_TARGET); -+ } -+ -+ RestoreRegisters(registers); -+} -+ -+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved) -+// The register 'object' contains a heap object pointer. The heap object -+// tag is shifted away. -+void MacroAssembler::RecordWrite(Register object, Register address, -+ Register value, RAStatus ra_status, -+ SaveFPRegsMode fp_mode, -+ RememberedSetAction remembered_set_action, -+ SmiCheck smi_check) { -+ DCHECK(!AreAliased(object, address, value)); -+ -+ if (emit_debug_code()) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ Ld_d(scratch, MemOperand(address, 0)); -+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch, -+ Operand(value)); -+ } -+ -+ if ((remembered_set_action == OMIT_REMEMBERED_SET && -+ !FLAG_incremental_marking) || -+ FLAG_disable_write_barriers) { -+ return; -+ } -+ -+ // First, check if a write barrier is even needed. The tests below -+ // catch stores of smis and stores into the young generation. -+ Label done; -+ -+ if (smi_check == INLINE_SMI_CHECK) { -+ DCHECK_EQ(0, kSmiTag); -+ JumpIfSmi(value, &done); -+ } -+ -+ CheckPageFlag(value, -+ value, // Used as scratch. -+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); -+ CheckPageFlag(object, -+ value, // Used as scratch. -+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); -+ -+ // Record the actual write. -+ if (ra_status == kRAHasNotBeenSaved) { -+ push(ra); -+ } -+ CallRecordWriteStub(object, address, remembered_set_action, fp_mode); -+ if (ra_status == kRAHasNotBeenSaved) { -+ pop(ra); -+ } -+ -+ bind(&done); -+ -+ // Clobber clobbered registers when running with the debug-code flag -+ // turned on to provoke errors. -+ if (emit_debug_code()) { -+ li(address, Operand(bit_cast(kZapValue + 12))); -+ li(value, Operand(bit_cast(kZapValue + 16))); -+ } -+} -+ -+// --------------------------------------------------------------------------- -+// Instruction macros. -+ -+void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ add_w(rd, rj, rk.rm()); -+ } else { -+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ addi_w(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. 
-+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ add_w(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ add_d(rd, rj, rk.rm()); -+ } else { -+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ addi_d(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ add_d(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ sub_w(rd, rj, rk.rm()); -+ } else { -+ DCHECK(is_int32(rk.immediate())); -+ if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { -+ addi_w(rd, rj, -+ static_cast( -+ -rk.immediate())); // No subi_w instr, use addi_w(x, y, -imm). -+ } else { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) { -+ // Use load -imm and addu when loading -imm generates one instruction. -+ li(scratch, -rk.immediate()); -+ add_w(rd, rj, scratch); -+ } else { -+ // li handles the relocation. -+ li(scratch, rk); -+ sub_w(rd, rj, scratch); -+ } -+ } -+ } -+} -+ -+void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ sub_d(rd, rj, rk.rm()); -+ } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { -+ addi_d(rd, rj, -+ static_cast( -+ -rk.immediate())); // No subi_d instr, use addi_d(x, y, -imm). -+ } else { -+ DCHECK(rj != t7); -+ int li_count = InstrCountForLi64Bit(rk.immediate()); -+ int li_neg_count = InstrCountForLi64Bit(-rk.immediate()); -+ if (li_neg_count < li_count && !MustUseReg(rk.rmode())) { -+ // Use load -imm and add_d when loading -imm generates one instruction. -+ DCHECK(rk.immediate() != std::numeric_limits::min()); -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, Operand(-rk.immediate())); -+ add_d(rd, rj, scratch); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, rk); -+ sub_d(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mul_w(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mul_w(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mulh_w(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mulh_w(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mulh_wu(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mulh_wu(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mul_d(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. 
-+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mul_d(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mulh_d(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mulh_d(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ div_w(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ div_w(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mod_w(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mod_w(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mod_wu(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mod_wu(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ div_d(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ div_d(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ div_wu(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ div_wu(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ div_du(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ div_du(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mod_d(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mod_d(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ mod_du(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ mod_du(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ and_(rd, rj, rk.rm()); -+ } else { -+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ andi(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. 
-+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ and_(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ or_(rd, rj, rk.rm()); -+ } else { -+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ ori(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ or_(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ xor_(rd, rj, rk.rm()); -+ } else { -+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ xori(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ xor_(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ nor(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ nor(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ andn(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ andn(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ orn(rd, rj, rk.rm()); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ orn(rd, rj, scratch); -+ } -+} -+ -+void TurboAssembler::Neg(Register rj, const Operand& rk) { -+ DCHECK(rk.is_reg()); -+ sub_d(rj, zero_reg, rk.rm()); -+} -+ -+void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ slt(rd, rj, rk.rm()); -+ } else { -+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ slti(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ // TODO why?? -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ slt(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ sltu(rd, rj, rk.rm()); -+ } else { -+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) { -+ sltui(rd, rj, static_cast(rk.immediate())); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ sltu(rd, rj, scratch); -+ } -+ } -+} -+ -+void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ slt(rd, rk.rm(), rj); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8; -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ slt(rd, scratch, rj); -+ } -+ xori(rd, rd, 1); -+} -+ -+void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ sltu(rd, rk.rm(), rj); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ sltu(rd, scratch, rj); -+ } -+ xori(rd, rd, 1); -+} -+ -+void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) { -+ Slt(rd, rj, rk); -+ xori(rd, rd, 1); -+} -+ -+void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { -+ Sltu(rd, rj, rk); -+ xori(rd, rd, 1); -+} -+ -+void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ slt(rd, rk.rm(), rj); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ slt(rd, scratch, rj); -+ } -+} -+ -+void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ sltu(rd, rk.rm(), rj); -+ } else { -+ // li handles the relocation. -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ DCHECK(rj != scratch); -+ li(scratch, rk); -+ sltu(rd, scratch, rj); -+ } -+} -+ -+void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ rotr_w(rd, rj, rk.rm()); -+ } else { -+ int64_t ror_value = rk.immediate() % 32; -+ if (ror_value < 0) { -+ ror_value += 32; -+ } -+ rotri_w(rd, rj, ror_value); -+ } -+} -+ -+void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { -+ if (rk.is_reg()) { -+ rotr_d(rd, rj, rk.rm()); -+ } else { -+ int64_t dror_value = rk.immediate() % 64; -+ if (dror_value < 0) dror_value += 64; -+ rotri_d(rd, rj, dror_value); -+ } -+} -+ -+void MacroAssembler::Pref(int32_t hint, const MemOperand& rj) { -+ // TODO -+ // pref(hint); -+} -+ -+void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, -+ Register scratch) { -+ DCHECK(sa >= 1 && sa <= 31); -+ if (sa <= 4) { -+ alsl_w(rd, rj, rk, sa); -+ } else { -+ Register tmp = rd == rk ? scratch : rd; -+ DCHECK(tmp != rk); -+ slli_w(tmp, rj, sa); -+ add_w(rd, rk, tmp); -+ } -+} -+ -+void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, -+ Register scratch) { -+ DCHECK(sa >= 1 && sa <= 31); -+ if (sa <= 4) { -+ alsl_d(rd, rj, rk, sa); -+ } else { -+ Register tmp = rd == rk ? 
scratch : rd; -+ DCHECK(tmp != rk); -+ slli_d(tmp, rj, sa); -+ add_d(rd, rk, tmp); -+ } -+} -+ -+// ------------Pseudo-instructions------------- -+ -+// Change endianness -+void TurboAssembler::ByteSwapSigned(Register dest, Register src, -+ int operand_size) { -+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); -+ if (operand_size == 2) { -+ revb_2h(dest, src); -+ ext_w_h(dest, dest); -+ } else if (operand_size == 4) { -+ revb_2w(dest, src); -+ slli_w(dest, dest, 0); -+ } else { -+ revb_d(dest, dest); -+ } -+} -+ -+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, -+ int operand_size) { -+ DCHECK(operand_size == 2 || operand_size == 4); -+ if (operand_size == 2) { -+ revb_2h(dest, src); -+ bstrins_d(dest, zero_reg, 63, 16); -+ } else { -+ revb_2w(dest, src); -+ bstrins_d(dest, zero_reg, 63, 32); -+ } -+} -+ -+void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_b(rd, source.base(), source.index()); -+ } else { -+ ld_b(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_bu(rd, source.base(), source.index()); -+ } else { -+ ld_bu(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::St_b(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ stx_b(rd, source.base(), source.index()); -+ } else { -+ st_b(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_h(rd, source.base(), source.index()); -+ } else { -+ ld_h(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_hu(rd, source.base(), source.index()); -+ } else { -+ ld_hu(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::St_h(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ stx_h(rd, source.base(), source.index()); -+ } else { -+ st_h(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); // TODO ldptr_w ?? 
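-+  // Roughly what AdjustBaseAndOffset does here (an inference from the
-+  // surrounding pattern, not a spec): an offset that fits the 12-bit
-+  // ld_w immediate is left in place; otherwise it is materialized into a
-+  // scratch index register, so e.g. MemOperand(a0, 0x12345) takes the
-+  // ldx_w (base + index) branch below.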
-+ if (source.hasIndexReg()) { -+ ldx_w(rd, source.base(), source.index()); -+ } else { -+ ld_w(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_wu(rd, source.base(), source.index()); -+ } else { -+ ld_wu(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::St_w(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ stx_w(rd, source.base(), source.index()); -+ } else { -+ st_w(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ ldx_d(rd, source.base(), source.index()); -+ } else { -+ ld_d(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::St_d(Register rd, const MemOperand& rj) { -+ MemOperand source = rj; -+ AdjustBaseAndOffset(&source); -+ if (source.hasIndexReg()) { -+ stx_d(rd, source.base(), source.index()); -+ } else { -+ st_d(rd, source.base(), source.offset()); -+ } -+} -+ -+void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { -+ MemOperand tmp = src; -+ AdjustBaseAndOffset(&tmp); -+ if (tmp.hasIndexReg()) { -+ fldx_s(fd, tmp.base(), tmp.index()); -+ } else { -+ fld_s(fd, tmp.base(), tmp.offset()); -+ } -+} -+ -+void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { -+ MemOperand tmp = src; -+ AdjustBaseAndOffset(&tmp); -+ if (tmp.hasIndexReg()) { -+ fstx_s(fs, tmp.base(), tmp.index()); -+ } else { -+ fst_s(fs, tmp.base(), tmp.offset()); -+ } -+} -+ -+void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { -+ MemOperand tmp = src; -+ AdjustBaseAndOffset(&tmp); -+ if (tmp.hasIndexReg()) { -+ fldx_d(fd, tmp.base(), tmp.index()); -+ } else { -+ fld_d(fd, tmp.base(), tmp.offset()); -+ } -+} -+ -+void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { -+ MemOperand tmp = src; -+ AdjustBaseAndOffset(&tmp); -+ if (tmp.hasIndexReg()) { -+ fstx_d(fs, tmp.base(), tmp.index()); -+ } else { -+ fst_d(fs, tmp.base(), tmp.offset()); -+ } -+} -+ -+void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { -+ DCHECK(!rj.hasIndexReg()); -+ bool is_one_instruction = is_int14(rj.offset()); -+ if (is_one_instruction) { -+ ll_w(rd, rj.base(), rj.offset()); -+ } else { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, rj.offset()); -+ add_d(scratch, scratch, rj.base()); -+ ll_w(rd, scratch, 0); -+ } -+} -+ -+void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { -+ DCHECK(!rj.hasIndexReg()); -+ bool is_one_instruction = is_int14(rj.offset()); -+ if (is_one_instruction) { -+ ll_d(rd, rj.base(), rj.offset()); -+ } else { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, rj.offset()); -+ add_d(scratch, scratch, rj.base()); -+ ll_d(rd, scratch, 0); -+ } -+} -+ -+void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { -+ DCHECK(!rj.hasIndexReg()); -+ bool is_one_instruction = is_int14(rj.offset()); -+ if (is_one_instruction) { -+ sc_w(rd, rj.base(), rj.offset()); -+ } else { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, rj.offset()); -+ add_d(scratch, scratch, rj.base()); -+ sc_w(rd, scratch, 0); -+ } -+} -+ -+void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { -+ 
DCHECK(!rj.hasIndexReg());
-+  bool is_one_instruction = is_int14(rj.offset());
-+  if (is_one_instruction) {
-+    sc_d(rd, rj.base(), rj.offset());
-+  } else {
-+    UseScratchRegisterScope temps(this);
-+    Register scratch = temps.Acquire();
-+    li(scratch, rj.offset());
-+    add_d(scratch, scratch, rj.base());
-+    sc_d(rd, scratch, 0);
-+  }
-+}
-+
-+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
-+  // non-isolate-independent code. In many cases it might be cheaper than
-+  // embedding the relocatable value.
-+  if (root_array_available_ && options().isolate_independent_code) {
-+    IndirectLoadConstant(dst, value);
-+    return;
-+  }
-+  li(dst, Operand(value), mode);
-+}
-+
-+void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-+  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
-+  // non-isolate-independent code. In many cases it might be cheaper than
-+  // embedding the relocatable value.
-+  if (root_array_available_ && options().isolate_independent_code) {
-+    IndirectLoadExternalReference(dst, value);
-+    return;
-+  }
-+  li(dst, Operand(value), mode);
-+}
-+
-+void TurboAssembler::li(Register dst, const StringConstantBase* string,
-+                        LiFlags mode) {
-+  li(dst, Operand::EmbeddedStringConstant(string), mode);
-+}
-+
-+static inline int InstrCountForLiLower32Bit(int64_t value) {
-+  if (is_int12(static_cast<int32_t>(value)) ||
-+      is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
-+    return 1;
-+  } else {
-+    return 2;
-+  }
-+}
-+
-+void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
-+  if (is_int12(static_cast<int32_t>(j.immediate()))) {
-+    addi_d(rd, zero_reg, j.immediate());
-+  } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
-+    ori(rd, zero_reg, j.immediate() & kImm12Mask);
-+  } else {
-+    lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
-+    if (j.immediate() & kImm12Mask) {
-+      ori(rd, rd, j.immediate() & kImm12Mask);
-+    }
-+  }
-+}
-+
-+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
-+  if (is_int32(value)) {
-+    return InstrCountForLiLower32Bit(value);
-+  } else if (is_int52(value)) {
-+    return InstrCountForLiLower32Bit(value) + 1;
-+  } else if ((value & 0xffffffffL) == 0) {
-+    // 32 LSBs (Least Significant Bits) all set to zero.
-+    uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
-+    uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
-+    if (tzc >= 20) {
-+      return 1;
-+    } else if (tzc + lzc > 12) {
-+      return 2;
-+    } else {
-+      return 3;
-+    }
-+  } else {
-+    int64_t imm21 = (value >> 31) & 0x1fffffL;
-+    if (imm21 != 0x1fffffL && imm21 != 0) {
-+      return InstrCountForLiLower32Bit(value) + 2;
-+    } else {
-+      return InstrCountForLiLower32Bit(value) + 1;
-+    }
-+  }
-+  UNREACHABLE();
-+  return INT_MAX;
-+}
-+
-+// All changes to if...else conditions here must be added to
-+// InstrCountForLi64Bit as well.
-+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
-+  DCHECK(!j.is_reg());
-+  DCHECK(!MustUseReg(j.rmode()));
-+  DCHECK(mode == OPTIMIZE_SIZE);
-+  int64_t imm = j.immediate();
-+  BlockTrampolinePoolScope block_trampoline_pool(this);
-+  // Normal load of an immediate value which does not need Relocation Info.
-+  if (is_int32(imm)) {
-+    LiLower32BitHelper(rd, j);
-+  } else if (is_int52(imm)) {
-+    LiLower32BitHelper(rd, j);
-+    lu32i_d(rd, imm >> 32 & 0xfffff);
-+  } else if ((imm & 0xffffffffL) == 0) {
-+    // 32 LSBs (Least Significant Bits) all set to zero.
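-+    // Worked example for this branch (illustrative): imm =
-+    // 0x7FF0000000000000 has all low 32 bits clear, and its high word
-+    // 0x7FF00000 has 20 trailing zeros, so a single
-+    // lu52i_d(rd, zero_reg, 0x7FF) below suffices.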
-+ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32); -+ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32); -+ if (tzc >= 20) { -+ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask); -+ } else if (tzc + lzc > 12) { -+ int32_t mask = (1 << (32 - tzc)) - 1; -+ lu12i_w(rd, imm >> (tzc + 32) & mask); -+ slli_d(rd, rd, tzc + 20); -+ } else { -+ xor_(rd, rd, rd); -+ lu32i_d(rd, imm >> 32 & 0xfffff); -+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); -+ } -+ } else { -+ int64_t imm21 = (imm >> 31) & 0x1fffffL; -+ LiLower32BitHelper(rd, j); -+ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff); -+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); -+ } -+} -+ -+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { -+ DCHECK(!j.is_reg()); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { -+ li_optimized(rd, j, mode); -+ } else if (MustUseReg(j.rmode())) { -+ int64_t immediate; -+ if (j.IsHeapObjectRequest()) { -+ RequestHeapObject(j.heap_object_request()); -+ immediate = 0; -+ } else { -+ immediate = j.immediate(); -+ } -+ -+ RecordRelocInfo(j.rmode(), immediate); -+ lu12i_w(rd, immediate >> 12 & 0xfffff); -+ ori(rd, rd, immediate & kImm12Mask); -+ lu32i_d(rd, immediate >> 32 & 0xfffff); -+ } else if (mode == ADDRESS_LOAD) { -+ // We always need the same number of instructions as we may need to patch -+ // this code to load another value which may need all 3 instructions. -+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); -+ ori(rd, rd, j.immediate() & kImm12Mask); -+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); -+ } else { // mode == CONSTANT_SIZE - always emit the same instruction -+ // sequence. -+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); -+ ori(rd, rd, j.immediate() & kImm12Mask); -+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); -+ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask); -+ } -+} -+ -+void TurboAssembler::MultiPush(RegList regs) { -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { -+ DCHECK_EQ(regs1 & regs2, 0); -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs1 & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs2 & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { -+ DCHECK_EQ(regs1 & regs2, 0); -+ DCHECK_EQ(regs1 & regs3, 0); -+ DCHECK_EQ(regs2 & regs3, 0); -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs1 & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs2 & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs3 & (1 << i)) != 0) { -+ stack_offset -= kPointerSize; -+ St_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ 
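-+// A minimal pairing sketch for the push/pop helpers above (hypothetical
-+// caller, not from the original patch). Registers are stored in descending
-+// register-code order and reloaded in ascending order, so a MultiPop must
-+// mirror its MultiPush:
-+//
-+//   RegList saved = a0.bit() | a1.bit() | s0.bit();
-+//   MultiPush(saved);  // stores s0, a1, a0 below sp, then sp -= 3 * kPointerSize
-+//   // ... code that clobbers a0/a1/s0 ...
-+//   MultiPop(saved);   // reloads a0, a1, s0, then sp += 3 * kPointerSize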
-+void TurboAssembler::MultiPop(RegList regs) { -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { -+ DCHECK_EQ(regs1 & regs2, 0); -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs2 & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs1 & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { -+ DCHECK_EQ(regs1 & regs2, 0); -+ DCHECK_EQ(regs1 & regs3, 0); -+ DCHECK_EQ(regs2 & regs3, 0); -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs3 & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs2 & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs1 & (1 << i)) != 0) { -+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); -+ stack_offset += kPointerSize; -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::MultiPushFPU(RegList regs) { -+ int16_t num_to_push = base::bits::CountPopulation(regs); -+ int16_t stack_offset = num_to_push * kDoubleSize; -+ -+ Sub_d(sp, sp, Operand(stack_offset)); -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ if ((regs & (1 << i)) != 0) { -+ stack_offset -= kDoubleSize; -+ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); -+ } -+ } -+} -+ -+void TurboAssembler::MultiPopFPU(RegList regs) { -+ int16_t stack_offset = 0; -+ -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ if ((regs & (1 << i)) != 0) { -+ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); -+ stack_offset += kDoubleSize; -+ } -+ } -+ addi_d(sp, sp, stack_offset); -+} -+ -+void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, -+ uint16_t lsbw) { -+ DCHECK_LT(lsbw, msbw); -+ DCHECK_LT(lsbw, 32); -+ DCHECK_LT(msbw, 32); -+ bstrpick_w(rk, rj, msbw, lsbw); -+} -+ -+void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, -+ uint16_t lsbw) { -+ DCHECK_LT(lsbw, msbw); -+ DCHECK_LT(lsbw, 64); -+ DCHECK_LT(msbw, 64); -+ bstrpick_d(rk, rj, msbw, lsbw); -+} -+ -+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } -+ -+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } -+ -+void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { -+ // Move the data from fs to t8. -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ movfr2gr_s(t8, fj); -+ Ffint_d_uw(fd, t8); -+} -+ -+void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ -+ // Convert rj to a FP value in fd. -+ DCHECK(rj != t7); -+ -+ // Zero extend int32 in rj. 
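-+  // Example (illustrative): rj = 0xFFFFFFFF. Bstrpick_d below clears bits
-+  // 63..32, leaving a non-negative int64, so ffint_d_l converts it to
-+  // exactly 4294967295.0 -- the unsigned reading of the word.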
-+ Bstrpick_d(t7, rj, 31, 0); -+ movgr2fr_d(fd, t7); -+ ffint_d_l(fd, fd); -+} -+ -+void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Move the data from fs to t8. -+ movfr2gr_d(t8, fj); -+ Ffint_d_ul(fd, t8); -+} -+ -+void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Convert rj to a FP value in fd. -+ -+ DCHECK(rj != t7); -+ -+ Label msb_clear, conversion_done; -+ -+ Branch(&msb_clear, ge, rj, Operand(zero_reg)); -+ -+ // Rj >= 2^63 -+ andi(t7, rj, 1); -+ srli_d(rj, rj, 1); -+ or_(t7, t7, rj); -+ movgr2fr_d(fd, t7); -+ ffint_d_l(fd, fd); -+ fadd_d(fd, fd, fd); -+ Branch(&conversion_done); -+ -+ bind(&msb_clear); -+ // Rs < 2^63, we can do simple conversion. -+ movgr2fr_d(fd, rj); -+ ffint_d_l(fd, fd); -+ -+ bind(&conversion_done); -+} -+ -+void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Move the data from fs to t8. -+ movfr2gr_d(t8, fj); -+ Ffint_s_uw(fd, t8); -+} -+ -+void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Convert rj to a FP value in fd. -+ DCHECK(rj != t7); -+ -+ // Zero extend int32 in rj. -+ bstrpick_d(t7, rj, 31, 0); -+ movgr2fr_d(fd, t7); -+ ffint_s_l(fd, fd); -+} -+ -+void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Move the data from fs to t8. -+ movfr2gr_d(t8, fj); -+ Ffint_s_ul(fd, t8); -+} -+ -+void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Convert rj to a FP value in fd. -+ -+ DCHECK(rj != t7); -+ -+ Label positive, conversion_done; -+ -+ Branch(&positive, ge, rj, Operand(zero_reg)); -+ -+ // Rs >= 2^31. -+ andi(t7, rj, 1); -+ srli_d(rj, rj, 1); -+ or_(t7, t7, rj); -+ movgr2fr_d(fd, t7); -+ ffint_s_l(fd, fd); -+ fadd_s(fd, fd, fd); -+ Branch(&conversion_done); -+ -+ bind(&positive); -+ // Rs < 2^31, we can do simple conversion. -+ movgr2fr_d(fd, rj); -+ ffint_s_l(fd, fd); -+ -+ bind(&conversion_done); -+} -+ -+void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) { -+ ftintrne_l_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) { -+ ftintrm_l_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) { -+ ftintrp_l_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) { -+ ftintrz_l_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj, -+ FPURegister scratch) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Load to GPR. -+ movfr2gr_d(t8, fj); -+ // Reset sign bit. 
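-+  // Clearing bit 63 of the raw IEEE-754 pattern is a bitwise fabs
-+  // (illustration): -3.5 is 0xC00C000000000000; after the AND below it
-+  // becomes 0x400C000000000000, i.e. 3.5.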
-+ { -+ UseScratchRegisterScope temps(this); -+ Register scratch1 = temps.Acquire(); -+ li(scratch1, 0x7FFFFFFFFFFFFFFFl); -+ and_(t8, t8, scratch1); -+ } -+ movgr2fr_d(scratch, t8); -+ Ftintrz_l_d(fd, scratch); -+} -+ -+void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, -+ FPURegister scratch) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Ftintrz_uw_d(t8, fj, scratch); -+ movgr2fr_w(fd, t8); -+} -+ -+void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, -+ FPURegister scratch) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Ftintrz_uw_s(t8, fj, scratch); -+ movgr2fr_w(fd, t8); -+} -+ -+void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, -+ FPURegister scratch, Register result) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Ftintrz_ul_d(t8, fj, scratch, result); -+ movgr2fr_d(fd, t8); -+} -+ -+void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, -+ FPURegister scratch, Register result) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Ftintrz_ul_s(t8, fj, scratch, result); -+ movgr2fr_d(fd, t8); -+} -+ -+void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) { -+ ftintrz_w_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) { -+ ftintrne_w_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) { -+ ftintrm_w_d(fd, fj); -+} -+ -+void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) { -+ ftintrp_w_d(fd, fj); -+} -+ -+void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, -+ FPURegister scratch) { -+ DCHECK(fj != scratch); -+ DCHECK(rd != t7); -+ -+ { -+ // Load 2^31 into scratch as its float representation. -+ UseScratchRegisterScope temps(this); -+ Register scratch1 = temps.Acquire(); -+ li(scratch1, 0x41E00000); -+ movgr2fr_w(scratch, zero_reg); -+ movgr2frh_w(scratch, scratch1); -+ } -+ // Test if scratch > fd. -+ // If fd < 2^31 we can convert it normally. -+ Label simple_convert; -+ CompareF64(fj, scratch, CLT); -+ BranchTrueShortF(&simple_convert); -+ -+ // First we subtract 2^31 from fd, then trunc it to rs -+ // and add 2^31 to rj. -+ fsub_d(scratch, fj, scratch); -+ ftintrz_w_d(scratch, scratch); -+ movfr2gr_s(rd, scratch); -+ Or(rd, rd, 1 << 31); -+ -+ Label done; -+ Branch(&done); -+ // Simple conversion. -+ bind(&simple_convert); -+ ftintrz_w_d(scratch, fj); -+ movfr2gr_s(rd, scratch); -+ -+ bind(&done); -+} -+ -+void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, -+ FPURegister scratch) { -+ DCHECK(fj != scratch); -+ DCHECK(rd != t7); -+ { -+ // Load 2^31 into scratch as its float representation. -+ UseScratchRegisterScope temps(this); -+ Register scratch1 = temps.Acquire(); -+ li(scratch1, 0x4F000000); -+ movgr2fr_w(scratch, scratch1); -+ } -+ // Test if scratch > fs. -+ // If fs < 2^31 we can convert it normally. -+ Label simple_convert; -+ CompareF32(fj, scratch, CLT); -+ BranchTrueShortF(&simple_convert); -+ -+ // First we subtract 2^31 from fs, then trunc it to rd -+ // and add 2^31 to rd. -+ fsub_s(scratch, fj, scratch); -+ ftintrz_w_s(scratch, scratch); -+ movfr2gr_s(rd, scratch); -+ Or(rd, rd, 1 << 31); -+ -+ Label done; -+ Branch(&done); -+ // Simple conversion. -+ bind(&simple_convert); -+ ftintrz_w_s(scratch, fj); -+ movfr2gr_s(rd, scratch); -+ -+ bind(&done); -+} -+ -+void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, -+ FPURegister scratch, Register result) { -+ DCHECK(fj != scratch); -+ DCHECK(result.is_valid() ? 
!AreAliased(rd, result, t7) : !AreAliased(rd, t7));
-+
-+  Label simple_convert, done, fail;
-+  if (result.is_valid()) {
-+    mov(result, zero_reg);
-+    Move(scratch, -1.0);
-+    // If fd <= -1 or unordered, then the conversion fails.
-+    CompareF64(fj, scratch, CLE);
-+    BranchTrueShortF(&fail);
-+    CompareIsNanF64(fj, scratch);
-+    BranchTrueShortF(&fail);
-+  }
-+
-+  // Load 2^63 into scratch as its double representation.
-+  li(t7, 0x43E0000000000000);
-+  movgr2fr_d(scratch, t7);
-+
-+  // Test if scratch > fs.
-+  // If fs < 2^63 we can convert it normally.
-+  CompareF64(fj, scratch, CLT);
-+  BranchTrueShortF(&simple_convert);
-+
-+  // First we subtract 2^63 from fs, then trunc it to rd
-+  // and add 2^63 to rd.
-+  fsub_d(scratch, fj, scratch);
-+  ftintrz_l_d(scratch, scratch);
-+  movfr2gr_d(rd, scratch);
-+  Or(rd, rd, Operand(1UL << 63));
-+  Branch(&done);
-+
-+  // Simple conversion.
-+  bind(&simple_convert);
-+  ftintrz_l_d(scratch, fj);
-+  movfr2gr_d(rd, scratch);
-+
-+  bind(&done);
-+  if (result.is_valid()) {
-+    // The conversion failed if the result is negative.
-+    {
-+      UseScratchRegisterScope temps(this);
-+      Register scratch1 = temps.Acquire();
-+      addi_d(scratch1, zero_reg, -1);
-+      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
-+      movfr2gr_d(result, scratch);
-+      xor_(result, result, scratch1);
-+    }
-+    Slt(result, zero_reg, result);
-+  }
-+
-+  bind(&fail);
-+}
-+
-+void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
-+                                  FPURegister scratch, Register result) {
-+  DCHECK(fj != scratch);
-+  DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
-+
-+  Label simple_convert, done, fail;
-+  if (result.is_valid()) {
-+    mov(result, zero_reg);
-+    Move(scratch, -1.0f);
-+    // If fd <= -1 or unordered, then the conversion fails.
-+    CompareF32(fj, scratch, CLE);
-+    BranchTrueShortF(&fail);
-+    CompareIsNanF32(fj, scratch);
-+    BranchTrueShortF(&fail);
-+  }
-+
-+  {
-+    // Load 2^63 into scratch as its float representation.
-+    UseScratchRegisterScope temps(this);
-+    Register scratch1 = temps.Acquire();
-+    li(scratch1, 0x5F000000);
-+    movgr2fr_w(scratch, scratch1);
-+  }
-+
-+  // Test if scratch > fs.
-+  // If fs < 2^63 we can convert it normally.
-+  CompareF32(fj, scratch, CLT);
-+  BranchTrueShortF(&simple_convert);
-+
-+  // First we subtract 2^63 from fs, then trunc it to rd
-+  // and add 2^63 to rd.
-+  fsub_s(scratch, fj, scratch);
-+  ftintrz_l_s(scratch, scratch);
-+  movfr2gr_d(rd, scratch);
-+  Or(rd, rd, Operand(1UL << 63));
-+  Branch(&done);
-+
-+  // Simple conversion.
-+  bind(&simple_convert);
-+  ftintrz_l_s(scratch, fj);
-+  movfr2gr_d(rd, scratch);
-+
-+  bind(&done);
-+  if (result.is_valid()) {
-+    // The conversion failed if the result is negative or unordered.
-+    {
-+      UseScratchRegisterScope temps(this);
-+      Register scratch1 = temps.Acquire();
-+      addi_d(scratch1, zero_reg, -1);
-+      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
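-+      // Why this works (a sketch, assuming ftintrz saturates to
-+      // 0x7FFFFFFFFFFFFFFF on overflow): XOR-ing the raw result with
-+      // 2^63 - 1 is positive exactly when the sign bit is clear and the
-+      // value is not the saturation pattern, so the Slt below yields 1
-+      // only for a successful conversion.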
-+      movfr2gr_d(result, scratch);
-+      xor_(result, result, scratch1);
-+    }
-+    Slt(result, zero_reg, result);
-+  }
-+
-+  bind(&fail);
-+}
-+
-+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
-+                                 FPURoundingMode mode) {
-+  BlockTrampolinePoolScope block_trampoline_pool(this);
-+  Register scratch = t8;
-+  movfcsr2gr(scratch);
-+  li(t7, Operand(mode));
-+  movgr2fcsr(t7);
-+  frint_d(dst, src);
-+  movgr2fcsr(scratch);
-+}
-+
-+void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
-+  RoundDouble(dst, src, mode_floor);
-+}
-+
-+void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
-+  RoundDouble(dst, src, mode_ceil);
-+}
-+
-+void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
-+  RoundDouble(dst, src, mode_trunc);
-+}
-+
-+void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
-+  RoundDouble(dst, src, mode_round);
-+}
-+
-+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
-+                                FPURoundingMode mode) {
-+  BlockTrampolinePoolScope block_trampoline_pool(this);
-+  Register scratch = t8;
-+  movfcsr2gr(scratch);
-+  li(t7, Operand(mode));
-+  movgr2fcsr(t7);
-+  frint_s(dst, src);
-+  movgr2fcsr(scratch);
-+}
-+
-+void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
-+  RoundFloat(dst, src, mode_floor);
-+}
-+
-+void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
-+  RoundFloat(dst, src, mode_ceil);
-+}
-+
-+void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
-+  RoundFloat(dst, src, mode_trunc);
-+}
-+
-+void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
-+  RoundFloat(dst, src, mode_round);
-+}
-+
-+void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
-+                              FPUCondition cc, CFRegister cd, bool f32) {
-+  if (f32) {
-+    fcmp_cond_s(cc, cmp1, cmp2, cd);
-+  } else {
-+    fcmp_cond_d(cc, cmp1, cmp2, cd);
-+  }
-+}
-+
-+void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
-+                                   CFRegister cd, bool f32) {
-+  CompareF(cmp1, cmp2, CUN, cd, f32);
-+}
-+
-+void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
-+  bcnez(cj, target);
-+}
-+
-+void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
-+  bceqz(cj, target);
-+}
-+
-+void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
-+  // TODO: can be optimized.
-+  bool long_branch = target->is_bound()
-+                         ? !is_near(target, OffsetSize::kOffset21)
-+                         : is_trampoline_emitted();
-+  if (long_branch) {
-+    Label skip;
-+    BranchFalseShortF(&skip, cj);
-+    Branch(target);
-+    bind(&skip);
-+  } else {
-+    BranchTrueShortF(target, cj);
-+  }
-+}
-+
-+void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
-+  bool long_branch = target->is_bound()
-+                         ? !is_near(target, OffsetSize::kOffset21)
-+                         : is_trampoline_emitted();
-+  if (long_branch) {
-+    Label skip;
-+    BranchTrueShortF(&skip, cj);
-+    Branch(target);
-+    bind(&skip);
-+  } else {
-+    BranchFalseShortF(target, cj);
-+  }
-+}
-+
-+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
-+  UseScratchRegisterScope temps(this);
-+  Register scratch = temps.Acquire();
-+  DCHECK(src_low != scratch);
-+  movfrh2gr_s(scratch, dst);
-+  movgr2fr_w(dst, src_low);
-+  movgr2frh_w(dst, scratch);
-+}
-+
-+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
-+  UseScratchRegisterScope temps(this);
-+  Register scratch = temps.Acquire();
-+  li(scratch, Operand(static_cast<int32_t>(src)));
-+  movgr2fr_w(dst, scratch);
-+}
-+
-+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
-+  // Handle special values first.
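-+  // The fast paths below only apply once kDoubleRegZero holds 0.0
-+  // (illustration): after that, Move(fd, bit_cast<uint64_t>(0.0)) collapses
-+  // to a single fmov_d, and -0.0 to a single fneg_d of the cached zero.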
-+ if (src == bit_cast(0.0) && has_double_zero_reg_set_) { -+ fmov_d(dst, kDoubleRegZero); -+ } else if (src == bit_cast(-0.0) && has_double_zero_reg_set_) { -+ Neg_d(dst, kDoubleRegZero); -+ } else { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, Operand(static_cast(src))); -+ movgr2fr_d(dst, scratch); -+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true; -+ } -+} -+ -+void TurboAssembler::Movz(Register rd, Register rj, Register rk) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ maskeqz(scratch, rj, rk); -+ masknez(rd, rd, rk); -+ or_(rd, rd, scratch); -+} -+ -+void TurboAssembler::Movn(Register rd, Register rj, Register rk) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ masknez(scratch, rj, rk); -+ maskeqz(rd, rd, rk); -+ or_(rd, rd, scratch); -+} -+ -+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, -+ const Operand& rk, Condition cond) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ switch (cond) { -+ case cc_always: -+ mov(rd, zero_reg); -+ break; -+ case eq: -+ if (rj == zero_reg) { -+ if (rk.is_reg()) { -+ LoadZeroIfConditionZero(rd, rk.rm()); -+ } else { -+ if (rk.immediate() == 0) { -+ mov(rd, zero_reg); -+ } else { -+ // nop(); -+ } -+ } -+ } else if (IsZero(rk)) { -+ LoadZeroIfConditionZero(rd, rj); -+ } else { -+ Sub_d(t7, rj, rk); -+ LoadZeroIfConditionZero(rd, t7); -+ } -+ break; -+ case ne: -+ if (rj == zero_reg) { -+ if (rk.is_reg()) { -+ LoadZeroIfConditionNotZero(rd, rk.rm()); -+ } else { -+ if (rk.immediate() != 0) { -+ mov(rd, zero_reg); -+ } else { -+ // nop(); -+ } -+ } -+ } else if (IsZero(rk)) { -+ LoadZeroIfConditionNotZero(rd, rj); -+ } else { -+ Sub_d(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ } -+ break; -+ -+ // Signed comparison. -+ case greater: -+ Sgt(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ break; -+ case greater_equal: -+ Sge(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj >= rk -+ break; -+ case less: -+ Slt(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj < rk -+ break; -+ case less_equal: -+ Sle(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj <= rk -+ break; -+ -+ // Unsigned comparison. 
-+ case Ugreater: -+ Sgtu(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj > rk -+ break; -+ -+ case Ugreater_equal: -+ Sgeu(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj >= rk -+ break; -+ case Uless: -+ Sltu(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj < rk -+ break; -+ case Uless_equal: -+ Sleu(t7, rj, rk); -+ LoadZeroIfConditionNotZero(rd, t7); -+ // rj <= rk -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, -+ Register condition) { -+ maskeqz(dest, dest, condition); -+} -+ -+void TurboAssembler::LoadZeroIfConditionZero(Register dest, -+ Register condition) { -+ masknez(dest, dest, condition); -+} -+ -+void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ movcf2gr(scratch, cc); -+ LoadZeroIfConditionNotZero(dest, scratch); -+} -+ -+void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ movcf2gr(scratch, cc); -+ LoadZeroIfConditionZero(dest, scratch); -+} -+ -+void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } -+ -+void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } -+ -+void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); } -+ -+void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } -+ -+// TODO: Optimize like arm64, use simd instruction -+void TurboAssembler::Popcnt_w(Register rd, Register rj) { -+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel -+ // -+ // A generalization of the best bit counting method to integers of -+ // bit-widths up to 128 (parameterized by type T) is this: -+ // -+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp -+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp -+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp -+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count -+ // -+ // For comparison, for 32-bit quantities, this algorithm can be executed -+ // using 20 MIPS instructions (the calls to LoadConst32() generate two -+ // machine instructions each for the values being used in this algorithm). -+ // A(n unrolled) loop-based algorithm requires 25 instructions. -+ // -+ // For a 64-bit operand this can be performed in 24 instructions compared -+ // to a(n unrolled) loop based algorithm which requires 38 instructions. -+ // -+ // There are algorithms which are faster in the cases where very few -+ // bits are set but the algorithm here attempts to minimize the total -+ // number of instructions executed even when a large number of bits -+ // are set. 
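-+  // Worked 32-bit example (illustrative): v = 0xF0.
-+  //   v = 0xF0 - ((0xF0 >> 1) & 0x55555555)                 -> 0xA0 (2-bit sums)
-+  //   v = (0xA0 & 0x33333333) + ((0xA0 >> 2) & 0x33333333)  -> 0x40 (nibble sums)
-+  //   v = (0x40 + (0x40 >> 4)) & 0x0F0F0F0F                 -> 0x04 (byte sums)
-+  //   (0x04 * 0x01010101) >> 24                             -> 4 set bits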
-+ int32_t B0 = 0x55555555; // (T)~(T)0/3 -+ int32_t B1 = 0x33333333; // (T)~(T)0/15*3 -+ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 -+ int32_t value = 0x01010101; // (T)~(T)0/255 -+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE -+ -+ UseScratchRegisterScope temps(this); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.Acquire(); -+ Register scratch2 = t8; -+ srli_w(scratch, rj, 1); -+ li(scratch2, B0); -+ And(scratch, scratch, scratch2); -+ Sub_w(scratch, rj, scratch); -+ li(scratch2, B1); -+ And(rd, scratch, scratch2); -+ srli_w(scratch, scratch, 2); -+ And(scratch, scratch, scratch2); -+ Add_w(scratch, rd, scratch); -+ srli_w(rd, scratch, 4); -+ Add_w(rd, rd, scratch); -+ li(scratch2, B2); -+ And(rd, rd, scratch2); -+ li(scratch, value); -+ Mul_w(rd, rd, scratch); -+ srli_w(rd, rd, shift); -+} -+ -+void TurboAssembler::Popcnt_d(Register rd, Register rj) { -+ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 -+ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 -+ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15 -+ int64_t value = 0x0101010101010101l; // (T)~(T)0/255 -+ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE -+ -+ UseScratchRegisterScope temps(this); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.Acquire(); -+ Register scratch2 = t8; -+ srli_d(scratch, rj, 1); -+ li(scratch2, B0); -+ And(scratch, scratch, scratch2); -+ Sub_d(scratch, rj, scratch); -+ li(scratch2, B1); -+ And(rd, scratch, scratch2); -+ srli_d(scratch, scratch, 2); -+ And(scratch, scratch, scratch2); -+ Add_d(scratch, rd, scratch); -+ srli_d(rd, scratch, 4); -+ Add_d(rd, rd, scratch); -+ li(scratch2, B2); -+ And(rd, rd, scratch2); -+ li(scratch, value); -+ Mul_d(rd, rd, scratch); -+ srli_d(rd, rd, shift); -+} -+ -+void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, -+ int size, bool sign_extend) { -+ sra_d(dest, source, pos); -+ bstrpick_d(dest, dest, size - 1, 0); -+ if (sign_extend) { -+ switch (size) { -+ case 8: -+ ext_w_b(dest, dest); -+ break; -+ case 16: -+ ext_w_h(dest, dest); -+ break; -+ case 32: -+ // sign-extend word -+ slli_w(dest, dest, 0); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ } -+} -+ -+void TurboAssembler::InsertBits(Register dest, Register source, Register pos, -+ int size) { -+ Rotr_d(dest, dest, pos); -+ bstrins_d(dest, source, size - 1, 0); -+ { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ Sub_d(scratch, zero_reg, pos); -+ Rotr_d(dest, dest, scratch); -+ } -+} -+ -+void MacroAssembler::EmitFPUTruncate( -+ FPURoundingMode rounding_mode, Register result, DoubleRegister double_input, -+ Register scratch, DoubleRegister double_scratch, Register except_flag, -+ CheckForInexactConversion check_inexact) { -+ break_(3); -+} -+ -+void TurboAssembler::TryInlineTruncateDoubleToI(Register result, -+ DoubleRegister double_input, -+ Label* done) { -+ DoubleRegister single_scratch = kScratchDoubleReg.low(); -+ UseScratchRegisterScope temps(this); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.Acquire(); -+ Register scratch2 = t7; -+ -+ // Clear cumulative exception flags and save the FCSR. -+ /* movfcsr2gr(scratch2, FCSR); -+ movgr2fcsr(FCSR, zero_reg); -+ // Try a conversion to a signed integer. -+ ftintrz_w_d(single_scratch, double_input); -+ movfr2gr_w(result, single_scratch); -+ // Retrieve and restore the FCSR. 
-+ movfcsr2gr(scratch, FCSR); -+ movgr2fcsr(FCSR, scratch2); -+ // Check for overflow and NaNs. -+ And(scratch, scratch, -+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | -+ kFCSRInvalidOpFlagMask); -+ // If we had no exceptions we are done. -+ Branch(done, eq, scratch, Operand(zero_reg));*/ -+ -+ CompareIsNanF64(double_input, double_input); -+ Move(result, zero_reg); -+ bcnez(FCC0, done); -+ ftintrz_l_d(single_scratch, double_input); -+ movfr2gr_d(scratch2, single_scratch); -+ li(scratch, 1L << 63); -+ Xor(scratch, scratch, scratch2); -+ rotri_d(scratch2, scratch, 1); -+ movfr2gr_s(result, single_scratch); -+ Branch(done, ne, scratch, Operand(scratch2)); -+} -+ -+void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, -+ Register result, -+ DoubleRegister double_input, -+ StubCallMode stub_mode) { -+ Label done; -+ -+ TryInlineTruncateDoubleToI(result, double_input, &done); -+ -+ // If we fell through then inline version didn't succeed - call stub instead. -+ Sub_d(sp, sp, -+ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack. -+ St_d(ra, MemOperand(sp, kSystemPointerSize)); -+ Fst_d(double_input, MemOperand(sp, 0)); -+ -+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { -+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); -+ } else { -+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); -+ } -+ -+ Pop(ra, result); -+ bind(&done); -+} -+ -+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. -+#define BRANCH_ARGS_CHECK(cond, rj, rk) \ -+ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \ -+ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg))) -+ -+void TurboAssembler::Branch(Label* L, bool need_link) { -+ int offset = GetOffset(L, OffsetSize::kOffset26); -+ if (need_link) { -+ bl(offset); -+ } else { -+ b(offset); -+ } -+} -+ -+void TurboAssembler::Branch(Label* L, Condition cond, Register rj, -+ const Operand& rk, bool need_link) { -+ if (L->is_bound()) { -+ BRANCH_ARGS_CHECK(cond, rj, rk); -+ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) { -+ if (cond != cc_always) { -+ Label skip; -+ Condition neg_cond = NegateCondition(cond); -+ BranchShort(&skip, neg_cond, rj, rk, need_link); -+ Branch(L, need_link); -+ bind(&skip); -+ } else { -+ Branch(L); -+ } -+ } -+ } else { -+ if (is_trampoline_emitted()) { -+ if (cond != cc_always) { -+ Label skip; -+ Condition neg_cond = NegateCondition(cond); -+ BranchShort(&skip, neg_cond, rj, rk, need_link); -+ Branch(L, need_link); -+ bind(&skip); -+ } else { -+ Branch(L); -+ } -+ } else { -+ BranchShort(L, cond, rj, rk, need_link); -+ } -+ } -+} -+ -+void TurboAssembler::Branch(Label* L, Condition cond, Register rj, -+ RootIndex index) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ LoadRoot(scratch, index); -+ Branch(L, cond, rj, Operand(scratch)); -+} -+ -+int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) { -+ return branch_offset_helper(L, bits) >> 2; -+} -+ -+Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, -+ Register scratch) { -+ Register r2 = no_reg; -+ if (rk.is_reg()) { -+ r2 = rk.rm(); -+ } else { -+ r2 = scratch; -+ li(r2, rk); -+ } -+ -+ return r2; -+} -+ -+bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, -+ Register rj, const Operand& rk, -+ bool need_link) { -+ UseScratchRegisterScope temps(this); -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8;
-+
-+  // Be careful to always use shifted_branch_offset only just before the
-+  // branch instruction, as the location will be remembered for patching the
-+  // target.
-+  {
-+    BlockTrampolinePoolScope block_trampoline_pool(this);
-+    int offset = 0;
-+    switch (cond) {
-+      case cc_always:
-+        if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
-+        offset = GetOffset(L, OffsetSize::kOffset26);
-+        if (need_link) {
-+          bl(offset);
-+        } else {
-+          b(offset);
-+        }
-+        break;
-+      case eq:
-+        if (rk.is_reg() && rj.code() == rk.rm().code()) {
-+          // beq is used here to make the code patchable. Otherwise b should
-+          // be used, which has no condition field and so is not patchable.
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          offset = GetOffset(L, OffsetSize::kOffset16);
-+          beq(rj, rj, offset);
-+        } else if (IsZero(rk)) {
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          offset = GetOffset(L, OffsetSize::kOffset21);
-+          beqz(rj, offset);
-+        } else {
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          // We don't want any other register but scratch clobbered.
-+          Register sc = GetRkAsRegisterHelper(rk, scratch);
-+          offset = GetOffset(L, OffsetSize::kOffset16);
-+          beq(rj, sc, offset);
-+        }
-+        break;
-+      case ne:
-+        if (rk.is_reg() && rj.code() == rk.rm().code()) {
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          // bne is used here to make the code patchable. Otherwise we
-+          // should not generate any instruction.
-+          offset = GetOffset(L, OffsetSize::kOffset16);
-+          bne(rj, rj, offset);
-+        } else if (IsZero(rk)) {
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          offset = GetOffset(L, OffsetSize::kOffset21);
-+          bnez(rj, offset);
-+        } else {
-+          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
-+          if (need_link) pcaddi(ra, 2);
-+          // We don't want any other register but scratch clobbered.
-+          Register sc = GetRkAsRegisterHelper(rk, scratch);
-+          offset = GetOffset(L, OffsetSize::kOffset16);
-+          bne(rj, sc, offset);
-+        }
-+        break;
-+
-+      // Signed comparison.
-+      case greater:
-+        // rj > rk
-+        if (rk.is_reg() && rj.code() == rk.rm().code()) {
-+          // No code needs to be emitted.
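-+          // (rj > rj is statically false, so the branch is never taken.)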
-+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ blt(zero_reg, rj, offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ blt(sc, rj, offset); -+ } -+ break; -+ case greater_equal: -+ // rj >= rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bge(rj, zero_reg, offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bge(rj, sc, offset); -+ } -+ break; -+ case less: -+ // rj < rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ // No code needs to be emitted. -+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ blt(rj, zero_reg, offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ blt(rj, sc, offset); -+ } -+ break; -+ case less_equal: -+ // rj <= rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bge(zero_reg, rj, offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bge(sc, rj, offset); -+ } -+ break; -+ -+ // Unsigned comparison. -+ case Ugreater: -+ // rj > rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ // No code needs to be emitted. -+ } else if (rj == zero_reg) { -+ // No code needs to be emitted. 
-+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bltu(sc, rj, offset); -+ } -+ break; -+ case Ugreater_equal: -+ // rj >= rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (IsZero(rk)) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (rj == zero_reg) { -+ // No code needs to be emitted. -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bgeu(rj, sc, offset); -+ } -+ break; -+ case Uless: -+ // rj < rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ // No code needs to be emitted. -+ } else if (IsZero(rk)) { -+ // No code needs to be emitted. -+ } else if (rj == zero_reg) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; -+ if (need_link) pcaddi(ra, 2); -+ Register sc = GetRkAsRegisterHelper(rk, scratch); -+ DCHECK(rj != sc); -+ offset = GetOffset(L, OffsetSize::kOffset16); -+ bltu(rj, sc, offset); -+ } -+ break; -+ case Uless_equal: -+ // rj <= rk -+ if (rk.is_reg() && rj.code() == rk.rm().code()) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (rj == zero_reg) { -+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; -+ if (need_link) pcaddi(ra, 2); -+ offset = GetOffset(L, OffsetSize::kOffset26); -+ b(offset); -+ } else if (IsZero(rk)) { -+ // No code needs to be emitted. 
-+ } else {
-+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
-+ if (need_link) pcaddi(ra, 2);
-+ Register sc = GetRkAsRegisterHelper(rk, scratch);
-+ DCHECK(rj != sc);
-+ offset = GetOffset(L, OffsetSize::kOffset16);
-+ bgeu(sc, rj, offset);
-+ }
-+ break;
-+ default:
-+ UNREACHABLE();
-+ }
-+ }
-+ return true;
-+}
-+
-+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
-+ const Operand& rk, bool need_link) {
-+ BRANCH_ARGS_CHECK(cond, rj, rk);
-+ bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
-+ DCHECK(result);
-+ USE(result);
-+}
-+
-+void TurboAssembler::LoadFromConstantsTable(Register destination,
-+ int constant_index) {
-+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
-+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
-+ Ld_d(destination,
-+ FieldMemOperand(destination, FixedArray::kHeaderSize +
-+ constant_index * kPointerSize));
-+}
-+
-+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
-+ Ld_d(destination, MemOperand(kRootRegister, offset));
-+}
-+
-+void TurboAssembler::LoadRootRegisterOffset(Register destination,
-+ intptr_t offset) {
-+ if (offset == 0) {
-+ Move(destination, kRootRegister);
-+ } else {
-+ Add_d(destination, kRootRegister, Operand(offset));
-+ }
-+}
-+
-+void TurboAssembler::Jump(Register target, Condition cond, Register rj,
-+ const Operand& rk) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ if (cond == cc_always) {
-+ jirl(zero_reg, target, 0);
-+ } else {
-+ BRANCH_ARGS_CHECK(cond, rj, rk);
-+ Label skip;
-+ Branch(&skip, NegateCondition(cond), rj, rk);
-+ jirl(zero_reg, target, 0);
-+ bind(&skip);
-+ }
-+}
-+
-+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
-+ Condition cond, Register rj, const Operand& rk) {
-+ Label skip;
-+ if (cond != cc_always) {
-+ Branch(&skip, NegateCondition(cond), rj, rk);
-+ }
-+ {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ li(t7, Operand(target, rmode));
-+ jirl(zero_reg, t7, 0);
-+ bind(&skip);
-+ }
-+}
-+
-+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
-+ Register rj, const Operand& rk) {
-+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
-+ Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
-+}
-+
-+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
-+ Condition cond, Register rj, const Operand& rk) {
-+ DCHECK(RelocInfo::IsCodeTarget(rmode));
-+
-+ int builtin_index = Builtins::kNoBuiltinId;
-+ bool target_is_isolate_independent_builtin =
-+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-+ Builtins::IsIsolateIndependent(builtin_index);
-+
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ if (root_array_available_ && options().isolate_independent_code) {
-+ int offset = code->builtin_index() * kSystemPointerSize +
-+ IsolateData::builtin_entry_table_offset();
-+ Ld_d(t7, MemOperand(kRootRegister, offset));
-+ Jump(t7, cond, rj, rk);
-+ return;
-+ } else if (options().inline_offheap_trampolines &&
-+ target_is_isolate_independent_builtin) {
-+ // Inline the trampoline.
-+ RecordCommentForOffHeapTrampoline(builtin_index);
-+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-+ EmbeddedData d = EmbeddedData::FromBlob();
-+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
-+ li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-+ Jump(t7, cond, rj, rk);
-+ return;
-+ }
-+
-+ Jump(static_cast<intptr_t>(code.address()), rmode, cond, rj, rk);
-+}
-+
-+void TurboAssembler::Jump(const ExternalReference& reference) {
-+ li(t7, reference);
-+ Jump(t7);
-+}
-+
-+// Note: To call gcc-compiled C code on LoongArch, you must call through t[0-8].
-+void TurboAssembler::Call(Register target, Condition cond, Register rj,
-+ const Operand& rk) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ if (cond == cc_always) {
-+ jirl(ra, target, 0);
-+ } else {
-+ BRANCH_ARGS_CHECK(cond, rj, rk);
-+ Label skip;
-+ Branch(&skip, NegateCondition(cond), rj, rk);
-+ jirl(ra, target, 0);
-+ bind(&skip);
-+ }
-+}
-+
-+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
-+ unsigned higher_limit,
-+ Label* on_in_range) {
-+ if (lower_limit != 0) {
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ Sub_d(scratch, value, Operand(lower_limit));
-+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
-+ } else {
-+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
-+ }
-+}
-+
-+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
-+ Register rj, const Operand& rk) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ li(t7, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
-+ Call(t7, cond, rj, rk);
-+}
-+
-+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
-+ Condition cond, Register rj, const Operand& rk) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ int builtin_index = Builtins::kNoBuiltinId;
-+ bool target_is_isolate_independent_builtin =
-+ isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
-+ Builtins::IsIsolateIndependent(builtin_index);
-+
-+ if (root_array_available_ && options().isolate_independent_code) {
-+ int offset = code->builtin_index() * kSystemPointerSize +
-+ IsolateData::builtin_entry_table_offset();
-+ LoadRootRelative(t7, offset);
-+ Call(t7, cond, rj, rk);
-+ return;
-+ } else if (options().inline_offheap_trampolines &&
-+ target_is_isolate_independent_builtin) {
-+ // Inline the trampoline.
-+ RecordCommentForOffHeapTrampoline(builtin_index);
-+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
-+ EmbeddedData d = EmbeddedData::FromBlob();
-+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
-+ li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-+ Call(t7, cond, rj, rk);
-+ return;
-+ }
-+
-+ DCHECK(RelocInfo::IsCodeTarget(rmode));
-+ DCHECK(code->IsExecutable());
-+ Call(code.address(), rmode, cond, rj, rk);
-+}
-+
-+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
-+ STATIC_ASSERT(kSystemPointerSize == 8);
-+ STATIC_ASSERT(kSmiTagSize == 1);
-+ STATIC_ASSERT(kSmiTag == 0);
-+
-+ // The builtin_index register contains the builtin index as a Smi.
-+ SmiUntag(builtin_index, builtin_index);
-+ Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2,
-+ t7);
-+ Ld_d(builtin_index,
-+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
-+}
-+
-+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
-+ LoadEntryFromBuiltinIndex(builtin_index);
-+ Call(builtin_index);
-+}
-+
-+void TurboAssembler::PatchAndJump(Address target) {
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ pcaddi(scratch, 4);
-+ Ld_d(t7, MemOperand(scratch, 0));
-+ jirl(zero_reg, t7, 0);
-+ nop();
-+ DCHECK_EQ(reinterpret_cast<uintptr_t>(pc_) % 8, 0);
-+ *reinterpret_cast<uint64_t*>(pc_) = target; // pc_ should be aligned.
-+ pc_ += sizeof(uint64_t);
-+}
-+
-+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
-+ // This generates the final instruction sequence for calls to C functions
-+ // once an exit frame has been constructed.
-+ //
-+ // Note that this assumes the caller code (i.e. the Code object currently
-+ // being generated) is immovable or that the callee function cannot trigger
-+ // GC, since the callee function will return to it.
-+
-+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
-+ static constexpr int kNumInstructionsToJump = 2;
-+ Label find_ra;
-+ // Adjust the value in ra to point to the correct return location, 2nd
-+ // instruction past the real call into C code (the jirl), and push it.
-+ // This is the return address of the exit frame.
-+ pcaddi(ra, kNumInstructionsToJump + 1);
-+ bind(&find_ra);
-+
-+ // This spot was reserved in EnterExitFrame.
-+ St_d(ra, MemOperand(sp, 0));
-+ // Stack is still aligned.
-+
-+ // TODO: can the jirl target be one of a0 -- a7?
-+ jirl(zero_reg, target, 0);
-+ // Make sure the stored 'ra' points to this position.
-+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
-+}
-+
-+void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
-+ Jump(ra, cond, rj, rk);
-+}
-+
-+void TurboAssembler::DropAndRet(int drop) {
-+ DCHECK(is_int16(drop * kPointerSize));
-+ addi_d(sp, sp, drop * kPointerSize);
-+ Ret();
-+}
-+
-+void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
-+ const Operand& r2) {
-+ // Both Drop and Ret need to be conditional.
-+ Label skip;
-+ if (cond != cc_always) {
-+ Branch(&skip, NegateCondition(cond), r1, r2);
-+ }
-+
-+ Drop(drop);
-+ Ret();
-+
-+ if (cond != cc_always) {
-+ bind(&skip);
-+ }
-+}
-+
-+void TurboAssembler::Drop(int count, Condition cond, Register reg,
-+ const Operand& op) {
-+ if (count <= 0) {
-+ return;
-+ }
-+
-+ Label skip;
-+
-+ if (cond != al) {
-+ Branch(&skip, NegateCondition(cond), reg, op);
-+ }
-+
-+ Add_d(sp, sp, Operand(count * kPointerSize));
-+
-+ if (cond != al) {
-+ bind(&skip);
-+ }
-+}
-+
-+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
-+ if (scratch == no_reg) {
-+ Xor(reg1, reg1, Operand(reg2));
-+ Xor(reg2, reg2, Operand(reg1));
-+ Xor(reg1, reg1, Operand(reg2));
-+ } else {
-+ mov(scratch, reg1);
-+ mov(reg1, reg2);
-+ mov(reg2, scratch);
-+ }
-+}
-+
-+void TurboAssembler::Call(Label* target) { Branch(target, true); }
-+
-+void TurboAssembler::Push(Smi smi) {
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ li(scratch, Operand(smi));
-+ push(scratch);
-+}
-+
-+void TurboAssembler::Push(Handle<HeapObject> handle) {
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ li(scratch, Operand(handle));
-+ push(scratch);
-+}
-+
-+void MacroAssembler::MaybeDropFrames() {
-+ // Check whether we need to drop frames to restart a function on the stack.
-+ li(a1, ExternalReference::debug_restart_fp_address(isolate()));
-+ Ld_d(a1, MemOperand(a1, 0));
-+ Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
-+ ne, a1, Operand(zero_reg));
-+}
-+
-+// ---------------------------------------------------------------------------
-+// Exception handling.
-+
-+void MacroAssembler::PushStackHandler() {
-+ // Adjust this code if not the case.
-+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
-+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-+
-+ Push(Smi::zero()); // Padding.
-+
-+ // Link the current handler as the next handler.
-+ li(t2,
-+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
-+ Ld_d(t1, MemOperand(t2, 0));
-+ push(t1);
-+
-+ // Set this new handler as the current one.
-+ St_d(sp, MemOperand(t2, 0));
-+}
-+
-+void MacroAssembler::PopStackHandler() {
-+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-+ pop(a1);
-+ Add_d(sp, sp,
-+ Operand(
-+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ li(scratch,
-+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
-+ St_d(a1, MemOperand(scratch, 0));
-+}
-+
-+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
-+ const DoubleRegister src) {
-+ fsub_d(dst, src, kDoubleRegZero);
-+}
-+
-+void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
-+ Move(dst, f0); // Reg f0 is the LoongArch return value.
-+}
-+
-+void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
-+ Move(dst, f0); // Reg f0 is the LoongArch first argument value.
-+} -+ -+void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(f0, src); } -+ -+void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(f0, src); } -+ -+void TurboAssembler::MovToFloatParameters(DoubleRegister src1, -+ DoubleRegister src2) { -+ const DoubleRegister fparg2 = f1; -+ if (src2 == f0) { -+ DCHECK(src1 != fparg2); -+ Move(fparg2, src2); -+ Move(f0, src1); -+ } else { -+ Move(f0, src1); -+ Move(fparg2, src2); -+ } -+} -+ -+// ----------------------------------------------------------------------------- -+// JavaScript invokes. -+ -+void TurboAssembler::PrepareForTailCall(Register callee_args_count, -+ Register caller_args_count, -+ Register scratch0, Register scratch1) { -+ // Calculate the end of destination area where we will put the arguments -+ // after we drop current frame. We add kPointerSize to count the receiver -+ // argument which is not included into formal parameters count. -+ Register dst_reg = scratch0; -+ Alsl_d(dst_reg, caller_args_count, fp, kPointerSizeLog2, t7); -+ Add_d(dst_reg, dst_reg, -+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); -+ -+ Register src_reg = caller_args_count; -+ // Calculate the end of source area. +kPointerSize is for the receiver. -+ Alsl_d(src_reg, callee_args_count, sp, kPointerSizeLog2, t7); -+ Add_d(src_reg, src_reg, Operand(kPointerSize)); -+ -+ if (FLAG_debug_code) { -+ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg, -+ Operand(dst_reg)); -+ } -+ -+ // Restore caller's frame pointer and return address now as they will be -+ // overwritten by the copying loop. -+ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); -+ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); -+ -+ // Now copy callee arguments to the caller frame going backwards to avoid -+ // callee arguments corruption (source and destination areas could overlap). -+ -+ // Both src_reg and dst_reg are pointing to the word after the one to copy, -+ // so they must be pre-decremented in the loop. -+ Register tmp_reg = scratch1; -+ Label loop, entry; -+ Branch(&entry); -+ bind(&loop); -+ Sub_d(src_reg, src_reg, Operand(kPointerSize)); -+ Sub_d(dst_reg, dst_reg, Operand(kPointerSize)); -+ Ld_d(tmp_reg, MemOperand(src_reg, 0)); -+ St_d(tmp_reg, MemOperand(dst_reg, 0)); -+ bind(&entry); -+ Branch(&loop, ne, sp, Operand(src_reg)); -+ -+ // Leave current frame. -+ mov(sp, dst_reg); -+} -+ -+void MacroAssembler::InvokePrologue(Register expected_parameter_count, -+ Register actual_parameter_count, -+ Label* done, InvokeFlag flag) { -+ Label regular_invoke; -+ -+ // Check whether the expected and actual arguments count match. The registers -+ // are set up according to contract with ArgumentsAdaptorTrampoline: -+ // a0: actual arguments count -+ // a1: function (passed through to callee) -+ // a2: expected arguments count -+ -+ // The code below is made a lot easier because the calling code already sets -+ // up actual and expected registers according to the contract. 
-+
-+ DCHECK_EQ(actual_parameter_count, a0);
-+ DCHECK_EQ(expected_parameter_count, a2);
-+
-+ Branch(&regular_invoke, eq, expected_parameter_count,
-+ Operand(actual_parameter_count));
-+
-+ Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
-+ if (flag == CALL_FUNCTION) {
-+ Call(adaptor);
-+ Branch(done);
-+ } else {
-+ Jump(adaptor, RelocInfo::CODE_TARGET);
-+ }
-+
-+ bind(&regular_invoke);
-+}
-+
-+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
-+ Register expected_parameter_count,
-+ Register actual_parameter_count) {
-+ Label skip_hook;
-+
-+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
-+ Ld_b(t0, MemOperand(t0, 0));
-+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
-+
-+ {
-+ // Load receiver to pass it later to DebugOnFunctionCall hook.
-+ Alsl_d(t0, actual_parameter_count, sp, kPointerSizeLog2, t7);
-+ Ld_d(t0, MemOperand(t0, 0));
-+ FrameScope frame(this,
-+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
-+ SmiTag(expected_parameter_count);
-+ Push(expected_parameter_count);
-+
-+ SmiTag(actual_parameter_count);
-+ Push(actual_parameter_count);
-+
-+ if (new_target.is_valid()) {
-+ Push(new_target);
-+ }
-+ // TODO: MultiPush/Pop
-+ Push(fun);
-+ Push(fun);
-+ Push(t0);
-+ CallRuntime(Runtime::kDebugOnFunctionCall);
-+ Pop(fun);
-+ if (new_target.is_valid()) {
-+ Pop(new_target);
-+ }
-+
-+ Pop(actual_parameter_count);
-+ SmiUntag(actual_parameter_count);
-+
-+ Pop(expected_parameter_count);
-+ SmiUntag(expected_parameter_count);
-+ }
-+ bind(&skip_hook);
-+}
-+
-+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
-+ Register expected_parameter_count,
-+ Register actual_parameter_count,
-+ InvokeFlag flag) {
-+ // You can't call a function without a valid frame.
-+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
-+ DCHECK_EQ(function, a1);
-+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
-+
-+ // On function call, call into the debugger if necessary.
-+ CheckDebugHook(function, new_target, expected_parameter_count,
-+ actual_parameter_count);
-+
-+ // Clear the new.target register if not given.
-+ if (!new_target.is_valid()) {
-+ LoadRoot(a3, RootIndex::kUndefinedValue);
-+ }
-+
-+ Label done;
-+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
-+ // We call indirectly through the code field in the function to
-+ // allow recompilation to take effect without changing any of the
-+ // call sites.
-+ Register code = kJavaScriptCallCodeStartRegister;
-+ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
-+ if (flag == CALL_FUNCTION) {
-+ CallCodeObject(code);
-+ } else {
-+ DCHECK(flag == JUMP_FUNCTION);
-+ JumpCodeObject(code);
-+ }
-+
-+ // Continue here if InvokePrologue does handle the invocation due to
-+ // mismatched parameter counts.
-+ bind(&done);
-+}
-+
-+void MacroAssembler::InvokeFunctionWithNewTarget(
-+ Register function, Register new_target, Register actual_parameter_count,
-+ InvokeFlag flag) {
-+ // You can't call a function without a valid frame.
-+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
-+
-+ // Contract with called JS functions requires that function is passed in a1.
-+ DCHECK_EQ(function, a1); -+ Register expected_parameter_count = a2; -+ Register temp_reg = t0; -+ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); -+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); -+ // The argument count is stored as uint16_t -+ Ld_hu(expected_parameter_count, -+ FieldMemOperand(temp_reg, -+ SharedFunctionInfo::kFormalParameterCountOffset)); -+ -+ InvokeFunctionCode(a1, new_target, expected_parameter_count, -+ actual_parameter_count, flag); -+} -+ -+void MacroAssembler::InvokeFunction(Register function, -+ Register expected_parameter_count, -+ Register actual_parameter_count, -+ InvokeFlag flag) { -+ // You can't call a function without a valid frame. -+ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); -+ -+ // Contract with called JS functions requires that function is passed in a1. -+ DCHECK_EQ(function, a1); -+ -+ // Get the function and setup the context. -+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); -+ -+ InvokeFunctionCode(a1, no_reg, expected_parameter_count, -+ actual_parameter_count, flag); -+} -+ -+// --------------------------------------------------------------------------- -+// Support functions. -+ -+void MacroAssembler::GetObjectType(Register object, Register map, -+ Register type_reg) { -+ LoadMap(map, object); -+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); -+} -+ -+// ----------------------------------------------------------------------------- -+// Runtime calls. -+ -+void TurboAssembler::AdddOverflow(Register dst, Register left, -+ const Operand& right, Register overflow) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register right_reg = no_reg; -+ Register scratch = t8; -+ if (!right.is_reg()) { -+ li(t7, Operand(right)); -+ right_reg = t7; -+ } else { -+ right_reg = right.rm(); -+ } -+ -+ DCHECK(left != scratch && right_reg != scratch && dst != scratch && -+ overflow != scratch); -+ DCHECK(overflow != left && overflow != right_reg); -+ -+ if (dst == left || dst == right_reg) { -+ add_d(scratch, left, right_reg); -+ xor_(overflow, scratch, left); -+ xor_(t7, scratch, right_reg); -+ and_(overflow, overflow, t7); -+ mov(dst, scratch); -+ } else { -+ add_d(dst, left, right_reg); -+ xor_(overflow, dst, left); -+ xor_(t7, dst, right_reg); -+ and_(overflow, overflow, t7); -+ } -+} -+ -+void TurboAssembler::SubdOverflow(Register dst, Register left, -+ const Operand& right, Register overflow) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register right_reg = no_reg; -+ Register scratch = t8; -+ if (!right.is_reg()) { -+ li(t7, Operand(right)); -+ right_reg = t7; -+ } else { -+ right_reg = right.rm(); -+ } -+ -+ DCHECK(left != scratch && right_reg != scratch && dst != scratch && -+ overflow != scratch); -+ DCHECK(overflow != left && overflow != right_reg); -+ -+ if (dst == left || dst == right_reg) { -+ Sub_d(scratch, left, right_reg); -+ xor_(overflow, left, scratch); -+ xor_(t7, left, right_reg); -+ and_(overflow, overflow, t7); -+ mov(dst, scratch); -+ } else { -+ sub_d(dst, left, right_reg); -+ xor_(overflow, left, dst); -+ xor_(t7, left, right_reg); -+ and_(overflow, overflow, t7); -+ } -+} -+ -+void TurboAssembler::MulOverflow(Register dst, Register left, -+ const Operand& right, Register overflow) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ Register right_reg = no_reg; -+ Register scratch = t8; -+ if (!right.is_reg()) { -+ li(t7, Operand(right)); -+ right_reg = t7; -+ } else { -+ right_reg = right.rm(); -+ } -+ -+ 
DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
-+ overflow != scratch);
-+ DCHECK(overflow != left && overflow != right_reg);
-+
-+ if (dst == left || dst == right_reg) {
-+ Mul_w(scratch, left, right_reg);
-+ Mulh_w(overflow, left, right_reg);
-+ mov(dst, scratch);
-+ } else {
-+ Mul_w(dst, left, right_reg);
-+ Mulh_w(overflow, left, right_reg);
-+ }
-+
-+ srai_d(scratch, dst, 32);
-+ xor_(overflow, overflow, scratch);
-+}
-+
-+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
-+ SaveFPRegsMode save_doubles) {
-+ // All parameters are on the stack. a0 has the return value after call.
-+
-+ // If the expected number of arguments of the runtime function is
-+ // constant, we check that the actual number of arguments matches the
-+ // expectation.
-+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
-+
-+ // TODO(1236192): Most runtime routines don't need the number of
-+ // arguments passed in because it is constant. At some point we
-+ // should remove this need and make the runtime routine entry code
-+ // smarter.
-+ PrepareCEntryArgs(num_arguments);
-+ PrepareCEntryFunction(ExternalReference::Create(f));
-+ Handle<Code> code =
-+ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
-+ Call(code, RelocInfo::CODE_TARGET);
-+}
-+
-+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
-+ const Runtime::Function* function = Runtime::FunctionForId(fid);
-+ DCHECK_EQ(1, function->result_size);
-+ if (function->nargs >= 0) {
-+ PrepareCEntryArgs(function->nargs);
-+ }
-+ JumpToExternalReference(ExternalReference::Create(fid));
-+}
-+
-+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
-+ bool builtin_exit_frame) {
-+ PrepareCEntryFunction(builtin);
-+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
-+ kArgvOnStack, builtin_exit_frame);
-+ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-+}
-+
-+void MacroAssembler::JumpToInstructionStream(Address entry) {
-+ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
-+ Jump(kOffHeapTrampolineRegister);
-+}
-+
-+void MacroAssembler::LoadWeakValue(Register out, Register in,
-+ Label* target_if_cleared) {
-+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
-+
-+ And(out, in, Operand(~kWeakHeapObjectMask));
-+}
-+
-+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
-+ Register scratch1, Register scratch2) {
-+ DCHECK_GT(value, 0);
-+ if (FLAG_native_code_counters && counter->Enabled()) {
-+ // This operation has to be exactly 32-bit wide in case the external
-+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
-+ // field.
-+ li(scratch2, ExternalReference::Create(counter));
-+ Ld_w(scratch1, MemOperand(scratch2, 0));
-+ Add_w(scratch1, scratch1, Operand(value));
-+ St_w(scratch1, MemOperand(scratch2, 0));
-+ }
-+}
-+
-+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
-+ Register scratch1, Register scratch2) {
-+ DCHECK_GT(value, 0);
-+ if (FLAG_native_code_counters && counter->Enabled()) {
-+ // This operation has to be exactly 32-bit wide in case the external
-+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
-+ // field.
-+ li(scratch2, ExternalReference::Create(counter));
-+ Ld_w(scratch1, MemOperand(scratch2, 0));
-+ Sub_w(scratch1, scratch1, Operand(value));
-+ St_w(scratch1, MemOperand(scratch2, 0));
-+ }
-+}
-+
-+// -----------------------------------------------------------------------------
-+// Debugging.
-+
-+void TurboAssembler::Trap() { stop(); }
-+void TurboAssembler::DebugBreak() { stop(); }
-+
-+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
-+ Operand rk) {
-+ if (emit_debug_code()) Check(cc, reason, rs, rk);
-+}
-+
-+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
-+ Operand rk) {
-+ Label L;
-+ Branch(&L, cc, rj, rk);
-+ Abort(reason);
-+ // Will not return here.
-+ bind(&L);
-+}
-+
-+void TurboAssembler::Abort(AbortReason reason) {
-+ Label abort_start;
-+ bind(&abort_start);
-+#ifdef DEBUG
-+ const char* msg = GetAbortReason(reason);
-+ RecordComment("Abort message: ");
-+ RecordComment(msg);
-+#endif
-+
-+ // Avoid emitting call to builtin if requested.
-+ if (trap_on_abort()) {
-+ stop();
-+ return;
-+ }
-+
-+ if (should_abort_hard()) {
-+ // We don't care if we constructed a frame. Just pretend we did.
-+ FrameScope assume_frame(this, StackFrame::NONE);
-+ PrepareCallCFunction(0, a0);
-+ li(a0, Operand(static_cast<int>(reason)));
-+ CallCFunction(ExternalReference::abort_with_reason(), 1);
-+ return;
-+ }
-+
-+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
-+
-+ // Disable stub call restrictions to always allow calls to abort.
-+ if (!has_frame()) {
-+ // We don't actually want to generate a pile of code for this, so just
-+ // claim there is a stack frame, without generating one.
-+ FrameScope scope(this, StackFrame::NONE);
-+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
-+ } else {
-+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
-+ }
-+ // Will not return here.
-+ if (is_trampoline_pool_blocked()) {
-+ // If the calling code cares about the exact number of
-+ // instructions generated, we insert padding here to keep the size
-+ // of the Abort macro constant.
-+ // Currently in debug mode with debug_code enabled the number of
-+ // generated instructions is 10, so we use this as a maximum value.
-+ static const int kExpectedAbortInstructions = 10; -+ int abort_instructions = InstructionsGeneratedSince(&abort_start); -+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions); -+ while (abort_instructions++ < kExpectedAbortInstructions) { -+ nop(); -+ } -+ } -+} -+ -+void MacroAssembler::LoadMap(Register destination, Register object) { -+ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset)); -+} -+ -+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { -+ LoadMap(dst, cp); -+ Ld_d(dst, FieldMemOperand( -+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); -+ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index))); -+} -+ -+void TurboAssembler::StubPrologue(StackFrame::Type type) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, Operand(StackFrame::TypeToMarker(type))); -+ PushCommonFrame(scratch); -+} -+ -+void TurboAssembler::Prologue() { PushStandardFrame(a1); } -+ -+void TurboAssembler::EnterFrame(StackFrame::Type type) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ int stack_offset = -3 * kPointerSize; -+ const int fp_offset = 1 * kPointerSize; -+ addi_d(sp, sp, stack_offset); -+ stack_offset = -stack_offset - kPointerSize; -+ St_d(ra, MemOperand(sp, stack_offset)); -+ stack_offset -= kPointerSize; -+ St_d(fp, MemOperand(sp, stack_offset)); -+ stack_offset -= kPointerSize; -+ li(t7, Operand(StackFrame::TypeToMarker(type))); -+ St_d(t7, MemOperand(sp, stack_offset)); -+ // Adjust FP to point to saved FP. -+ DCHECK_EQ(stack_offset, 0); -+ Add_d(fp, sp, Operand(fp_offset)); -+} -+ -+void TurboAssembler::LeaveFrame(StackFrame::Type type) { -+ addi_d(sp, fp, 2 * kPointerSize); -+ Ld_d(ra, MemOperand(fp, 1 * kPointerSize)); -+ Ld_d(fp, MemOperand(fp, 0 * kPointerSize)); -+} -+ -+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, -+ StackFrame::Type frame_type) { -+ DCHECK(frame_type == StackFrame::EXIT || -+ frame_type == StackFrame::BUILTIN_EXIT); -+ -+ // Set up the frame structure on the stack. -+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); -+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); -+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); -+ -+ // This is how the stack will look: -+ // fp + 2 (==kCallerSPDisplacement) - old stack's end -+ // [fp + 1 (==kCallerPCOffset)] - saved old ra -+ // [fp + 0 (==kCallerFPOffset)] - saved old fp -+ // [fp - 1 StackFrame::EXIT Smi -+ // [fp - 2 (==kSPOffset)] - sp of the called function -+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the -+ // new stack (will contain saved ra) -+ -+ // Save registers and reserve room for saved entry sp. -+ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); -+ St_d(ra, MemOperand(sp, 3 * kPointerSize)); -+ St_d(fp, MemOperand(sp, 2 * kPointerSize)); -+ { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); -+ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); -+ } -+ // Set up new frame pointer. -+ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); -+ -+ if (emit_debug_code()) { -+ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); -+ } -+ -+ { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ // Save the frame pointer and the context in top. 
-+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
-+ isolate()));
-+ St_d(fp, MemOperand(t8, 0));
-+ li(t8,
-+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
-+ St_d(cp, MemOperand(t8, 0));
-+ }
-+
-+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
-+ if (save_doubles) {
-+ // The stack is already aligned to 0 modulo 8 for stores with Fst_d.
-+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
-+ int space = kNumOfSavedRegisters * kDoubleSize;
-+ Sub_d(sp, sp, Operand(space));
-+ // Remember: we only need to save every 2nd double FPU value.
-+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
-+ FPURegister reg = FPURegister::from_code(2 * i);
-+ Fst_d(reg, MemOperand(sp, i * kDoubleSize));
-+ }
-+ }
-+
-+ // Reserve place for the return address, stack space and an optional slot
-+ // (used by DirectCEntry to hold the return value if a struct is
-+ // returned) and align the frame preparing for calling the runtime function.
-+ DCHECK_GE(stack_space, 0);
-+ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize));
-+ if (frame_alignment > 0) {
-+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
-+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
-+ }
-+
-+ // Set the exit frame sp value to point just before the return address
-+ // location.
-+ UseScratchRegisterScope temps(this);
-+ Register scratch = temps.Acquire();
-+ addi_d(scratch, sp, kPointerSize);
-+ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
-+}
-+
-+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
-+ bool do_return,
-+ bool argument_count_is_length) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ // Optionally restore all double registers.
-+ if (save_doubles) {
-+ // Remember: we only need to restore every 2nd double FPU value.
-+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
-+ Sub_d(t8, fp,
-+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
-+ kNumOfSavedRegisters * kDoubleSize));
-+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
-+ FPURegister reg = FPURegister::from_code(2 * i);
-+ Fld_d(reg, MemOperand(t8, i * kDoubleSize));
-+ }
-+ }
-+
-+ // Clear top frame.
-+ li(t8,
-+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
-+ St_d(zero_reg, MemOperand(t8, 0));
-+
-+ // Restore current context from top and clear it in debug mode.
-+ li(t8,
-+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
-+ Ld_d(cp, MemOperand(t8, 0));
-+
-+#ifdef DEBUG
-+ li(t8,
-+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
-+ St_d(a3, MemOperand(t8, 0));
-+#endif
-+
-+ // Pop the arguments, restore registers, and return.
-+ mov(sp, fp); // Respect ABI stack constraint.
-+ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
-+ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
-+
-+ if (argument_count.is_valid()) {
-+ if (argument_count_is_length) {
-+ add_d(sp, sp, argument_count);
-+ } else {
-+ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8);
-+ }
-+ }
-+
-+ addi_d(sp, sp, 2 * kPointerSize);
-+ if (do_return) {
-+ Ret();
-+ }
-+}
-+
-+int TurboAssembler::ActivationFrameAlignment() {
-+#if V8_HOST_ARCH_LOONG64
-+ // Running on the real platform. Use the alignment as mandated by the local
-+ // environment.
-+ // Note: This will break if we ever start generating snapshots on one
-+ // LoongArch platform for another LoongArch platform with a different
-+ // alignment.
-+ return base::OS::ActivationFrameAlignment(); -+#else // V8_HOST_ARCH_LOONG64 -+ // If we are using the simulator then we should always align to the expected -+ // alignment. As the simulator is used to generate snapshots we do not know -+ // if the target platform will need alignment, so this is controlled from a -+ // flag. -+ return FLAG_sim_stack_alignment; -+#endif // V8_HOST_ARCH_LOONG64 -+} -+ -+void MacroAssembler::AssertStackIsAligned() { -+ if (emit_debug_code()) { -+ const int frame_alignment = ActivationFrameAlignment(); -+ const int frame_alignment_mask = frame_alignment - 1; -+ -+ if (frame_alignment > kPointerSize) { -+ Label alignment_as_expected; -+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); -+ { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ andi(scratch, sp, frame_alignment_mask); -+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); -+ } -+ // Don't use Check here, as it will call Runtime_Abort re-entering here. -+ stop(); -+ bind(&alignment_as_expected); -+ } -+ } -+} -+ -+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { -+ if (SmiValuesAre32Bits()) { -+ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset()))); -+ } else { -+ DCHECK(SmiValuesAre31Bits()); -+ Ld_w(dst, src); -+ SmiUntag(dst); -+ } -+} -+ -+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, -+ Register scratch) { -+ DCHECK_EQ(0, kSmiTag); -+ andi(scratch, value, kSmiTagMask); -+ Branch(smi_label, eq, scratch, Operand(zero_reg)); -+} -+ -+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, -+ Register scratch) { -+ DCHECK_EQ(0, kSmiTag); -+ andi(scratch, value, kSmiTagMask); -+ Branch(not_smi_label, ne, scratch, Operand(zero_reg)); -+} -+ -+void MacroAssembler::AssertNotSmi(Register object) { -+ if (emit_debug_code()) { -+ STATIC_ASSERT(kSmiTag == 0); -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ andi(scratch, object, kSmiTagMask); -+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); -+ } -+} -+ -+void MacroAssembler::AssertSmi(Register object) { -+ if (emit_debug_code()) { -+ STATIC_ASSERT(kSmiTag == 0); -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ andi(scratch, object, kSmiTagMask); -+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); -+ } -+} -+ -+void MacroAssembler::AssertConstructor(Register object) { -+ if (emit_debug_code()) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ STATIC_ASSERT(kSmiTag == 0); -+ SmiTst(object, t8); -+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8, -+ Operand(zero_reg)); -+ -+ LoadMap(t8, object); -+ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset)); -+ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask)); -+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg)); -+ } -+} -+ -+void MacroAssembler::AssertFunction(Register object) { -+ if (emit_debug_code()) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ STATIC_ASSERT(kSmiTag == 0); -+ SmiTst(object, t8); -+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, -+ Operand(zero_reg)); -+ GetObjectType(object, t8, t8); -+ Check(eq, AbortReason::kOperandIsNotAFunction, t8, -+ Operand(JS_FUNCTION_TYPE)); -+ } -+} -+ -+void MacroAssembler::AssertBoundFunction(Register object) { -+ if (emit_debug_code()) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ STATIC_ASSERT(kSmiTag == 0); -+ SmiTst(object, t8); -+ Check(ne, 
AbortReason::kOperandIsASmiAndNotABoundFunction, t8, -+ Operand(zero_reg)); -+ GetObjectType(object, t8, t8); -+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8, -+ Operand(JS_BOUND_FUNCTION_TYPE)); -+ } -+} -+ -+void MacroAssembler::AssertGeneratorObject(Register object) { -+ if (!emit_debug_code()) return; -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ STATIC_ASSERT(kSmiTag == 0); -+ SmiTst(object, t8); -+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8, -+ Operand(zero_reg)); -+ -+ GetObjectType(object, t8, t8); -+ -+ Label done; -+ -+ // Check if JSGeneratorObject -+ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE)); -+ -+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) -+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); -+ -+ // Check if JSAsyncGeneratorObject -+ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); -+ -+ Abort(AbortReason::kOperandIsNotAGeneratorObject); -+ -+ bind(&done); -+} -+ -+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, -+ Register scratch) { -+ if (emit_debug_code()) { -+ Label done_checking; -+ AssertNotSmi(object); -+ LoadRoot(scratch, RootIndex::kUndefinedValue); -+ Branch(&done_checking, eq, object, Operand(scratch)); -+ GetObjectType(object, scratch, scratch); -+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, -+ Operand(ALLOCATION_SITE_TYPE)); -+ bind(&done_checking); -+ } -+} -+ -+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, -+ FPURegister src2, Label* out_of_line) { -+ if (src1 == src2) { -+ Move_s(dst, src1); -+ return; -+ } -+ -+ // Check if one of operands is NaN. -+ CompareIsNanF32(src1, src2); -+ BranchTrueF(out_of_line); -+ -+ fmax_s(dst, src1, src2); -+} -+ -+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, -+ FPURegister src2) { -+ fadd_s(dst, src1, src2); -+} -+ -+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, -+ FPURegister src2, Label* out_of_line) { -+ if (src1 == src2) { -+ Move_s(dst, src1); -+ return; -+ } -+ -+ // Check if one of operands is NaN. -+ CompareIsNanF32(src1, src2); -+ BranchTrueF(out_of_line); -+ -+ fmin_s(dst, src1, src2); -+} -+ -+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, -+ FPURegister src2) { -+ fadd_s(dst, src1, src2); -+} -+ -+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, -+ FPURegister src2, Label* out_of_line) { -+ if (src1 == src2) { -+ Move_d(dst, src1); -+ return; -+ } -+ -+ // Check if one of operands is NaN. -+ CompareIsNanF64(src1, src2); -+ BranchTrueF(out_of_line); -+ -+ fmax_d(dst, src1, src2); -+} -+ -+void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, -+ FPURegister src2) { -+ fadd_d(dst, src1, src2); -+} -+ -+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, -+ FPURegister src2, Label* out_of_line) { -+ if (src1 == src2) { -+ Move_d(dst, src1); -+ return; -+ } -+ -+ // Check if one of operands is NaN. 
-+ CompareIsNanF64(src1, src2);
-+ BranchTrueF(out_of_line);
-+
-+ fmin_d(dst, src1, src2);
-+}
-+
-+void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
-+ FPURegister src2) {
-+ fadd_d(dst, src1, src2);
-+}
-+
-+static const int kRegisterPassedArguments = 8;
-+
-+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
-+ int num_double_arguments) {
-+ int stack_passed_words = 0;
-+ num_reg_arguments += 2 * num_double_arguments;
-+
-+ // Up to eight simple arguments are passed in registers a0..a7.
-+ if (num_reg_arguments > kRegisterPassedArguments) {
-+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
-+ }
-+ stack_passed_words += kCArgSlotCount;
-+ return stack_passed_words;
-+}
-+
-+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
-+ int num_double_arguments,
-+ Register scratch) {
-+ int frame_alignment = ActivationFrameAlignment();
-+
-+ // Up to eight simple arguments are passed in registers a0..a7. The
-+ // remaining arguments are pushed on the stack (arg slot calculation
-+ // handled by CalculateStackPassedWords()).
-+ int stack_passed_arguments =
-+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
-+ if (frame_alignment > kPointerSize) {
-+ // Make stack end at alignment and make room for the stack arguments
-+ // and the original value of sp.
-+ mov(scratch, sp);
-+ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
-+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
-+ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
-+ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-+ } else {
-+ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
-+ }
-+}
-+
-+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
-+ Register scratch) {
-+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
-+}
-+
-+void TurboAssembler::CallCFunction(ExternalReference function,
-+ int num_reg_arguments,
-+ int num_double_arguments) {
-+ BlockTrampolinePoolScope block_trampoline_pool(this);
-+ li(t7, function);
-+ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
-+}
-+
-+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
-+ int num_double_arguments) {
-+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-+}
-+
-+void TurboAssembler::CallCFunction(ExternalReference function,
-+ int num_arguments) {
-+ CallCFunction(function, num_arguments, 0);
-+}
-+
-+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
-+ CallCFunction(function, num_arguments, 0);
-+}
-+
-+void TurboAssembler::CallCFunctionHelper(Register function,
-+ int num_reg_arguments,
-+ int num_double_arguments) {
-+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
-+ DCHECK(has_frame());
-+ // Make sure that the stack is aligned before calling a C function unless
-+ // running in the simulator. The simulator has its own alignment check which
-+ // provides more information.
-+ // The argument slots are presumed to have been set up by
-+ // PrepareCallCFunction. The C function must be called via t7.
-+ -+#if V8_HOST_ARCH_LOONG64 -+ if (emit_debug_code()) { -+ int frame_alignment = base::OS::ActivationFrameAlignment(); -+ int frame_alignment_mask = frame_alignment - 1; -+ if (frame_alignment > kPointerSize) { -+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); -+ Label alignment_as_expected; -+ { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ And(scratch, sp, Operand(frame_alignment_mask)); -+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); -+ } -+ // Don't use Check here, as it will call Runtime_Abort possibly -+ // re-entering here. -+ stop(); -+ bind(&alignment_as_expected); -+ } -+ } -+#endif // V8_HOST_ARCH_LOONG64 -+ -+ // Just call directly. The function called cannot cause a GC, or -+ // allow preemption, so the return address in the link register -+ // stays correct. -+ { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ if (function != t7) { -+ mov(t7, function); -+ function = t7; -+ } -+ -+ // Save the frame pointer and PC so that the stack layout remains iterable, -+ // even without an ExitFrame which normally exists between JS and C frames. -+ // 't' registers are caller-saved so this is safe as a scratch register. -+ Register pc_scratch = t1; -+ Register scratch = t2; -+ DCHECK(!AreAliased(pc_scratch, scratch, function)); -+ -+ pcaddi(pc_scratch, 1); -+ -+ // See x64 code for reasoning about how to address the isolate data fields. -+ if (root_array_available()) { -+ St_d(pc_scratch, MemOperand(kRootRegister, -+ IsolateData::fast_c_call_caller_pc_offset())); -+ St_d(fp, MemOperand(kRootRegister, -+ IsolateData::fast_c_call_caller_fp_offset())); -+ } else { -+ DCHECK_NOT_NULL(isolate()); -+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); -+ St_d(pc_scratch, MemOperand(scratch, 0)); -+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); -+ St_d(fp, MemOperand(scratch, 0)); -+ } -+ -+ Call(function); -+ -+ // We don't unset the PC; the FP is the source of truth. 
-+ if (root_array_available()) { -+ St_d(zero_reg, MemOperand(kRootRegister, -+ IsolateData::fast_c_call_caller_fp_offset())); -+ } else { -+ DCHECK_NOT_NULL(isolate()); -+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); -+ St_d(zero_reg, MemOperand(scratch, 0)); -+ } -+ } -+ -+ int stack_passed_arguments = -+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments); -+ -+ if (base::OS::ActivationFrameAlignment() > kPointerSize) { -+ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); -+ } else { -+ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize)); -+ } -+} -+ -+#undef BRANCH_ARGS_CHECK -+ -+void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, -+ Condition cc, Label* condition_met) { -+ And(scratch, object, Operand(~kPageAlignmentMask)); -+ Ld_d(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); -+ And(scratch, scratch, Operand(mask)); -+ Branch(condition_met, cc, scratch, Operand(zero_reg)); -+} -+ -+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, -+ Register reg4, Register reg5, -+ Register reg6) { -+ RegList regs = 0; -+ if (reg1.is_valid()) regs |= reg1.bit(); -+ if (reg2.is_valid()) regs |= reg2.bit(); -+ if (reg3.is_valid()) regs |= reg3.bit(); -+ if (reg4.is_valid()) regs |= reg4.bit(); -+ if (reg5.is_valid()) regs |= reg5.bit(); -+ if (reg6.is_valid()) regs |= reg6.bit(); -+ -+ const RegisterConfiguration* config = RegisterConfiguration::Default(); -+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { -+ int code = config->GetAllocatableGeneralCode(i); -+ Register candidate = Register::from_code(code); -+ if (regs & candidate.bit()) continue; -+ return candidate; -+ } -+ UNREACHABLE(); -+} -+ -+void TurboAssembler::ComputeCodeStartAddress(Register dst) { -+ // TODO: range check, add Pcadd macro function? -+ pcaddi(dst, -pc_offset() >> 2); -+} -+ -+void TurboAssembler::ResetSpeculationPoisonRegister() { -+ li(kSpeculationPoisonRegister, -1); -+} -+ -+void TurboAssembler::CallForDeoptimization(Address target, int deopt_id, -+ Label* exit, DeoptimizeKind kind) { -+ USE(exit, kind); -+ NoRootArrayScope no_root_array(this); -+ -+ // Save the deopt id in kRootRegister (we don't need the roots array from now -+ // on). -+ DCHECK_LE(deopt_id, 0xFFFF); -+ li(kRootRegister, deopt_id); -+ Call(target, RelocInfo::RUNTIME_ENTRY); -+} -+ -+void TurboAssembler::LoadCodeObjectEntry(Register destination, -+ Register code_object) { -+ // Code objects are called differently depending on whether we are generating -+ // builtin code (which will later be embedded into the binary) or compiling -+ // user JS code at runtime. -+ // * Builtin code runs in --jitless mode and thus must not call into on-heap -+ // Code targets. Instead, we dispatch through the builtins entry table. -+ // * Codegen at runtime does not have this restriction and we can use the -+ // shorter, branchless instruction sequence. The assumption here is that -+ // targets are usually generated code and not builtin Code objects. -+ if (options().isolate_independent_code) { -+ DCHECK(root_array_available()); -+ Label if_code_is_off_heap, out; -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ -+ DCHECK(!AreAliased(destination, scratch)); -+ DCHECK(!AreAliased(code_object, scratch)); -+ -+ // Check whether the Code object is an off-heap trampoline. If so, call its -+ // (off-heap) entry point directly without going through the (on-heap) -+ // trampoline. 
Otherwise, just call the Code object as always.
-+ Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
-+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
-+ BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
-+ // Not an off-heap trampoline object, the entry point is at
-+ // Code::raw_instruction_start().
-+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-+ Branch(&out);
-+
-+ // An off-heap trampoline, the entry point is loaded from the builtin entry
-+ // table.
-+ bind(&if_code_is_off_heap);
-+ Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
-+ slli_d(destination, scratch, kSystemPointerSizeLog2);
-+ Add_d(destination, destination, kRootRegister);
-+ Ld_d(destination,
-+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
-+
-+ bind(&out);
-+ } else {
-+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
-+ }
-+}
-+
-+void TurboAssembler::CallCodeObject(Register code_object) {
-+ LoadCodeObjectEntry(code_object, code_object);
-+ Call(code_object);
-+}
-+
-+void TurboAssembler::JumpCodeObject(Register code_object) {
-+ LoadCodeObjectEntry(code_object, code_object);
-+ Jump(code_object);
-+}
-+
-+} // namespace internal
-+} // namespace v8
-+
-+#endif // V8_TARGET_ARCH_LOONG64
-diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
-new file mode 100644
-index 00000000..497d61fb
---- /dev/null
-+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
-@@ -0,0 +1,1077 @@
-+// Copyright 2012 the V8 project authors. All rights reserved.
-+// Use of this source code is governed by a BSD-style license that can be
-+// found in the LICENSE file.
-+
-+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
-+#error This header must be included via macro-assembler.h
-+#endif
-+
-+#ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
-+#define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
-+
-+#include "src/codegen/assembler.h"
-+#include "src/codegen/loong64/assembler-loong64.h"
-+#include "src/common/globals.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// Forward declarations.
-+enum class AbortReason : uint8_t;
-+
-+// Reserved Register Usage Summary.
-+//
-+// Registers t8 and t7 are reserved for use by the MacroAssembler.
-+//
-+// The programmer should know that the MacroAssembler may clobber these two,
-+// but won't touch other registers except in special cases.
-+//
-+// Per the LoongArch ABI, registers t0 -- t8 may be used for indirect function
-+// calls via 'jirl t[0-8]' instructions.
-+
-+// Flags used for LeaveExitFrame function.
-+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
-+
-+// Flags used for the li macro-assembler function.
-+enum LiFlags {
-+ // If the constant value can be represented in just 12 bits, then
-+ // optimize the li to use a single instruction, rather than a lu12i_w/
-+ // lu32i_d/lu52i_d/ori sequence. A number of other optimizations that emit
-+ // fewer than the maximum number of instructions also exist.
-+ OPTIMIZE_SIZE = 0,
-+ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence),
-+ // even if the constant could be loaded with just one, so that this value is
-+ // patchable later.
-+ CONSTANT_SIZE = 1,
-+ // For address loads only 3 instructions are required. Used to mark
-+ // a constant load that will be used as an address without relocation
-+ // information. It ensures predictable code size, so specific sites
-+ // in code are patchable.
-+ ADDRESS_LOAD = 2 -+}; -+ -+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; -+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; -+ -+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, -+ Register reg3 = no_reg, -+ Register reg4 = no_reg, -+ Register reg5 = no_reg, -+ Register reg6 = no_reg); -+ -+// ----------------------------------------------------------------------------- -+// Static helper functions. -+ -+#define SmiWordOffset(offset) (offset + kPointerSize / 2) -+ -+// Generate a MemOperand for loading a field from an object. -+inline MemOperand FieldMemOperand(Register object, int offset) { -+ return MemOperand(object, offset - kHeapObjectTag); -+} -+ -+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { -+ public: -+ using TurboAssemblerBase::TurboAssemblerBase; -+ -+ // Activation support. -+ void EnterFrame(StackFrame::Type type); -+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { -+ // Out-of-line constant pool not implemented on loong64. -+ UNREACHABLE(); -+ } -+ void LeaveFrame(StackFrame::Type type); -+ -+ // Generates function and stub prologue code. -+ void StubPrologue(StackFrame::Type type); -+ void Prologue(); -+ -+ void InitializeRootRegister() { -+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); -+ li(kRootRegister, Operand(isolate_root)); -+ } -+ -+ // Jump unconditionally to given label. -+ // Use rather b(Label) for code generation. -+ void jmp(Label* L) { Branch(L); } -+ -+ // ------------------------------------------------------------------------- -+ // Debugging. -+ -+ void Trap() override; -+ void DebugBreak() override; -+ -+ // Calls Abort(msg) if the condition cc is not satisfied. -+ // Use --debug_code to enable. -+ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk); -+ -+ // Like Assert(), but always enabled. -+ void Check(Condition cc, AbortReason reason, Register rj, Operand rk); -+ -+ // Print a message to stdout and abort execution. 
-+  void Abort(AbortReason msg);
-+
-+  void Branch(Label* label, bool need_link = false);
-+  void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
-+              bool need_link = false);
-+  void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
-+                   bool need_link = false);
-+  void Branch(Label* L, Condition cond, Register rj, RootIndex index);
-+
-+  // Floating point branches
-+  void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
-+                  CFRegister cd = FCC0) {
-+    CompareF(cmp1, cmp2, cc, cd, true);
-+  }
-+
-+  void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
-+                       CFRegister cd = FCC0) {
-+    CompareIsNanF(cmp1, cmp2, cd, true);
-+  }
-+
-+  void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
-+                  CFRegister cd = FCC0) {
-+    CompareF(cmp1, cmp2, cc, cd, false);
-+  }
-+
-+  void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
-+                       CFRegister cd = FCC0) {
-+    CompareIsNanF(cmp1, cmp2, cd, false);
-+  }
-+
-+  void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
-+  void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
-+
-+  void BranchTrueF(Label* target, CFRegister cc = FCC0);
-+  void BranchFalseF(Label* target, CFRegister cc = FCC0);
-+
-+  static int InstrCountForLi64Bit(int64_t value);
-+  inline void LiLower32BitHelper(Register rd, Operand j);
-+  void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
-+  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
-+  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
-+    li(rd, Operand(j), mode);
-+  }
-+  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
-+    li(rd, Operand(static_cast<int64_t>(j)), mode);
-+  }
-+  void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
-+  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
-+  void li(Register dst, const StringConstantBase* string,
-+          LiFlags mode = OPTIMIZE_SIZE);
-+
-+  void LoadFromConstantsTable(Register destination,
-+                              int constant_index) override;
-+  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
-+  void LoadRootRelative(Register destination, int32_t offset) override;
-+
-+// Jump, Call, and Ret pseudo instructions implementing inter-working.
-+#define COND_ARGS                              \
-+  Condition cond = al, Register rj = zero_reg, \
-+            const Operand &rk = Operand(zero_reg)
-+
-+  void Jump(Register target, COND_ARGS);
-+  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
-+  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
-+  // Differs from li in that this method saves the target to memory and then
-+  // loads it into a register using ld_d; it can be used in the wasm jump
-+  // table for concurrent patching.
-+  void PatchAndJump(Address target);
-+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
-+  void Jump(const ExternalReference& reference) override;
-+  void Call(Register target, COND_ARGS);
-+  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-+            COND_ARGS);
-+  void Call(Label* target);
-+  void LoadAddress(Register dst, Label* target);
-+
-+  // Load the builtin given by the Smi in |builtin_index| into the same
-+  // register.
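-+  // (Aside, a sketch rather than code from this patch: the lookup amounts to
-+  // reading the isolate's builtin entry table at the untagged index, i.e.
-+  // roughly, given an isolate_root byte pointer:
-+  //
-+  //   Address entry = *reinterpret_cast<Address*>(
-+  //       isolate_root + IsolateData::builtin_entry_table_offset() +
-+  //       builtin_index * kSystemPointerSize);
-+  // )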
-+  void LoadEntryFromBuiltinIndex(Register builtin_index);
-+  void CallBuiltinByIndex(Register builtin_index) override;
-+  void CallBuiltin(int builtin_index);
-+
-+  void LoadCodeObjectEntry(Register destination, Register code_object) override;
-+
-+  void CallCodeObject(Register code_object) override;
-+
-+  void JumpCodeObject(Register code_object) override;
-+
-+  // Generates an instruction sequence s.t. the return address points to the
-+  // instruction following the call.
-+  // The return address on the stack is used by frame iteration.
-+  void StoreReturnAddressAndCall(Register target);
-+
-+  void CallForDeoptimization(Address target, int deopt_id, Label* exit,
-+                             DeoptimizeKind kind);
-+
-+  void Ret(COND_ARGS);
-+
-+  // Emit code to discard a non-negative number of pointer-sized elements
-+  // from the stack, clobbering only the sp register.
-+  void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
-+            const Operand& op = Operand(no_reg));
-+
-+  // Trivial case of DropAndRet that only emits 2 instructions.
-+  void DropAndRet(int drop);
-+
-+  void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
-+
-+  void Ld_d(Register rd, const MemOperand& rj);
-+  void St_d(Register rd, const MemOperand& rj);
-+
-+  void push(Register src) {
-+    Add_d(sp, sp, Operand(-kPointerSize));
-+    St_d(src, MemOperand(sp, 0));
-+  }
-+  void Push(Register src) { push(src); }
-+  void Push(Handle<HeapObject> handle);
-+  void Push(Smi smi);
-+
-+  // Push two registers. Pushes leftmost register first (to highest address).
-+  void Push(Register src1, Register src2) {
-+    Sub_d(sp, sp, Operand(2 * kPointerSize));
-+    St_d(src1, MemOperand(sp, 1 * kPointerSize));
-+    St_d(src2, MemOperand(sp, 0 * kPointerSize));
-+  }
-+
-+  // Push three registers. Pushes leftmost register first (to highest address).
-+  void Push(Register src1, Register src2, Register src3) {
-+    Sub_d(sp, sp, Operand(3 * kPointerSize));
-+    St_d(src1, MemOperand(sp, 2 * kPointerSize));
-+    St_d(src2, MemOperand(sp, 1 * kPointerSize));
-+    St_d(src3, MemOperand(sp, 0 * kPointerSize));
-+  }
-+
-+  // Push four registers. Pushes leftmost register first (to highest address).
-+  void Push(Register src1, Register src2, Register src3, Register src4) {
-+    Sub_d(sp, sp, Operand(4 * kPointerSize));
-+    St_d(src1, MemOperand(sp, 3 * kPointerSize));
-+    St_d(src2, MemOperand(sp, 2 * kPointerSize));
-+    St_d(src3, MemOperand(sp, 1 * kPointerSize));
-+    St_d(src4, MemOperand(sp, 0 * kPointerSize));
-+  }
-+
-+  // Push five registers. Pushes leftmost register first (to highest address).
-+  void Push(Register src1, Register src2, Register src3, Register src4,
-+            Register src5) {
-+    Sub_d(sp, sp, Operand(5 * kPointerSize));
-+    St_d(src1, MemOperand(sp, 4 * kPointerSize));
-+    St_d(src2, MemOperand(sp, 3 * kPointerSize));
-+    St_d(src3, MemOperand(sp, 2 * kPointerSize));
-+    St_d(src4, MemOperand(sp, 1 * kPointerSize));
-+    St_d(src5, MemOperand(sp, 0 * kPointerSize));
-+  }
-+
-+  void Push(Register src, Condition cond, Register tst1, Register tst2) {
-+    // Since we don't have conditional execution we use a Branch.
-+    Label skip;
-+    Branch(&skip, cond, tst1, Operand(tst2));
-+    addi_d(sp, sp, -kPointerSize);
-+    st_d(src, sp, 0);
-+    bind(&skip);
-+  }
-+
-+  void SaveRegisters(RegList registers);
-+  void RestoreRegisters(RegList registers);
-+
-+  void CallRecordWriteStub(Register object, Register address,
-+                           RememberedSetAction remembered_set_action,
-+                           SaveFPRegsMode fp_mode);
-+  void CallRecordWriteStub(Register object, Register address,
-+                           RememberedSetAction remembered_set_action,
-+                           SaveFPRegsMode fp_mode, Address wasm_target);
-+  void CallEphemeronKeyBarrier(Register object, Register address,
-+                               SaveFPRegsMode fp_mode);
-+
-+  // Push multiple registers on the stack.
-+  // Registers are saved in numerical order, with higher numbered registers
-+  // saved in higher memory addresses.
-+  void MultiPush(RegList regs);
-+  void MultiPush(RegList regs1, RegList regs2);
-+  void MultiPush(RegList regs1, RegList regs2, RegList regs3);
-+  void MultiPushFPU(RegList regs);
-+
-+  // Calculate how much stack space (in bytes) is required to store caller
-+  // registers, excluding those specified in the arguments.
-+  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
-+                                      Register exclusion1 = no_reg,
-+                                      Register exclusion2 = no_reg,
-+                                      Register exclusion3 = no_reg) const;
-+
-+  // Push caller saved registers on the stack, and return the number of bytes
-+  // the stack pointer is adjusted by.
-+  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
-+                      Register exclusion2 = no_reg,
-+                      Register exclusion3 = no_reg);
-+  // Restore caller saved registers from the stack, and return the number of
-+  // bytes the stack pointer is adjusted by.
-+  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
-+                     Register exclusion2 = no_reg,
-+                     Register exclusion3 = no_reg);
-+
-+  void pop(Register dst) {
-+    Ld_d(dst, MemOperand(sp, 0));
-+    Add_d(sp, sp, Operand(kPointerSize));
-+  }
-+  void Pop(Register dst) { pop(dst); }
-+
-+  // Pop two registers. Pops rightmost register first (from lower address).
-+  void Pop(Register src1, Register src2) {
-+    DCHECK(src1 != src2);
-+    Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
-+    Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
-+    Add_d(sp, sp, 2 * kPointerSize);
-+  }
-+
-+  // Pop three registers. Pops rightmost register first (from lower address).
-+  void Pop(Register src1, Register src2, Register src3) {
-+    Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
-+    Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
-+    Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
-+    Add_d(sp, sp, 3 * kPointerSize);
-+  }
-+
-+  void Pop(uint32_t count = 1) { Add_d(sp, sp, Operand(count * kPointerSize)); }
-+
-+  // Pops multiple values from the stack and loads them into the
-+  // registers specified in regs. Pop order is the opposite of MultiPush.
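-+  // (Aside, an illustrative sketch, not code from this patch: a RegList is a
-+  // bitmask over register codes, so MultiPush walks the mask from the highest
-+  // code down and MultiPop walks it back up:
-+  //
-+  //   for (int code = kNumRegs - 1; code >= 0; --code)  // MultiPush
-+  //     if (regs & (1 << code)) Push(Register::from_code(code));
-+  //   for (int code = 0; code < kNumRegs; ++code)       // MultiPop
-+  //     if (regs & (1 << code)) Pop(Register::from_code(code));
-+  // )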
-+ void MultiPop(RegList regs); -+ void MultiPop(RegList regs1, RegList regs2); -+ void MultiPop(RegList regs1, RegList regs2, RegList regs3); -+ -+ void MultiPopFPU(RegList regs); -+ -+#define DEFINE_INSTRUCTION(instr) \ -+ void instr(Register rd, Register rj, const Operand& rk); \ -+ void instr(Register rd, Register rj, Register rk) { \ -+ instr(rd, rj, Operand(rk)); \ -+ } \ -+ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); } -+ -+#define DEFINE_INSTRUCTION2(instr) \ -+ void instr(Register rj, const Operand& rk); \ -+ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \ -+ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); } -+ -+ DEFINE_INSTRUCTION(Add_w) -+ DEFINE_INSTRUCTION(Add_d) -+ DEFINE_INSTRUCTION(Div_w) -+ DEFINE_INSTRUCTION(Div_wu) -+ DEFINE_INSTRUCTION(Div_du) -+ DEFINE_INSTRUCTION(Mod_w) -+ DEFINE_INSTRUCTION(Mod_wu) -+ DEFINE_INSTRUCTION(Div_d) -+ DEFINE_INSTRUCTION(Sub_w) -+ DEFINE_INSTRUCTION(Sub_d) -+ DEFINE_INSTRUCTION(Mod_d) -+ DEFINE_INSTRUCTION(Mod_du) -+ DEFINE_INSTRUCTION(Mul_w) -+ DEFINE_INSTRUCTION(Mulh_w) -+ DEFINE_INSTRUCTION(Mulh_wu) -+ DEFINE_INSTRUCTION(Mul_d) -+ DEFINE_INSTRUCTION(Mulh_d) -+ DEFINE_INSTRUCTION2(Div_w) -+ DEFINE_INSTRUCTION2(Div_d) -+ DEFINE_INSTRUCTION2(Div_wu) -+ DEFINE_INSTRUCTION2(Div_du) -+ -+ DEFINE_INSTRUCTION(And) -+ DEFINE_INSTRUCTION(Or) -+ DEFINE_INSTRUCTION(Xor) -+ DEFINE_INSTRUCTION(Nor) -+ DEFINE_INSTRUCTION2(Neg) -+ DEFINE_INSTRUCTION(Andn) -+ DEFINE_INSTRUCTION(Orn) -+ -+ DEFINE_INSTRUCTION(Slt) -+ DEFINE_INSTRUCTION(Sltu) -+ DEFINE_INSTRUCTION(Slti) -+ DEFINE_INSTRUCTION(Sltiu) -+ DEFINE_INSTRUCTION(Sle) -+ DEFINE_INSTRUCTION(Sleu) -+ DEFINE_INSTRUCTION(Sgt) -+ DEFINE_INSTRUCTION(Sgtu) -+ DEFINE_INSTRUCTION(Sge) -+ DEFINE_INSTRUCTION(Sgeu) -+ -+ DEFINE_INSTRUCTION(Rotr_w) -+ DEFINE_INSTRUCTION(Rotr_d) -+ -+#undef DEFINE_INSTRUCTION -+#undef DEFINE_INSTRUCTION2 -+#undef DEFINE_INSTRUCTION3 -+ -+ void SmiUntag(Register dst, const MemOperand& src); -+ void SmiUntag(Register dst, Register src) { -+ if (SmiValuesAre32Bits()) { -+ srai_d(dst, src, kSmiShift); -+ } else { -+ DCHECK(SmiValuesAre31Bits()); -+ srai_w(dst, src, kSmiShift); -+ } -+ } -+ -+ void SmiUntag(Register reg) { SmiUntag(reg, reg); } -+ -+ // Removes current frame and its arguments from the stack preserving -+ // the arguments and a return address pushed to the stack for the next call. -+ // Both |callee_args_count| and |caller_args_count| do not include -+ // receiver. |callee_args_count| is not modified. |caller_args_count| -+ // is trashed. -+ void PrepareForTailCall(Register callee_args_count, -+ Register caller_args_count, Register scratch0, -+ Register scratch1); -+ -+ int CalculateStackPassedWords(int num_reg_arguments, -+ int num_double_arguments); -+ -+ // Before calling a C-function from generated code, align arguments on stack -+ // and add space for the four mips argument slots. -+ // After aligning the frame, non-register arguments must be stored on the -+ // stack, after the argument-slots using helper: CFunctionArgumentOperand(). -+ // The argument count assumes all arguments are word sized. -+ // Some compilers/platforms require the stack to be aligned when calling -+ // C++ code. -+ // Needs a scratch register to do some arithmetic. This register will be -+ // trashed. 
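-+  // (Aside, for illustration only, not part of the patch: each
-+  // DEFINE_INSTRUCTION(Add_d) above expands to three overloads that funnel
-+  // into the single Operand form, e.g.:
-+  //
-+  //   void Add_d(Register rd, Register rj, const Operand& rk);
-+  //   void Add_d(Register rd, Register rj, Register rk) {
-+  //     Add_d(rd, rj, Operand(rk));
-+  //   }
-+  //   void Add_d(Register rj, Register rk, int32_t j) {
-+  //     Add_d(rj, rk, Operand(j));
-+  //   }
-+  // )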
-+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, -+ Register scratch); -+ void PrepareCallCFunction(int num_reg_arguments, Register scratch); -+ -+ // Calls a C function and cleans up the space for arguments allocated -+ // by PrepareCallCFunction. The called function is not allowed to trigger a -+ // garbage collection, since that might move the code and invalidate the -+ // return address (unless this is somehow accounted for by the called -+ // function). -+ void CallCFunction(ExternalReference function, int num_arguments); -+ void CallCFunction(Register function, int num_arguments); -+ void CallCFunction(ExternalReference function, int num_reg_arguments, -+ int num_double_arguments); -+ void CallCFunction(Register function, int num_reg_arguments, -+ int num_double_arguments); -+ void MovFromFloatResult(DoubleRegister dst); -+ void MovFromFloatParameter(DoubleRegister dst); -+ -+ // There are two ways of passing double arguments on MIPS, depending on -+ // whether soft or hard floating point ABI is used. These functions -+ // abstract parameter passing for the three different ways we call -+ // C functions from generated code. -+ void MovToFloatParameter(DoubleRegister src); -+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); -+ void MovToFloatResult(DoubleRegister src); -+ -+ // See comments at the beginning of Builtins::Generate_CEntry. -+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } -+ inline void PrepareCEntryFunction(const ExternalReference& ref) { -+ li(a1, ref); -+ } -+ -+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, -+ Label* condition_met); -+#undef COND_ARGS -+ -+ // Performs a truncating conversion of a floating point number as used by -+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. -+ // Exits with 'result' holding the answer. -+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, -+ DoubleRegister double_input, StubCallMode stub_mode); -+ -+ // Conditional move. -+ void Movz(Register rd, Register rj, Register rk); -+ void Movn(Register rd, Register rj, Register rk); -+ -+ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0); -+ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0); -+ -+ void LoadZeroIfConditionNotZero(Register dest, Register condition); -+ void LoadZeroIfConditionZero(Register dest, Register condition); -+ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk, -+ Condition cond); -+ -+ void Clz_w(Register rd, Register rj); -+ void Clz_d(Register rd, Register rj); -+ void Ctz_w(Register rd, Register rj); -+ void Ctz_d(Register rd, Register rj); -+ void Popcnt_w(Register rd, Register rj); -+ void Popcnt_d(Register rd, Register rj); -+ -+ void ExtractBits(Register dest, Register source, Register pos, int size, -+ bool sign_extend = false); -+ void InsertBits(Register dest, Register source, Register pos, int size); -+ -+ void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lswb); -+ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); -+ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); -+ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); -+ void Neg_s(FPURegister fd, FPURegister fj); -+ void Neg_d(FPURegister fd, FPURegister fk); -+ -+ // Convert single to unsigned word. 
-+  void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch);
-+  void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch);
-+
-+  // Change endianness
-+  void ByteSwapSigned(Register dest, Register src, int operand_size);
-+  void ByteSwapUnsigned(Register dest, Register src, int operand_size);
-+
-+  void Ld_b(Register rd, const MemOperand& rj);
-+  void Ld_bu(Register rd, const MemOperand& rj);
-+  void St_b(Register rd, const MemOperand& rj);
-+
-+  void Ld_h(Register rd, const MemOperand& rj);
-+  void Ld_hu(Register rd, const MemOperand& rj);
-+  void St_h(Register rd, const MemOperand& rj);
-+
-+  void Ld_w(Register rd, const MemOperand& rj);
-+  void Ld_wu(Register rd, const MemOperand& rj);
-+  void St_w(Register rd, const MemOperand& rj);
-+
-+  void Fld_s(FPURegister fd, const MemOperand& src);
-+  void Fst_s(FPURegister fj, const MemOperand& dst);
-+
-+  void Fld_d(FPURegister fd, const MemOperand& src);
-+  void Fst_d(FPURegister fj, const MemOperand& dst);
-+
-+  void Ll_w(Register rd, const MemOperand& rj);
-+  void Sc_w(Register rd, const MemOperand& rj);
-+
-+  void Ll_d(Register rd, const MemOperand& rj);
-+  void Sc_d(Register rd, const MemOperand& rj);
-+
-+  // These functions assume (and assert) that src1!=src2. It is permitted
-+  // for the result to alias either input register.
-+  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
-+                  Label* out_of_line);
-+  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
-+                  Label* out_of_line);
-+  void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
-+                  Label* out_of_line);
-+  void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
-+                  Label* out_of_line);
-+
-+  // Generate out-of-line cases for the macros above.
-+  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
-+  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
-+  void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
-+  void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
-+
-+  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
-+
-+  void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); }
-+
-+  inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
-+  inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
-+
-+  inline void Move(Register dst, Register src) {
-+    if (dst != src) {
-+      mov(dst, src);
-+    }
-+  }
-+
-+  inline void FmoveLow(Register dst_low, FPURegister src) {
-+    movfr2gr_s(dst_low, src);
-+  }
-+
-+  void FmoveLow(FPURegister dst, Register src_low);
-+
-+  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
-+
-+  inline void Move_d(FPURegister dst, FPURegister src) {
-+    if (dst != src) {
-+      fmov_d(dst, src);
-+    }
-+  }
-+
-+  inline void Move_s(FPURegister dst, FPURegister src) {
-+    if (dst != src) {
-+      fmov_s(dst, src);
-+    }
-+  }
-+
-+  void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
-+  void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
-+  void Move(FPURegister dst, uint32_t src);
-+  void Move(FPURegister dst, uint64_t src);
-+
-+  // AdddOverflow sets the overflow register to a negative value if
-+  // overflow occurred, otherwise it is zero or positive.
-+  void AdddOverflow(Register dst, Register left, const Operand& right,
-+                    Register overflow);
-+  // SubdOverflow sets the overflow register to a negative value if
-+  // overflow occurred, otherwise it is zero or positive.
-+  void SubdOverflow(Register dst, Register left, const Operand& right,
-+                    Register overflow);
-+  // MulOverflow sets the overflow register to zero if no overflow occurred.
-+  void MulOverflow(Register dst, Register left, const Operand& right,
-+                   Register overflow);
-+
-+  // Number of instructions needed for calculation of switch table entry
-+  // address.
-+  static const int kSwitchTablePrologueSize = 5;
-+
-+  // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
-+  // functor/function with 'Label *func(size_t index)' declaration.
-+  template <typename Func>
-+  void GenerateSwitchTable(Register index, size_t case_count,
-+                           Func GetLabelFunction);
-+
-+  // Load an object from the root table.
-+  void LoadRoot(Register destination, RootIndex index) override;
-+  void LoadRoot(Register destination, RootIndex index, Condition cond,
-+                Register src1, const Operand& src2);
-+
-+  // If the value is a NaN, canonicalize it; src must be a NaN.
-+  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
-+
-+  // ---------------------------------------------------------------------------
-+  // FPU macros. These do not handle special cases like NaN or +- inf.
-+
-+  // Convert unsigned word to double.
-+  void Ffint_d_uw(FPURegister fd, FPURegister fj);
-+  void Ffint_d_uw(FPURegister fd, Register rj);
-+
-+  // Convert unsigned long to double.
-+  void Ffint_d_ul(FPURegister fd, FPURegister fj);
-+  void Ffint_d_ul(FPURegister fd, Register rj);
-+
-+  // Convert unsigned word to float.
-+  void Ffint_s_uw(FPURegister fd, FPURegister fj);
-+  void Ffint_s_uw(FPURegister fd, Register rj);
-+
-+  // Convert unsigned long to float.
-+  void Ffint_s_ul(FPURegister fd, FPURegister fj);
-+  void Ffint_s_ul(FPURegister fd, Register rj);
-+
-+  // Convert double to unsigned word.
-+  void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch);
-+  void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch);
-+
-+  // Convert single to unsigned word.
-+  void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
-+  void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch);
-+
-+  // Convert double to unsigned long.
-+  void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch,
-+                    Register result = no_reg);
-+  void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch,
-+                    Register result = no_reg);
-+
-+  // Convert single to unsigned long.
-+  void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch,
-+                    Register result = no_reg);
-+  void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch,
-+                    Register result = no_reg);
-+
-+  // Round double functions
-+  void Trunc_d(FPURegister fd, FPURegister fj);
-+  void Round_d(FPURegister fd, FPURegister fj);
-+  void Floor_d(FPURegister fd, FPURegister fj);
-+  void Ceil_d(FPURegister fd, FPURegister fj);
-+
-+  // Round float functions
-+  void Trunc_s(FPURegister fd, FPURegister fj);
-+  void Round_s(FPURegister fd, FPURegister fj);
-+  void Floor_s(FPURegister fd, FPURegister fj);
-+  void Ceil_s(FPURegister fd, FPURegister fj);
-+
-+  // Jump if the register contains a smi.
-+  void JumpIfSmi(Register value, Label* smi_label, Register scratch = t7);
-+
-+  void JumpIfEqual(Register a, int32_t b, Label* dest) {
-+    li(kScratchReg, Operand(b));
-+    Branch(dest, eq, a, Operand(kScratchReg));
-+  }
-+
-+  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
-+    li(kScratchReg, Operand(b));
-+    Branch(dest, lt, a, Operand(kScratchReg));
-+  }
-+
-+  // Push a standard frame, consisting of ra, fp, context and JS function.
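-+  // (Aside, a portable-C++ sketch, not code from this patch, of the overflow
-+  // contract of the AdddOverflow/SubdOverflow helpers above:
-+  //
-+  //   int64_t sum;
-+  //   bool of = __builtin_add_overflow(left, right, &sum);  // GCC/Clang builtin
-+  //   int64_t overflow = of ? -1 : 0;  // negative iff signed overflow occurred
-+  // )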
-+ void PushStandardFrame(Register function_reg); -+ -+ // Get the actual activation frame alignment for target environment. -+ static int ActivationFrameAlignment(); -+ -+ // Load Scaled Address instructions. Parameter sa (shift argument) must be -+ // between [1, 31] (inclusive). The scratch register may be clobbered. -+ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, -+ Register scratch = t7); -+ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, -+ Register scratch = t7); -+ -+ // Compute the start of the generated instruction stream from the current PC. -+ // This is an alternative to embedding the {CodeObject} handle as a reference. -+ void ComputeCodeStartAddress(Register dst); -+ -+ void ResetSpeculationPoisonRegister(); -+ -+ // Control-flow integrity: -+ -+ // Define a function entrypoint. This doesn't emit any code for this -+ // architecture, as control-flow integrity is not supported for it. -+ void CodeEntry() {} -+ // Define an exception handler. -+ void ExceptionHandler() {} -+ // Define an exception handler and bind a label. -+ void BindExceptionHandler(Label* label) { bind(label); } -+ -+ protected: -+ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); -+ inline int32_t GetOffset(Label* L, OffsetSize bits); -+ -+ private: -+ bool has_double_zero_reg_set_ = false; -+ -+ // Performs a truncating conversion of a floating point number as used by -+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it -+ // succeeds, otherwise falls through if result is saturated. On return -+ // 'result' either holds answer, or is clobbered on fall through. -+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, -+ Label* done); -+ -+ bool BranchShortOrFallback(Label* L, Condition cond, Register rj, -+ const Operand& rk, bool need_link); -+ -+ // f32 or f64 -+ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, -+ CFRegister cd, bool f32 = true); -+ -+ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, -+ bool f32 = true); -+ -+ void CallCFunctionHelper(Register function, int num_reg_arguments, -+ int num_double_arguments); -+ -+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); -+ -+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); -+ -+ // Push a fixed frame, consisting of ra, fp. -+ void PushCommonFrame(Register marker_reg = no_reg); -+ -+ void CallRecordWriteStub(Register object, Register address, -+ RememberedSetAction remembered_set_action, -+ SaveFPRegsMode fp_mode, Handle code_target, -+ Address wasm_target); -+}; -+ -+// MacroAssembler implements a collection of frequently used macros. -+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { -+ public: -+ using TurboAssembler::TurboAssembler; -+ -+ bool IsNear(Label* L, Condition cond, int rs_reg); -+ -+ // Swap two registers. If the scratch register is omitted then a slightly -+ // less efficient form using xor instead of mov is emitted. -+ void Swap(Register reg1, Register reg2, Register scratch = no_reg); -+ -+ void PushRoot(RootIndex index) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ LoadRoot(scratch, index); -+ Push(scratch); -+ } -+ -+ // Compare the object in a register to a value and jump if they are equal. 
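-+  // (Aside, not code from this patch: the scratch-free form of Swap above is
-+  // the classic xor exchange, in C++ terms:
-+  //
-+  //   a ^= b; b ^= a; a ^= b;  // swaps a and b without a temporary
-+  // )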
-+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ LoadRoot(scratch, index); -+ Branch(if_equal, eq, with, Operand(scratch)); -+ } -+ -+ // Compare the object in a register to a value and jump if they are not equal. -+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { -+ UseScratchRegisterScope temps(this); -+ Register scratch = temps.Acquire(); -+ LoadRoot(scratch, index); -+ Branch(if_not_equal, ne, with, Operand(scratch)); -+ } -+ -+ // Checks if value is in range [lower_limit, higher_limit] using a single -+ // comparison. -+ void JumpIfIsInRange(Register value, unsigned lower_limit, -+ unsigned higher_limit, Label* on_in_range); -+ -+ // --------------------------------------------------------------------------- -+ // GC Support -+ -+ // Notify the garbage collector that we wrote a pointer into an object. -+ // |object| is the object being stored into, |value| is the object being -+ // stored. value and scratch registers are clobbered by the operation. -+ // The offset is the offset from the start of the object, not the offset from -+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off). -+ void RecordWriteField( -+ Register object, int offset, Register value, Register scratch, -+ RAStatus ra_status, SaveFPRegsMode save_fp, -+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, -+ SmiCheck smi_check = INLINE_SMI_CHECK); -+ -+ // For a given |object| notify the garbage collector that the slot |address| -+ // has been written. |value| is the object being stored. The value and -+ // address registers are clobbered by the operation. -+ void RecordWrite( -+ Register object, Register address, Register value, RAStatus ra_status, -+ SaveFPRegsMode save_fp, -+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, -+ SmiCheck smi_check = INLINE_SMI_CHECK); -+ -+ void Pref(int32_t hint, const MemOperand& rs); -+ -+ // --------------------------------------------------------------------------- -+ // Pseudo-instructions. -+ -+ void LoadWordPair(Register rd, const MemOperand& rj, Register scratch); -+ void StoreWordPair(Register rd, const MemOperand& rj, Register scratch); -+ -+ // Convert double to unsigned long. -+ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch); -+ -+ void Ftintrz_l_d(FPURegister fd, FPURegister fj); -+ void Ftintrne_l_d(FPURegister fd, FPURegister fj); -+ void Ftintrm_l_d(FPURegister fd, FPURegister fj); -+ void Ftintrp_l_d(FPURegister fd, FPURegister fj); -+ -+ void Ftintrz_w_d(FPURegister fd, FPURegister fj); -+ void Ftintrne_w_d(FPURegister fd, FPURegister fj); -+ void Ftintrm_w_d(FPURegister fd, FPURegister fj); -+ void Ftintrp_w_d(FPURegister fd, FPURegister fj); -+ -+ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); -+ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); -+ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); -+ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); -+ -+ // Truncates a double using a specific rounding mode, and writes the value -+ // to the result register. -+ // The except_flag will contain any exceptions caused by the instruction. -+ // If check_inexact is kDontCheckForInexactConversion, then the inexact -+ // exception is masked. 
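-+  // (Aside, not code from this patch: the single comparison behind
-+  // JumpIfIsInRange above relies on unsigned wrap-around:
-+  //
-+  //   bool in_range = (value - lower_limit) <= (higher_limit - lower_limit);
-+  //   // unsigned subtraction wraps values below lower_limit to large numbers
-+  // )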
-+ void EmitFPUTruncate( -+ FPURoundingMode rounding_mode, Register result, -+ DoubleRegister double_input, Register scratch, -+ DoubleRegister double_scratch, Register except_flag, -+ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion); -+ -+ // Enter exit frame. -+ // argc - argument count to be dropped by LeaveExitFrame. -+ // save_doubles - saves FPU registers on stack, currently disabled. -+ // stack_space - extra stack space. -+ void EnterExitFrame(bool save_doubles, int stack_space = 0, -+ StackFrame::Type frame_type = StackFrame::EXIT); -+ -+ // Leave the current exit frame. -+ void LeaveExitFrame(bool save_doubles, Register arg_count, -+ bool do_return = NO_EMIT_RETURN, -+ bool argument_count_is_length = false); -+ -+ void LoadMap(Register destination, Register object); -+ -+ // Make sure the stack is aligned. Only emits code in debug mode. -+ void AssertStackIsAligned(); -+ -+ // Load the global proxy from the current context. -+ void LoadGlobalProxy(Register dst) { -+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); -+ } -+ -+ void LoadNativeContextSlot(int index, Register dst); -+ -+ // Load the initial map from the global function. The registers -+ // function and map can be the same, function is then overwritten. -+ void LoadGlobalFunctionInitialMap(Register function, Register map, -+ Register scratch); -+ -+ // ------------------------------------------------------------------------- -+ // JavaScript invokes. -+ -+ // Invoke the JavaScript function code by either calling or jumping. -+ void InvokeFunctionCode(Register function, Register new_target, -+ Register expected_parameter_count, -+ Register actual_parameter_count, InvokeFlag flag); -+ -+ // On function call, call into the debugger if necessary. -+ void CheckDebugHook(Register fun, Register new_target, -+ Register expected_parameter_count, -+ Register actual_parameter_count); -+ -+ // Invoke the JavaScript function in the given register. Changes the -+ // current context to the context in the function before invoking. -+ void InvokeFunctionWithNewTarget(Register function, Register new_target, -+ Register actual_parameter_count, -+ InvokeFlag flag); -+ void InvokeFunction(Register function, Register expected_parameter_count, -+ Register actual_parameter_count, InvokeFlag flag); -+ -+ // Frame restart support. -+ void MaybeDropFrames(); -+ -+ // Exception handling. -+ -+ // Push a new stack handler and link into stack handler chain. -+ void PushStackHandler(); -+ -+ // Unlink the stack handler on top of the stack from the stack handler chain. -+ // Must preserve the result register. -+ void PopStackHandler(); -+ -+ // ------------------------------------------------------------------------- -+ // Support functions. -+ -+ void GetObjectType(Register function, Register map, Register type_reg); -+ -+ // ------------------------------------------------------------------------- -+ // Runtime calls. -+ -+ // Call a runtime routine. -+ void CallRuntime(const Runtime::Function* f, int num_arguments, -+ SaveFPRegsMode save_doubles = kDontSaveFPRegs); -+ -+ // Convenience function: Same as above, but takes the fid instead. -+ void CallRuntime(Runtime::FunctionId fid, -+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { -+ const Runtime::Function* function = Runtime::FunctionForId(fid); -+ CallRuntime(function, function->nargs, save_doubles); -+ } -+ -+ // Convenience function: Same as above, but takes the fid instead. 
-+  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
-+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
-+    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
-+  }
-+
-+  // Convenience function: tail call a runtime routine (jump).
-+  void TailCallRuntime(Runtime::FunctionId fid);
-+
-+  // Jump to the builtin routine.
-+  void JumpToExternalReference(const ExternalReference& builtin,
-+                               bool builtin_exit_frame = false);
-+
-+  // Generates a trampoline to jump to the off-heap instruction stream.
-+  void JumpToInstructionStream(Address entry);
-+
-+  // ---------------------------------------------------------------------------
-+  // In-place weak references.
-+  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
-+
-+  // -------------------------------------------------------------------------
-+  // StatsCounter support.
-+
-+  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
-+                        Register scratch2);
-+  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
-+                        Register scratch2);
-+
-+  // -------------------------------------------------------------------------
-+  // Smi utilities.
-+
-+  void SmiTag(Register dst, Register src) {
-+    STATIC_ASSERT(kSmiTag == 0);
-+    if (SmiValuesAre32Bits()) {
-+      slli_d(dst, src, 32);
-+    } else {
-+      DCHECK(SmiValuesAre31Bits());
-+      add_w(dst, src, src);
-+    }
-+  }
-+
-+  void SmiTag(Register reg) { SmiTag(reg, reg); }
-+
-+  // Left-shifted from the int32 equivalent of a Smi.
-+  void SmiScale(Register dst, Register src, int scale) {
-+    if (SmiValuesAre32Bits()) {
-+      // The int portion is in the upper 32 bits of the 64-bit word.
-+      srai_d(dst, src, kSmiShift - scale);
-+    } else {
-+      DCHECK(SmiValuesAre31Bits());
-+      DCHECK_GE(scale, kSmiTagSize);
-+      slli_w(dst, src, scale - kSmiTagSize);
-+    }
-+  }
-+
-+  // Test if the register contains a smi.
-+  inline void SmiTst(Register value, Register scratch) {
-+    And(scratch, value, Operand(kSmiTagMask));
-+  }
-+
-+  // Jump if the register contains a non-smi.
-+  void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch);
-+
-+  // Abort execution if argument is a smi, enabled via --debug-code.
-+  void AssertNotSmi(Register object);
-+  void AssertSmi(Register object);
-+
-+  // Abort execution if argument is not a Constructor, enabled via --debug-code.
-+  void AssertConstructor(Register object);
-+
-+  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
-+  void AssertFunction(Register object);
-+
-+  // Abort execution if argument is not a JSBoundFunction,
-+  // enabled via --debug-code.
-+  void AssertBoundFunction(Register object);
-+
-+  // Abort execution if argument is not a JSGeneratorObject (or subclass),
-+  // enabled via --debug-code.
-+  void AssertGeneratorObject(Register object);
-+
-+  // Abort execution if argument is not undefined or an AllocationSite, enabled
-+  // via --debug-code.
-+  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
-+
-+  template <typename Field>
-+  void DecodeField(Register dst, Register src) {
-+    Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
-+  }
-+
-+  template <typename Field>
-+  void DecodeField(Register reg) {
-+    DecodeField<Field>(reg, reg);
-+  }
-+
-+ private:
-+  // Helper functions for generating invokes.
-+  void InvokePrologue(Register expected_parameter_count,
-+                      Register actual_parameter_count, Label* done,
-+                      InvokeFlag flag);
-+
-+  // Compute memory operands for safepoint stack slots.
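-+  // (Aside, a sketch, not code from this patch, of the Smi encoding assumed
-+  // by the SmiTag/SmiScale helpers above when Smi values are 32 bits:
-+  //
-+  //   int64_t tagged = static_cast<int64_t>(value) << 32;   // SmiTag
-+  //   int32_t value2 = static_cast<int32_t>(tagged >> 32);  // SmiUntag
-+  //   // the low 32 bits, including the tag bit, remain zero
-+  // )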
-+  static int SafepointRegisterStackIndex(int reg_code);
-+
-+  // Needs access to SafepointRegisterStackIndex for compiled frame
-+  // traversal.
-+  friend class StandardFrame;
-+
-+  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
-+};
-+
-+template <typename Func>
-+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
-+                                         Func GetLabelFunction) {
-+  // Ensure that dd-ed labels following this instruction use 8-byte aligned
-+  // addresses.
-+  BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
-+                         kSwitchTablePrologueSize);
-+  UseScratchRegisterScope temps(this);
-+  Register scratch = temps.Acquire();
-+  Align(8);  // next is 4 instrs.
-+  pcaddi(scratch, 4);
-+  // alsl_d applies the shift amount sa itself.
-+  alsl_d(scratch, index, scratch, kPointerSizeLog2);
-+  Ld_d(scratch, MemOperand(scratch, 0));
-+  jirl(zero_reg, scratch, 0);
-+  for (size_t index = 0; index < case_count; ++index) {
-+    dd(GetLabelFunction(index));
-+  }
-+}
-+
-+#define ACCESS_MASM(masm) masm->
-+
-+}  // namespace internal
-+}  // namespace v8
-+
-+#endif  // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
-diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h
-new file mode 100644
-index 00000000..1f57f788
---- /dev/null
-+++ b/deps/v8/src/codegen/loong64/register-loong64.h
-@@ -0,0 +1,330 @@
-+// Copyright 2018 the V8 project authors. All rights reserved.
-+// Use of this source code is governed by a BSD-style license that can be
-+// found in the LICENSE file.
-+
-+#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
-+#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
-+
-+#include "src/codegen/loong64/constants-loong64.h"
-+#include "src/codegen/register.h"
-+#include "src/codegen/reglist.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// clang-format off
-+#define GENERAL_REGISTERS(V) \
-+  V(zero_reg) V(ra) V(gp) V(sp) \
-+  V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
-+  V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \
-+  V(tp) V(fp) \
-+  V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
-+
-+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
-+  V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
-+  V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(s7)
-+
-+#define DOUBLE_REGISTERS(V) \
-+  V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
-+  V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
-+  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
-+  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
-+
-+#define FLOAT_REGISTERS DOUBLE_REGISTERS
-+#define SIMD128_REGISTERS(V) \
-+  V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
-+  V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
-+  V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
-+  V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
-+
-+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
-+  V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
-+  V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \
-+  V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23)
-+// clang-format on
-+
-+// Note that the bit values must match those used in actual instruction
-+// encoding.
-+const int kNumRegs = 32;
-+
-+const RegList kJSCallerSaved = 1 << 4 |   // a0
-+                               1 << 5 |   // a1
-+                               1 << 6 |   // a2
-+                               1 << 7 |   // a3
-+                               1 << 8 |   // a4
-+                               1 << 9 |   // a5
-+                               1 << 10 |  // a6
-+                               1 << 11 |  // a7
-+                               1 << 12 |  // t0
-+                               1 << 13 |  // t1
-+                               1 << 14 |  // t2
-+                               1 << 15 |  // t3
-+                               1 << 16 |  // t4
-+                               1 << 17 |  // t5
-+                               1 << 20;   // t8
-+
-+const int kNumJSCallerSaved = 15;
-+
-+// Callee-saved registers preserved when switching from C to JavaScript.
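-+// (Aside, not code from this patch: a RegList is a plain bitmask indexed by
-+// register code, so a set can be checked against its advertised count, e.g.:
-+//
-+//   #include <bitset>
-+//   bool ok = std::bitset<32>(kJSCallerSaved).count() == kNumJSCallerSaved;
-+// )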
-+const RegList kCalleeSaved = 1 << 22 |  // fp
-+                             1 << 23 |  // s0
-+                             1 << 24 |  // s1
-+                             1 << 25 |  // s2
-+                             1 << 26 |  // s3
-+                             1 << 27 |  // s4
-+                             1 << 28 |  // s5
-+                             1 << 29 |  // s6 (roots in Javascript code)
-+                             1 << 30 |  // s7 (cp in Javascript code)
-+                             1 << 31;   // s8
-+
-+const int kNumCalleeSaved = 10;
-+
-+const RegList kCalleeSavedFPU = 1 << 24 |  // f24
-+                                1 << 25 |  // f25
-+                                1 << 26 |  // f26
-+                                1 << 27 |  // f27
-+                                1 << 28 |  // f28
-+                                1 << 29 |  // f29
-+                                1 << 30 |  // f30
-+                                1 << 31;   // f31
-+
-+const int kNumCalleeSavedFPU = 8;
-+
-+const RegList kCallerSavedFPU = 1 << 0 |   // f0
-+                                1 << 1 |   // f1
-+                                1 << 2 |   // f2
-+                                1 << 3 |   // f3
-+                                1 << 4 |   // f4
-+                                1 << 5 |   // f5
-+                                1 << 6 |   // f6
-+                                1 << 7 |   // f7
-+                                1 << 8 |   // f8
-+                                1 << 9 |   // f9
-+                                1 << 10 |  // f10
-+                                1 << 11 |  // f11
-+                                1 << 12 |  // f12
-+                                1 << 13 |  // f13
-+                                1 << 14 |  // f14
-+                                1 << 15 |  // f15
-+                                1 << 16 |  // f16
-+                                1 << 17 |  // f17
-+                                1 << 18 |  // f18
-+                                1 << 19 |  // f19
-+                                1 << 20 |  // f20
-+                                1 << 21 |  // f21
-+                                1 << 22 |  // f22
-+                                1 << 23;   // f23
-+
-+// Number of registers for which space is reserved in safepoints. Must be a
-+// multiple of 8.
-+const int kNumSafepointRegisters = 32;
-+
-+// Define the list of registers actually saved at safepoints.
-+// Note that the number of saved registers may be smaller than the reserved
-+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-+
-+const int kUndefIndex = -1;
-+// Map with indexes on stack that corresponds to codes of saved registers.
-+const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex,  // zero_reg
-+                                                       kUndefIndex,  // ra
-+                                                       kUndefIndex,  // gp
-+                                                       kUndefIndex,  // sp
-+                                                       0,            // a0
-+                                                       1,            // a1
-+                                                       2,            // a2
-+                                                       3,            // a3
-+                                                       4,            // a4
-+                                                       5,            // a5
-+                                                       6,            // a6
-+                                                       7,            // a7
-+                                                       8,            // t0
-+                                                       9,            // t1
-+                                                       10,           // t2
-+                                                       11,           // t3
-+                                                       12,           // t4
-+                                                       13,           // t5
-+                                                       kUndefIndex,  // t6
-+                                                       kUndefIndex,  // t7
-+                                                       14,           // t8
-+                                                       kUndefIndex,  // tp
-+                                                       15,           // fp
-+                                                       16,           // s0
-+                                                       17,           // s1
-+                                                       18,           // s2
-+                                                       19,           // s3
-+                                                       20,           // s4
-+                                                       21,           // s5
-+                                                       22,           // s6
-+                                                       23,           // s7
-+                                                       24};          // s8
-+
-+// CPU Registers.
-+//
-+// 1) We would prefer to use an enum, but enum values are assignment-
-+// compatible with int, which has caused code-generation bugs.
-+//
-+// 2) We would prefer to use a class instead of a struct but we don't like
-+// the register initialization to depend on the particular initialization
-+// order (which appears to be different on OS X, Linux, and Windows for the
-+// installed versions of C++ we tried). Using a struct permits C-style
-+// "initialization". Also, the Register objects cannot be const as this
-+// forces initialization stubs in MSVC, making us dependent on initialization
-+// order.
-+//
-+// 3) By not using an enum, we are possibly preventing the compiler from
-+// doing certain constant folds, which may significantly reduce the
-+// code generated for some assembly instructions (because they boil down
-+// to a few constants). If this is a problem, we could change the code
-+// such that we use an enum in optimized mode, and the struct in debug
-+// mode. This way we get the compile-time error checking in debug mode
-+// and best performance in optimized code.
-+
-+// -----------------------------------------------------------------------------
-+// Implementation of Register and FPURegister.
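-+// (Aside, for illustration, not from this patch: the X-macro pattern below
-+// expands GENERAL_REGISTERS(REGISTER_CODE) into one enumerator per register,
-+// roughly:
-+//
-+//   enum RegisterCode { kRegCode_zero_reg, kRegCode_ra, /* ... */ kRegAfterLast };
-+// )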
-+
-+enum RegisterCode {
-+#define REGISTER_CODE(R) kRegCode_##R,
-+  GENERAL_REGISTERS(REGISTER_CODE)
-+#undef REGISTER_CODE
-+  kRegAfterLast
-+};
-+
-+class Register : public RegisterBase<Register, kRegAfterLast> {
-+ public:
-+  static constexpr int kMantissaOffset = 0;
-+  static constexpr int kExponentOffset = 4;
-+
-+ private:
-+  friend class RegisterBase;
-+  explicit constexpr Register(int code) : RegisterBase(code) {}
-+};
-+
-+// s7: context register
-+// s3: scratch register
-+// s4: scratch register 2
-+#define DECLARE_REGISTER(R) \
-+  constexpr Register R = Register::from_code(kRegCode_##R);
-+GENERAL_REGISTERS(DECLARE_REGISTER)
-+#undef DECLARE_REGISTER
-+
-+constexpr Register no_reg = Register::no_reg();
-+
-+int ToNumber(Register reg);
-+
-+Register ToRegister(int num);
-+
-+constexpr bool kPadArguments = false;
-+constexpr bool kSimpleFPAliasing = true;
-+constexpr bool kSimdMaskRegisters = false;
-+
-+enum DoubleRegisterCode {
-+#define REGISTER_CODE(R) kDoubleCode_##R,
-+  DOUBLE_REGISTERS(REGISTER_CODE)
-+#undef REGISTER_CODE
-+  kDoubleAfterLast
-+};
-+
-+// Coprocessor register.
-+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
-+ public:
-+  FPURegister low() const {
-+    // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
-+    // Find low reg of a Double-reg pair, which is the reg itself.
-+    DCHECK_EQ(code() % 2, 0);  // Specified Double reg must be even.
-+    return FPURegister::from_code(code());
-+  }
-+
-+ private:
-+  friend class RegisterBase;
-+  explicit constexpr FPURegister(int code) : RegisterBase(code) {}
-+};
-+
-+enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
-+
-+using FloatRegister = FPURegister;
-+
-+using DoubleRegister = FPURegister;
-+
-+// TODO: Simd128Register is aliased here only so that the build succeeds.
-+using Simd128Register = FPURegister;
-+
-+#define DECLARE_DOUBLE_REGISTER(R) \
-+  constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
-+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
-+#undef DECLARE_DOUBLE_REGISTER
-+
-+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-+
-+// Register aliases.
-+// cp is assumed to be a callee saved register.
-+constexpr Register kRootRegister = s6;
-+constexpr Register cp = s7;
-+constexpr Register kScratchReg = s3;
-+constexpr Register kScratchReg2 = s4;
-+constexpr DoubleRegister kScratchDoubleReg = f30;
-+// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
-+constexpr DoubleRegister kDoubleRegZero = f28;
-+
-+// FPU (coprocessor 1) control registers.
-+// Currently only FCSR0 is implemented.
-+// TODO: fcsr0 fcsr1 fcsr2 fcsr3
-+struct FPUControlRegister {
-+  bool is_valid() const { return reg_code == kFCSRRegister; }
-+  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
-+  int code() const {
-+    DCHECK(is_valid());
-+    return reg_code;
-+  }
-+  int bit() const {
-+    DCHECK(is_valid());
-+    return 1 << reg_code;
-+  }
-+  void setcode(int f) {
-+    reg_code = f;
-+    DCHECK(is_valid());
-+  }
-+  // Unfortunately we can't make this private in a struct.
-+  int reg_code;
-+};
-+
-+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
-+constexpr FPUControlRegister FCSR = {kFCSRRegister};
-+
-+// Define {RegisterName} methods for the register types.
-+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
-+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
-+
-+// Give alias names to registers for calling conventions.
-+constexpr Register kReturnRegister0 = a0;
-+constexpr Register kReturnRegister1 = a1;
-+constexpr Register kReturnRegister2 = a2;
-+constexpr Register kJSFunctionRegister = a1;
-+constexpr Register kContextRegister = s7;
-+constexpr Register kAllocateSizeRegister = a0;
-+constexpr Register kSpeculationPoisonRegister = t3;
-+constexpr Register kInterpreterAccumulatorRegister = a0;
-+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
-+constexpr Register kInterpreterBytecodeArrayRegister = t1;
-+constexpr Register kInterpreterDispatchTableRegister = t2;
-+
-+constexpr Register kJavaScriptCallArgCountRegister = a0;
-+constexpr Register kJavaScriptCallCodeStartRegister = a2;
-+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-+constexpr Register kJavaScriptCallNewTargetRegister = a3;
-+constexpr Register kJavaScriptCallExtraArg1Register = a2;
-+
-+constexpr Register kOffHeapTrampolineRegister = t7;
-+constexpr Register kRuntimeCallFunctionRegister = a1;
-+constexpr Register kRuntimeCallArgCountRegister = a0;
-+constexpr Register kRuntimeCallArgvRegister = a2;
-+constexpr Register kWasmInstanceRegister = a0;
-+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
-+
-+constexpr DoubleRegister kFPReturnRegister0 = f0;
-+
-+}  // namespace internal
-+}  // namespace v8
-+
-+#endif  // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
-diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
-index 01175e58..f2a487ac 100644
---- a/deps/v8/src/codegen/macro-assembler.h
-+++ b/deps/v8/src/codegen/macro-assembler.h
-@@ -49,6 +49,9 @@ enum AllocationFlags {
- #elif V8_TARGET_ARCH_MIPS64
- #include "src/codegen/mips64/constants-mips64.h"
- #include "src/codegen/mips64/macro-assembler-mips64.h"
-+#elif V8_TARGET_ARCH_LOONG64
-+#include "src/codegen/loong64/constants-loong64.h"
-+#include "src/codegen/loong64/macro-assembler-loong64.h"
- #elif V8_TARGET_ARCH_S390
- #include "src/codegen/s390/constants-s390.h"
- #include "src/codegen/s390/macro-assembler-s390.h"
-diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
-index 751d0f87..63a9bf87 100644
---- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
-+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
-@@ -997,7 +997,7 @@ void Assembler::next(Label* L, bool is_internal) {
- }
- 
- bool Assembler::is_near(Label* L) {
--  DCHECK(L->is_bound());
-+  if (L == nullptr || !L->is_bound()) return true;
-   return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
- }
- 
-diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
-index f70e46f8..c585840a 100644
---- a/deps/v8/src/codegen/mips64/assembler-mips64.h
-+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
-@@ -1864,6 +1864,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
-   // instruction. We use this information to trigger different mode of
-   // branch instruction generation, where we use jump instructions rather
-   // than regular branch instructions.
-+  // TODO: can this be optimized?
- bool trampoline_emitted_; - static constexpr int kInvalidSlotPos = -1; - -diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h -index 21a72330..6096413e 100644 ---- a/deps/v8/src/codegen/register-arch.h -+++ b/deps/v8/src/codegen/register-arch.h -@@ -22,6 +22,8 @@ - #include "src/codegen/mips/register-mips.h" - #elif V8_TARGET_ARCH_MIPS64 - #include "src/codegen/mips64/register-mips64.h" -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/codegen/loong64/register-loong64.h" - #elif V8_TARGET_ARCH_S390 - #include "src/codegen/s390/register-s390.h" - #else -diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc -index 5752b463..50cad4e2 100644 ---- a/deps/v8/src/codegen/register-configuration.cc -+++ b/deps/v8/src/codegen/register-configuration.cc -@@ -58,6 +58,8 @@ static int get_num_allocatable_double_registers() { - kMaxAllocatableDoubleRegisterCount; - #elif V8_TARGET_ARCH_MIPS64 - kMaxAllocatableDoubleRegisterCount; -+#elif V8_TARGET_ARCH_LOONG64 -+ kMaxAllocatableDoubleRegisterCount; - #elif V8_TARGET_ARCH_PPC - kMaxAllocatableDoubleRegisterCount; - #elif V8_TARGET_ARCH_PPC64 -diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc -index 2e62c6f1..d60281eb 100644 ---- a/deps/v8/src/codegen/reloc-info.cc -+++ b/deps/v8/src/codegen/reloc-info.cc -@@ -330,7 +330,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { - return false; - #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ - defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ -- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) -+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \ -+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) - return true; - #endif - } -diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h -index 70b17ab6..3347ef4c 100644 ---- a/deps/v8/src/common/globals.h -+++ b/deps/v8/src/common/globals.h -@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024; - #if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390) - #define USE_SIMULATOR 1 - #endif -+#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64) -+#define USE_SIMULATOR 1 -+#endif - #endif - - // Determine whether the architecture uses an embedded constant pool -diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h -index 84d5d249..c71cb229 100644 ---- a/deps/v8/src/compiler/backend/instruction-codes.h -+++ b/deps/v8/src/compiler/backend/instruction-codes.h -@@ -17,6 +17,8 @@ - #include "src/compiler/backend/mips/instruction-codes-mips.h" - #elif V8_TARGET_ARCH_MIPS64 - #include "src/compiler/backend/mips64/instruction-codes-mips64.h" -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/compiler/backend/loong64/instruction-codes-loong64.h" - #elif V8_TARGET_ARCH_X64 - #include "src/compiler/backend/x64/instruction-codes-x64.h" - #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 -diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc -index c2022b57..c8e1baf3 100644 ---- a/deps/v8/src/compiler/backend/instruction-selector.cc -+++ b/deps/v8/src/compiler/backend/instruction-selector.cc -@@ -2588,7 +2588,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { - #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS - - #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ -- 
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 -+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_LOONG64 - void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } - - void InstructionSelector::VisitWord64AtomicStore(Node* node) { -@@ -2613,7 +2613,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { - UNIMPLEMENTED(); - } - #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64 -- // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 -+ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 && -+ // !V8_TARGET_ARCH_LOONG64 - - #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM - // This is only needed on 32-bit to split the 64-bit value into two operands. -@@ -2627,7 +2628,7 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { - - #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X - #if !V8_TARGET_ARCH_ARM64 --#if !V8_TARGET_ARCH_MIPS64 -+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 - void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } - void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } - void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } -diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc -new file mode 100644 -index 00000000..af7c9155 ---- /dev/null -+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc -@@ -0,0 +1,2844 @@ -+// Copyright 2014 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#include "src/codegen/assembler-inl.h" -+#include "src/codegen/callable.h" -+#include "src/codegen/loong64/constants-loong64.h" -+#include "src/codegen/macro-assembler.h" -+#include "src/codegen/optimized-compilation-info.h" -+#include "src/compiler/backend/code-generator-impl.h" -+#include "src/compiler/backend/code-generator.h" -+#include "src/compiler/backend/gap-resolver.h" -+#include "src/compiler/node-matchers.h" -+#include "src/compiler/osr.h" -+#include "src/heap/memory-chunk.h" -+#include "src/wasm/wasm-code-manager.h" -+ -+namespace v8 { -+namespace internal { -+namespace compiler { -+ -+#define __ tasm()-> -+ -+// TODO(plind): consider renaming these macros. -+#define TRACE_MSG(msg) \ -+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ -+ __LINE__) -+ -+#define TRACE_UNIMPL() \ -+ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", __FUNCTION__, \ -+ __LINE__) -+ -+// Adds Loong64-specific methods to convert InstructionOperands. -+class Loong64OperandConverter final : public InstructionOperandConverter { -+ public: -+ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr) -+ : InstructionOperandConverter(gen, instr) {} -+ -+ FloatRegister OutputSingleRegister(size_t index = 0) { -+ return ToSingleRegister(instr_->OutputAt(index)); -+ } -+ -+ FloatRegister InputSingleRegister(size_t index) { -+ return ToSingleRegister(instr_->InputAt(index)); -+ } -+ -+ FloatRegister ToSingleRegister(InstructionOperand* op) { -+ // Single (Float) and Double register namespace is same on LOONG64, -+ // both are typedefs of FPURegister. 
-+ return ToDoubleRegister(op); -+ } -+ -+ Register InputOrZeroRegister(size_t index) { -+ if (instr_->InputAt(index)->IsImmediate()) { -+ DCHECK_EQ(0, InputInt32(index)); -+ return zero_reg; -+ } -+ return InputRegister(index); -+ } -+ -+ DoubleRegister InputOrZeroDoubleRegister(size_t index) { -+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; -+ -+ return InputDoubleRegister(index); -+ } -+ -+ DoubleRegister InputOrZeroSingleRegister(size_t index) { -+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; -+ -+ return InputSingleRegister(index); -+ } -+ -+ Operand InputImmediate(size_t index) { -+ Constant constant = ToConstant(instr_->InputAt(index)); -+ switch (constant.type()) { -+ case Constant::kInt32: -+ return Operand(constant.ToInt32()); -+ case Constant::kInt64: -+ return Operand(constant.ToInt64()); -+ case Constant::kFloat32: -+ return Operand::EmbeddedNumber(constant.ToFloat32()); -+ case Constant::kFloat64: -+ return Operand::EmbeddedNumber(constant.ToFloat64().value()); -+ case Constant::kExternalReference: -+ case Constant::kCompressedHeapObject: -+ case Constant::kHeapObject: -+ // TODO(plind): Maybe we should handle ExtRef & HeapObj here? -+ // maybe not done on arm due to const pool ?? -+ break; -+ case Constant::kDelayedStringConstant: -+ return Operand::EmbeddedStringConstant( -+ constant.ToDelayedStringConstant()); -+ case Constant::kRpoNumber: -+ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64? -+ break; -+ } -+ UNREACHABLE(); -+ } -+ -+ Operand InputOperand(size_t index) { -+ InstructionOperand* op = instr_->InputAt(index); -+ if (op->IsRegister()) { -+ return Operand(ToRegister(op)); -+ } -+ return InputImmediate(index); -+ } -+ -+ MemOperand MemoryOperand(size_t* first_index) { -+ const size_t index = *first_index; -+ switch (AddressingModeField::decode(instr_->opcode())) { -+ case kMode_None: -+ break; -+ case kMode_MRI: -+ *first_index += 2; -+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); -+ case kMode_MRR: -+ *first_index += 2; -+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1)); -+ } -+ UNREACHABLE(); -+ } -+ -+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } -+ -+ MemOperand ToMemOperand(InstructionOperand* op) const { -+ DCHECK_NOT_NULL(op); -+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); -+ return SlotToMemOperand(AllocatedOperand::cast(op)->index()); -+ } -+ -+ MemOperand SlotToMemOperand(int slot) const { -+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot); -+ return MemOperand(offset.from_stack_pointer() ? 
sp : fp, offset.offset()); -+ } -+}; -+ -+static inline bool HasRegisterInput(Instruction* instr, size_t index) { -+ return instr->InputAt(index)->IsRegister(); -+} -+ -+namespace { -+ -+class OutOfLineRecordWrite final : public OutOfLineCode { -+ public: -+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, -+ Register value, Register scratch0, Register scratch1, -+ RecordWriteMode mode, StubCallMode stub_mode) -+ : OutOfLineCode(gen), -+ object_(object), -+ index_(index), -+ value_(value), -+ scratch0_(scratch0), -+ scratch1_(scratch1), -+ mode_(mode), -+ stub_mode_(stub_mode), -+ must_save_lr_(!gen->frame_access_state()->has_frame()), -+ zone_(gen->zone()) {} -+ -+ void Generate() final { -+ if (mode_ > RecordWriteMode::kValueIsPointer) { -+ __ JumpIfSmi(value_, exit()); -+ } -+ __ CheckPageFlag(value_, scratch0_, -+ MemoryChunk::kPointersToHereAreInterestingMask, eq, -+ exit()); -+ __ Add_d(scratch1_, object_, index_); -+ RememberedSetAction const remembered_set_action = -+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET -+ : OMIT_REMEMBERED_SET; -+ SaveFPRegsMode const save_fp_mode = -+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; -+ if (must_save_lr_) { -+ // We need to save and restore ra if the frame was elided. -+ __ Push(ra); -+ } -+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { -+ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); -+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { -+ // A direct call to a wasm runtime stub defined in this module. -+ // Just encode the stub index. This will be patched when the code -+ // is added to the native module and copied into wasm code space. -+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, -+ save_fp_mode, wasm::WasmCode::kRecordWrite); -+ } else { -+ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, -+ save_fp_mode); -+ } -+ if (must_save_lr_) { -+ __ Pop(ra); -+ } -+ } -+ -+ private: -+ Register const object_; -+ Register const index_; -+ Register const value_; -+ Register const scratch0_; -+ Register const scratch1_; -+ RecordWriteMode const mode_; -+ StubCallMode const stub_mode_; -+ bool must_save_lr_; -+ Zone* zone_; -+}; -+ -+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ -+ class ool_name final : public OutOfLineCode { \ -+ public: \ -+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ -+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ -+ \ -+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ -+ \ -+ private: \ -+ T const dst_; \ -+ T const src1_; \ -+ T const src2_; \ -+ } -+ -+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); -+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); -+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister); -+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister); -+ -+#undef CREATE_OOL_CLASS -+ -+Condition FlagsConditionToConditionCmp(FlagsCondition condition) { -+ switch (condition) { -+ case kEqual: -+ return eq; -+ case kNotEqual: -+ return ne; -+ case kSignedLessThan: -+ return lt; -+ case kSignedGreaterThanOrEqual: -+ return ge; -+ case kSignedLessThanOrEqual: -+ return le; -+ case kSignedGreaterThan: -+ return gt; -+ case kUnsignedLessThan: -+ return lo; -+ case kUnsignedGreaterThanOrEqual: -+ return hs; -+ case kUnsignedLessThanOrEqual: -+ return ls; -+ case kUnsignedGreaterThan: -+ return hi; -+ case kUnorderedEqual: -+ case kUnorderedNotEqual: 
-+ break;
-+ default:
-+ break;
-+ }
-+ UNREACHABLE();
-+}
-+
-+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
-+ switch (condition) {
-+ case kNotEqual:
-+ return ne;
-+ case kEqual:
-+ return eq;
-+ default:
-+ break;
-+ }
-+ UNREACHABLE();
-+}
-+
-+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
-+ switch (condition) {
-+ case kOverflow:
-+ return ne;
-+ case kNotOverflow:
-+ return eq;
-+ default:
-+ break;
-+ }
-+ UNREACHABLE();
-+}
-+
-+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
-+ FlagsCondition condition) {
-+ switch (condition) {
-+ case kEqual:
-+ *predicate = true;
-+ return CEQ;
-+ case kNotEqual:
-+ *predicate = false;
-+ return CEQ;
-+ case kUnsignedLessThan:
-+ *predicate = true;
-+ return CLT;
-+ case kUnsignedGreaterThanOrEqual:
-+ *predicate = false;
-+ return CLT;
-+ case kUnsignedLessThanOrEqual:
-+ *predicate = true;
-+ return CLE;
-+ case kUnsignedGreaterThan:
-+ *predicate = false;
-+ return CLE;
-+ case kUnorderedEqual:
-+ case kUnorderedNotEqual:
-+ *predicate = true;
-+ break;
-+ default:
-+ *predicate = true;
-+ break;
-+ }
-+ UNREACHABLE();
-+}
-+
-+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-+ InstructionCode opcode, Instruction* instr,
-+ Loong64OperandConverter const& i) {
-+ const MemoryAccessMode access_mode =
-+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
-+ if (access_mode == kMemoryAccessPoisoned) {
-+ Register value = i.OutputRegister();
-+ codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
-+ }
-+}
-+
-+} // namespace
-+
-+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
-+ do { \
-+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
-+ __ dbar(0); \
-+ } while (0)
-+
-+// TODO remove second dbar?
-+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
-+ do { \
-+ __ dbar(0); \
-+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
-+ __ dbar(0); \
-+ } while (0)
-+
-+// only use for sub_w and sub_d
-+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
-+ do { \
-+ Label binop; \
-+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
-+ __ dbar(0); \
-+ __ bind(&binop); \
-+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
-+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
-+ Operand(i.InputRegister(2))); \
-+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
-+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
-+ __ dbar(0); \
-+ } while (0)
-+
-+// TODO remove second dbar?
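
// A minimal, self-contained sketch of the contract the ASSEMBLE_ATOMIC_BINOP
// macro above implements: a load-linked/store-conditional retry loop that
// re-reads the location, applies the operation, and retries until the
// conditional store succeeds. GCC's __atomic builtins stand in for the
// LoongArch ll.w/sc.w pair here; this is an illustration, not code emitted
// by the patch.
#include <cstdint>

// Atomically performs *addr -= value and returns the previous value,
// mirroring ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w).
inline uint32_t AtomicSubWord32(uint32_t* addr, uint32_t value) {
  uint32_t old_val = __atomic_load_n(addr, __ATOMIC_RELAXED);
  // The weak compare-exchange plays the role of sc.w: it fails whenever the
  // word changed after the load, refreshing old_val for the next attempt.
  while (!__atomic_compare_exchange_n(addr, &old_val, old_val - value,
                                      /*weak=*/true, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST)) {
  }
  return old_val;
}
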
-+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ -+ size, bin_instr, representation) \ -+ do { \ -+ Label binop; \ -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ -+ if (representation == 32) { \ -+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ -+ } else { \ -+ DCHECK_EQ(representation, 64); \ -+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \ -+ } \ -+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ -+ Operand(i.TempRegister(3))); \ -+ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \ -+ __ dbar(0); \ -+ __ bind(&binop); \ -+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ -+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ -+ size, sign_extend); \ -+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ -+ Operand(i.InputRegister(2))); \ -+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ -+ size); \ -+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ -+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ -+ __ dbar(0); \ -+ } while (0) -+ -+// TODO remove second dbar? -+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ -+ load_linked, store_conditional, sign_extend, size, representation) \ -+ do { \ -+ Label exchange; \ -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ -+ if (representation == 32) { \ -+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ -+ } else { \ -+ DCHECK_EQ(representation, 64); \ -+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ -+ } \ -+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ -+ Operand(i.TempRegister(1))); \ -+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ -+ __ dbar(0); \ -+ __ bind(&exchange); \ -+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ -+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ -+ size, sign_extend); \ -+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ -+ size); \ -+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ -+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ -+ __ dbar(0); \ -+ } while (0) -+ -+// TODO remove second dbar? -+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ -+ store_conditional) \ -+ do { \ -+ Label compareExchange; \ -+ Label exit; \ -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ -+ __ dbar(0); \ -+ __ bind(&compareExchange); \ -+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ -+ __ BranchShort(&exit, ne, i.InputRegister(2), \ -+ Operand(i.OutputRegister(0))); \ -+ __ mov(i.TempRegister(2), i.InputRegister(3)); \ -+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ -+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ -+ Operand(zero_reg)); \ -+ __ bind(&exit); \ -+ __ dbar(0); \ -+ } while (0) -+ -+// TODO remove second dbar? 
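
// The *_EXT macros above emulate 8- and 16-bit atomics on a machine whose
// ll/sc only covers whole words: align the address down to its containing
// word, turn the dropped low address bits into a bit shift (the slli by 3),
// and splice the narrow value in and out of the word inside the retry loop.
// A standalone sketch of the same idea built on a 32-bit compare-exchange,
// assuming little-endian byte order as on LoongArch (illustration only, not
// code from this patch):
#include <cstdint>

// Atomic 8-bit exchange built from a 32-bit CAS loop; returns the old byte.
inline uint8_t AtomicExchangeUint8(uint8_t* addr, uint8_t value) {
  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
  uint32_t* word = reinterpret_cast<uint32_t*>(a & ~uintptr_t{3});
  unsigned shift = static_cast<unsigned>(a & 3) * 8;  // byte's bit position
  uint32_t mask = uint32_t{0xff} << shift;
  uint32_t old_word = __atomic_load_n(word, __ATOMIC_RELAXED);
  uint32_t new_word;
  do {  // InsertBits: replace the target byte, leave the neighbours alone.
    new_word = (old_word & ~mask) | (uint32_t{value} << shift);
  } while (!__atomic_compare_exchange_n(word, &old_word, new_word,
                                        /*weak=*/true, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST));
  return static_cast<uint8_t>((old_word & mask) >> shift);  // ExtractBits
}
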
-+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ -+ load_linked, store_conditional, sign_extend, size, representation) \ -+ do { \ -+ Label compareExchange; \ -+ Label exit; \ -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ -+ if (representation == 32) { \ -+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ -+ } else { \ -+ DCHECK_EQ(representation, 64); \ -+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ -+ } \ -+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ -+ Operand(i.TempRegister(1))); \ -+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ -+ __ dbar(0); \ -+ __ bind(&compareExchange); \ -+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ -+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ -+ size, sign_extend); \ -+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \ -+ size, sign_extend); \ -+ __ BranchShort(&exit, ne, i.InputRegister(2), \ -+ Operand(i.OutputRegister(0))); \ -+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ -+ size); \ -+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ -+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ -+ Operand(zero_reg)); \ -+ __ bind(&exit); \ -+ __ dbar(0); \ -+ } while (0) -+ -+#define ASSEMBLE_IEEE754_BINOP(name) \ -+ do { \ -+ FrameScope scope(tasm(), StackFrame::MANUAL); \ -+ __ PrepareCallCFunction(0, 2, kScratchReg); \ -+ __ MovToFloatParameters(i.InputDoubleRegister(0), \ -+ i.InputDoubleRegister(1)); \ -+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ -+ /* Move the result in the double result register. */ \ -+ __ MovFromFloatResult(i.OutputDoubleRegister()); \ -+ } while (0) -+ -+#define ASSEMBLE_IEEE754_UNOP(name) \ -+ do { \ -+ FrameScope scope(tasm(), StackFrame::MANUAL); \ -+ __ PrepareCallCFunction(0, 1, kScratchReg); \ -+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ -+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ -+ /* Move the result in the double result register. */ \ -+ __ MovFromFloatResult(i.OutputDoubleRegister()); \ -+ } while (0) -+ -+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ -+ do { \ -+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ -+ i.InputSimd128Register(1)); \ -+ } while (0) -+ -+void CodeGenerator::AssembleDeconstructFrame() { -+ __ mov(sp, fp); -+ __ Pop(ra, fp); -+} -+ -+void CodeGenerator::AssemblePrepareTailCall() { -+ if (frame_access_state()->has_frame()) { -+ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); -+ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); -+ } -+ frame_access_state()->SetFrameAccessToSP(); -+} -+ -+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, -+ Register scratch1, -+ Register scratch2, -+ Register scratch3) { -+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); -+ Label done; -+ -+ // Check if current frame is an arguments adaptor frame. -+ __ Ld_d(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); -+ __ Branch(&done, ne, scratch3, -+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); -+ -+ // Load arguments count from current arguments adaptor frame (note, it -+ // does not include receiver). 
-+ Register caller_args_count_reg = scratch1; -+ __ Ld_d(caller_args_count_reg, -+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); -+ __ SmiUntag(caller_args_count_reg); -+ -+ __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3); -+ __ bind(&done); -+} -+ -+namespace { -+ -+void AdjustStackPointerForTailCall(TurboAssembler* tasm, -+ FrameAccessState* state, -+ int new_slot_above_sp, -+ bool allow_shrinkage = true) { -+ int current_sp_offset = state->GetSPToFPSlotCount() + -+ StandardFrameConstants::kFixedSlotCountAboveFp; -+ int stack_slot_delta = new_slot_above_sp - current_sp_offset; -+ if (stack_slot_delta > 0) { -+ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); -+ state->IncreaseSPDelta(stack_slot_delta); -+ } else if (allow_shrinkage && stack_slot_delta < 0) { -+ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); -+ state->IncreaseSPDelta(stack_slot_delta); -+ } -+} -+ -+} // namespace -+ -+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, -+ int first_unused_stack_slot) { -+ AdjustStackPointerForTailCall(tasm(), frame_access_state(), -+ first_unused_stack_slot, false); -+} -+ -+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, -+ int first_unused_stack_slot) { -+ AdjustStackPointerForTailCall(tasm(), frame_access_state(), -+ first_unused_stack_slot); -+} -+ -+// Check that {kJavaScriptCallCodeStartRegister} is correct. -+void CodeGenerator::AssembleCodeStartRegisterCheck() { -+ __ ComputeCodeStartAddress(kScratchReg); -+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart, -+ kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); -+} -+ -+// Check if the code object is marked for deoptimization. If it is, then it -+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need -+// to: -+// 1. read from memory the word that contains that bit, which can be found in -+// the flags in the referenced {CodeDataContainer} object; -+// 2. test kMarkedForDeoptimizationBit in those flags; and -+// 3. if it is not zero then it jumps to the builtin. -+void CodeGenerator::BailoutIfDeoptimized() { -+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; -+ __ Ld_d(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); -+ __ Ld_w(kScratchReg, -+ FieldMemOperand(kScratchReg, -+ CodeDataContainer::kKindSpecificFlagsOffset)); -+ __ And(kScratchReg, kScratchReg, -+ Operand(1 << Code::kMarkedForDeoptimizationBit)); -+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), -+ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); -+} -+ -+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { -+ // Calculate a mask which has all bits set in the normal case, but has all -+ // bits cleared if we are speculatively executing the wrong PC. -+ __ li(kSpeculationPoisonRegister, -1); -+ __ ComputeCodeStartAddress(kScratchReg); -+ __ sub_d(kScratchReg, kScratchReg, kJavaScriptCallCodeStartRegister); -+ __ maskeqz(kSpeculationPoisonRegister, kSpeculationPoisonRegister, -+ kScratchReg); -+} -+ -+void CodeGenerator::AssembleRegisterArgumentPoisoning() { -+ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); -+ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); -+ __ And(sp, sp, kSpeculationPoisonRegister); -+} -+ -+// Assembles an instruction after register allocation, producing machine code. 
-+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( -+ Instruction* instr) { -+ Loong64OperandConverter i(this, instr); -+ InstructionCode opcode = instr->opcode(); -+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); -+ switch (arch_opcode) { -+ case kArchCallCodeObject: { -+ if (instr->InputAt(0)->IsImmediate()) { -+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); -+ } else { -+ Register reg = i.InputRegister(0); -+ DCHECK_IMPLIES( -+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), -+ reg == kJavaScriptCallCodeStartRegister); -+ __ CallCodeObject(reg); -+ } -+ RecordCallPosition(instr); -+ frame_access_state()->ClearSPDelta(); -+ break; -+ } -+ case kArchCallBuiltinPointer: { -+ DCHECK(!instr->InputAt(0)->IsImmediate()); -+ Register builtin_index = i.InputRegister(0); -+ __ CallBuiltinByIndex(builtin_index); -+ RecordCallPosition(instr); -+ frame_access_state()->ClearSPDelta(); -+ break; -+ } -+ case kArchCallWasmFunction: { -+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { -+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, -+ i.TempRegister(0), i.TempRegister(1), -+ i.TempRegister(2)); -+ } -+ if (instr->InputAt(0)->IsImmediate()) { -+ Constant constant = i.ToConstant(instr->InputAt(0)); -+ Address wasm_code = static_cast
<Address>(constant.ToInt64());
-+ __ Call(wasm_code, constant.rmode());
-+ } else {
-+ __ addi_d(kScratchReg, i.InputRegister(0), 0);
-+ __ Call(kScratchReg);
-+ }
-+ RecordCallPosition(instr);
-+ frame_access_state()->ClearSPDelta();
-+ break;
-+ }
-+ case kArchTailCallCodeObjectFromJSFunction:
-+ case kArchTailCallCodeObject: {
-+ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
-+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
-+ i.TempRegister(0), i.TempRegister(1),
-+ i.TempRegister(2));
-+ }
-+ if (instr->InputAt(0)->IsImmediate()) {
-+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
-+ } else {
-+ Register reg = i.InputRegister(0);
-+ DCHECK_IMPLIES(
-+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
-+ reg == kJavaScriptCallCodeStartRegister);
-+ __ JumpCodeObject(reg);
-+ }
-+ frame_access_state()->ClearSPDelta();
-+ frame_access_state()->SetFrameAccessToDefault();
-+ break;
-+ }
-+ case kArchTailCallWasm: {
-+ if (instr->InputAt(0)->IsImmediate()) {
-+ Constant constant = i.ToConstant(instr->InputAt(0));
-+ Address wasm_code = static_cast<Address>(constant.ToInt64());
-+ __ Jump(wasm_code, constant.rmode());
-+ } else {
-+ __ addi_d(kScratchReg, i.InputRegister(0), 0);
-+ __ Jump(kScratchReg);
-+ }
-+ frame_access_state()->ClearSPDelta();
-+ frame_access_state()->SetFrameAccessToDefault();
-+ break;
-+ }
-+ case kArchTailCallAddress: {
-+ CHECK(!instr->InputAt(0)->IsImmediate());
-+ Register reg = i.InputRegister(0);
-+ DCHECK_IMPLIES(
-+ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
-+ reg == kJavaScriptCallCodeStartRegister);
-+ __ Jump(reg);
-+ frame_access_state()->ClearSPDelta();
-+ frame_access_state()->SetFrameAccessToDefault();
-+ break;
-+ }
-+ case kArchCallJSFunction: {
-+ Register func = i.InputRegister(0);
-+ if (FLAG_debug_code) {
-+ // Check the function's context matches the context argument.
-+ __ Ld_d(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
-+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
-+ Operand(kScratchReg));
-+ }
-+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-+ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-+ __ CallCodeObject(a2);
-+ RecordCallPosition(instr);
-+ frame_access_state()->ClearSPDelta();
-+ break;
-+ }
-+ case kArchPrepareCallCFunction: {
-+ int const num_parameters = MiscField::decode(instr->opcode());
-+ __ PrepareCallCFunction(num_parameters, kScratchReg);
-+ // Frame alignment requires using FP-relative frame addressing.
-+ frame_access_state()->SetFrameAccessToFP();
-+ break;
-+ }
-+ case kArchSaveCallerRegisters: {
-+ fp_mode_ =
-+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
-+ // kReturnRegister0 should have been saved before entering the stub.
-+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
-+ DCHECK(IsAligned(bytes, kSystemPointerSize));
-+ DCHECK_EQ(0, frame_access_state()->sp_delta());
-+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
-+ DCHECK(!caller_registers_saved_);
-+ caller_registers_saved_ = true;
-+ break;
-+ }
-+ case kArchRestoreCallerRegisters: {
-+ DCHECK(fp_mode_ ==
-+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
-+ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
-+ // Don't overwrite the returned value.
-+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
-+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
-+ DCHECK_EQ(0, frame_access_state()->sp_delta());
-+ DCHECK(caller_registers_saved_);
-+ caller_registers_saved_ = false;
-+ break;
-+ }
-+ case kArchPrepareTailCall:
-+ AssemblePrepareTailCall();
-+ break;
-+ case kArchCallCFunction: {
-+ int const num_parameters = MiscField::decode(instr->opcode());
-+ Label start_call;
-+ bool isWasmCapiFunction =
-+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
-+ // from start_call to return address.
-+ int offset = __ root_array_available() ? 44 : 80; // 11 or 20 instrs
-+#if V8_HOST_ARCH_LOONG64
-+ if (__ emit_debug_code()) {
-+ offset += 12; // see CallCFunction
-+ }
-+#endif
-+ if (isWasmCapiFunction) {
-+ // Put the return address in a stack slot.
-+ // __ mov(kScratchReg, ra);
-+ __ bind(&start_call);
-+ __ pcaddi(t7, -4); // __ nal();
-+ //__ nop();
-+ //__ Daddu(ra, ra, offset - 8); // 8 = nop + nal
-+ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
-+ // __ mov(ra, kScratchReg);
-+ }
-+ if (instr->InputAt(0)->IsImmediate()) {
-+ ExternalReference ref = i.InputExternalReference(0);
-+ __ CallCFunction(ref, num_parameters);
-+ } else {
-+ Register func = i.InputRegister(0);
-+ __ CallCFunction(func, num_parameters);
-+ }
-+ if (isWasmCapiFunction) {
-+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
-+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
-+ }
-+
-+ frame_access_state()->SetFrameAccessToDefault();
-+ // Ideally, we should decrement SP delta to match the change of stack
-+ // pointer in CallCFunction. However, for certain architectures (e.g.
-+ // ARM), there may be more strict alignment requirement, causing old SP
-+ // to be saved on the stack. In those cases, we can not calculate the SP
-+ // delta statically.
-+ frame_access_state()->ClearSPDelta();
-+ if (caller_registers_saved_) {
-+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
-+ // Here, we assume the sequence to be:
-+ // kArchSaveCallerRegisters;
-+ // kArchCallCFunction;
-+ // kArchRestoreCallerRegisters;
-+ int bytes =
-+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
-+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
-+ }
-+ break;
-+ }
-+ case kArchJmp:
-+ AssembleArchJump(i.InputRpo(0));
-+ break;
-+ case kArchBinarySearchSwitch:
-+ AssembleArchBinarySearchSwitch(instr);
-+ break;
-+ case kArchTableSwitch:
-+ AssembleArchTableSwitch(instr);
-+ break;
-+ case kArchAbortCSAAssert:
-+ DCHECK(i.InputRegister(0) == a0);
-+ {
-+ // We don't actually want to generate a pile of code for this, so just
-+ // claim there is a stack frame, without generating one.
-+ FrameScope scope(tasm(), StackFrame::NONE);
-+ __ Call(
-+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
-+ RelocInfo::CODE_TARGET);
-+ }
-+ __ stop();
-+ break;
-+ case kArchDebugBreak:
-+ __ DebugBreak();
-+ break;
-+ case kArchComment:
-+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
-+ break;
-+ case kArchNop:
-+ case kArchThrowTerminator:
-+ // don't emit code for nops.
-+ break;
-+ case kArchDeoptimize: {
-+ DeoptimizationExit* exit =
-+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-+ CodeGenResult result = AssembleDeoptimizerCall(exit);
-+ if (result != kSuccess) return result;
-+ break;
-+ }
-+ case kArchRet:
-+ AssembleReturn(instr->InputAt(0));
-+ break;
-+ case kArchStackPointerGreaterThan:
-+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
-+ break; -+ case kArchStackCheckOffset: -+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); -+ break; -+ case kArchFramePointer: -+ __ mov(i.OutputRegister(), fp); -+ break; -+ case kArchParentFramePointer: -+ if (frame_access_state()->has_frame()) { -+ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0)); -+ } else { -+ __ mov(i.OutputRegister(), fp); -+ } -+ break; -+ case kArchTruncateDoubleToI: -+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), -+ i.InputDoubleRegister(0), DetermineStubCallMode()); -+ break; -+ case kArchStoreWithWriteBarrier: { -+ RecordWriteMode mode = -+ static_cast(MiscField::decode(instr->opcode())); -+ Register object = i.InputRegister(0); -+ Register index = i.InputRegister(1); -+ Register value = i.InputRegister(2); -+ Register scratch0 = i.TempRegister(0); -+ Register scratch1 = i.TempRegister(1); -+ auto ool = new (zone()) -+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, -+ mode, DetermineStubCallMode()); -+ __ Add_d(kScratchReg, object, index); -+ __ St_d(value, MemOperand(kScratchReg, 0)); -+ __ CheckPageFlag(object, scratch0, -+ MemoryChunk::kPointersFromHereAreInterestingMask, ne, -+ ool->entry()); -+ __ bind(ool->exit()); -+ break; -+ } -+ case kArchStackSlot: { -+ FrameOffset offset = -+ frame_access_state()->GetFrameOffset(i.InputInt32(0)); -+ Register base_reg = offset.from_stack_pointer() ? sp : fp; -+ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset())); -+ int alignment = i.InputInt32(1); -+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || -+ alignment == 16); -+ if (FLAG_debug_code && alignment > 0) { -+ // Verify that the output_register is properly aligned -+ __ And(kScratchReg, i.OutputRegister(), -+ Operand(kSystemPointerSize - 1)); -+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, -+ Operand(zero_reg)); -+ } -+ if (alignment == 2 * kSystemPointerSize) { -+ Label done; -+ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); -+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); -+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); -+ __ Add_d(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); -+ __ bind(&done); -+ } else if (alignment > 2 * kSystemPointerSize) { -+ Label done; -+ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); -+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); -+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); -+ __ li(kScratchReg2, alignment); -+ __ Sub_d(kScratchReg2, kScratchReg2, Operand(kScratchReg)); -+ __ Add_d(i.OutputRegister(), i.OutputRegister(), kScratchReg2); -+ __ bind(&done); -+ } -+ -+ break; -+ } -+ case kArchWordPoisonOnSpeculation: -+ __ And(i.OutputRegister(), i.InputRegister(0), -+ kSpeculationPoisonRegister); -+ break; -+ case kIeee754Float64Acos: -+ ASSEMBLE_IEEE754_UNOP(acos); -+ break; -+ case kIeee754Float64Acosh: -+ ASSEMBLE_IEEE754_UNOP(acosh); -+ break; -+ case kIeee754Float64Asin: -+ ASSEMBLE_IEEE754_UNOP(asin); -+ break; -+ case kIeee754Float64Asinh: -+ ASSEMBLE_IEEE754_UNOP(asinh); -+ break; -+ case kIeee754Float64Atan: -+ ASSEMBLE_IEEE754_UNOP(atan); -+ break; -+ case kIeee754Float64Atanh: -+ ASSEMBLE_IEEE754_UNOP(atanh); -+ break; -+ case kIeee754Float64Atan2: -+ ASSEMBLE_IEEE754_BINOP(atan2); -+ break; -+ case kIeee754Float64Cos: -+ ASSEMBLE_IEEE754_UNOP(cos); -+ break; -+ case kIeee754Float64Cosh: -+ ASSEMBLE_IEEE754_UNOP(cosh); -+ break; -+ case kIeee754Float64Cbrt: -+ ASSEMBLE_IEEE754_UNOP(cbrt); -+ break; -+ case kIeee754Float64Exp: 
-+ ASSEMBLE_IEEE754_UNOP(exp); -+ break; -+ case kIeee754Float64Expm1: -+ ASSEMBLE_IEEE754_UNOP(expm1); -+ break; -+ case kIeee754Float64Log: -+ ASSEMBLE_IEEE754_UNOP(log); -+ break; -+ case kIeee754Float64Log1p: -+ ASSEMBLE_IEEE754_UNOP(log1p); -+ break; -+ case kIeee754Float64Log2: -+ ASSEMBLE_IEEE754_UNOP(log2); -+ break; -+ case kIeee754Float64Log10: -+ ASSEMBLE_IEEE754_UNOP(log10); -+ break; -+ case kIeee754Float64Pow: -+ ASSEMBLE_IEEE754_BINOP(pow); -+ break; -+ case kIeee754Float64Sin: -+ ASSEMBLE_IEEE754_UNOP(sin); -+ break; -+ case kIeee754Float64Sinh: -+ ASSEMBLE_IEEE754_UNOP(sinh); -+ break; -+ case kIeee754Float64Tan: -+ ASSEMBLE_IEEE754_UNOP(tan); -+ break; -+ case kIeee754Float64Tanh: -+ ASSEMBLE_IEEE754_UNOP(tanh); -+ break; -+ case kLoong64Add: -+ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Dadd: -+ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64DaddOvf: -+ __ AdddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), -+ kScratchReg); -+ break; -+ case kLoong64Sub: -+ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Dsub: -+ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64DsubOvf: -+ __ SubdOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), -+ kScratchReg); -+ break; -+ case kLoong64Mul: -+ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64MulOvf: -+ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), -+ kScratchReg); -+ break; -+ case kLoong64MulHigh: -+ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64MulHighU: -+ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64DMulHigh: -+ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Div: -+ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ break; -+ case kLoong64DivU: -+ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ break; -+ case kLoong64Mod: -+ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64ModU: -+ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Dmul: -+ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Ddiv: -+ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ break; -+ case kLoong64DdivU: -+ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ break; -+ case kLoong64Dmod: -+ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64DmodU: -+ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Dlsa: -+ DCHECK(instr->InputAt(2)->IsImmediate()); -+ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), -+ i.InputInt8(2), t7); -+ break; -+ case kLoong64Lsa: -+ DCHECK(instr->InputAt(2)->IsImmediate()); -+ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), -+ i.InputInt8(2), 
t7); -+ break; -+ case kLoong64And: -+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64And32: -+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); -+ break; -+ case kLoong64Or: -+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Or32: -+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); -+ break; -+ case kLoong64Nor: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ } else { -+ DCHECK_EQ(0, i.InputOperand(1).immediate()); -+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); -+ } -+ break; -+ case kLoong64Nor32: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); -+ } else { -+ DCHECK_EQ(0, i.InputOperand(1).immediate()); -+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); -+ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); -+ } -+ break; -+ case kLoong64Xor: -+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Xor32: -+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); -+ break; -+ case kLoong64Clz: -+ __ Clz_w(i.OutputRegister(), i.InputRegister(0)); -+ break; -+ case kLoong64Dclz: -+ __ clz_d(i.OutputRegister(), i.InputRegister(0)); -+ break; -+ case kLoong64Ctz: { -+ Register src = i.InputRegister(0); -+ Register dst = i.OutputRegister(); -+ __ Ctz_w(dst, src); -+ } break; -+ case kLoong64Dctz: { -+ Register src = i.InputRegister(0); -+ Register dst = i.OutputRegister(); -+ __ Ctz_d(dst, src); -+ } break; -+ case kLoong64Popcnt: { -+ Register src = i.InputRegister(0); -+ Register dst = i.OutputRegister(); -+ __ Popcnt_w(dst, src); -+ } break; -+ case kLoong64Dpopcnt: { -+ Register src = i.InputRegister(0); -+ Register dst = i.OutputRegister(); -+ __ Popcnt_d(dst, src); -+ } break; -+ case kLoong64Shl: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ slli_w(i.OutputRegister(), i.InputRegister(0), -+ static_cast(imm)); -+ } -+ break; -+ case kLoong64Shr: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); -+ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); -+ __ srli_w(i.OutputRegister(), i.OutputRegister(), -+ static_cast(imm)); -+ } -+ break; -+ case kLoong64Sar: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); -+ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); -+ __ srai_w(i.OutputRegister(), i.OutputRegister(), -+ static_cast(imm)); -+ } -+ break; -+ case kLoong64Ext: -+ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0), -+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ break; -+ case kLoong64Ins: -+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { -+ __ bstrins_w(i.OutputRegister(), zero_reg, -+ 
i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ } else { -+ __ bstrins_w(i.OutputRegister(), i.InputRegister(0), -+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ } -+ break; -+ case kLoong64Dext: { -+ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0), -+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ break; -+ } -+ case kLoong64Dins: -+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { -+ __ bstrins_d(i.OutputRegister(), zero_reg, -+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ } else { -+ __ bstrins_d(i.OutputRegister(), i.InputRegister(0), -+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); -+ } -+ break; -+ case kLoong64Dshl: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ slli_d(i.OutputRegister(), i.InputRegister(0), -+ static_cast(imm)); -+ } -+ break; -+ case kLoong64Dshr: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ srli_d(i.OutputRegister(), i.InputRegister(0), -+ static_cast(imm)); -+ } -+ break; -+ case kLoong64Dsar: -+ if (instr->InputAt(1)->IsRegister()) { -+ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); -+ } else { -+ int64_t imm = i.InputOperand(1).immediate(); -+ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm); -+ } -+ break; -+ case kLoong64Ror: -+ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Dror: -+ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); -+ break; -+ case kLoong64Tst: -+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); -+ // Pseudo-instruction used for cmp/branch. No opcode emitted here. -+ break; -+ case kLoong64Cmp: -+ // Pseudo-instruction used for cmp/branch. No opcode emitted here. -+ break; -+ case kLoong64Mov: -+ // TODO(plind): Should we combine mov/li like this, or use separate instr? -+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType -+ if (HasRegisterInput(instr, 0)) { -+ __ mov(i.OutputRegister(), i.InputRegister(0)); -+ } else { -+ __ li(i.OutputRegister(), i.InputOperand(0)); -+ } -+ break; -+ -+ case kLoong64CmpS: { -+ FPURegister left = i.InputOrZeroSingleRegister(0); -+ FPURegister right = i.InputOrZeroSingleRegister(1); -+ bool predicate; -+ FPUCondition cc = -+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); -+ -+ if ((left == kDoubleRegZero || right == kDoubleRegZero) && -+ !__ IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ CompareF32(left, right, cc); -+ } break; -+ case kLoong64AddS: -+ // TODO(plind): add special case: combine mult & add. -+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64SubS: -+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64MulS: -+ // TODO(plind): add special case: right op is -1.0, see arm port. -+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64DivS: -+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64ModS: { -+ // TODO(bmeurer): We should really get rid of this special instruction, -+ // and generate a CallAddress instruction instead. 
-+ FrameScope scope(tasm(), StackFrame::MANUAL); -+ __ PrepareCallCFunction(0, 2, kScratchReg); -+ __ MovToFloatParameters(i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) -+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); -+ // Move the result in the double result register. -+ __ MovFromFloatResult(i.OutputSingleRegister()); -+ break; -+ } -+ case kLoong64AbsS: -+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ case kLoong64NegS: -+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ case kLoong64SqrtS: { -+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64MaxS: -+ __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64MinS: -+ __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64CmpD: { -+ FPURegister left = i.InputOrZeroDoubleRegister(0); -+ FPURegister right = i.InputOrZeroDoubleRegister(1); -+ bool predicate; -+ FPUCondition cc = -+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); -+ if ((left == kDoubleRegZero || right == kDoubleRegZero) && -+ !__ IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ CompareF64(left, right, cc); -+ } break; -+ case kLoong64AddD: -+ // TODO(plind): add special case: combine mult & add. -+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64SubD: -+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64MulD: -+ // TODO(plind): add special case: right op is -1.0, see arm port. -+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64DivD: -+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64ModD: { -+ // TODO(bmeurer): We should really get rid of this special instruction, -+ // and generate a CallAddress instruction instead. -+ FrameScope scope(tasm(), StackFrame::MANUAL); -+ __ PrepareCallCFunction(0, 2, kScratchReg); -+ __ MovToFloatParameters(i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); -+ // Move the result in the double result register. 
-+ __ MovFromFloatResult(i.OutputDoubleRegister()); -+ break; -+ } -+ case kLoong64AbsD: -+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64NegD: -+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64SqrtD: { -+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64MaxD: -+ __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64MinD: -+ __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), -+ i.InputDoubleRegister(1)); -+ break; -+ case kLoong64Float64RoundDown: { -+ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64Float32RoundDown: { -+ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ } -+ case kLoong64Float64RoundTruncate: { -+ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64Float32RoundTruncate: { -+ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ } -+ case kLoong64Float64RoundUp: { -+ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64Float32RoundUp: { -+ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ } -+ case kLoong64Float64RoundTiesEven: { -+ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ } -+ case kLoong64Float32RoundTiesEven: { -+ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); -+ break; -+ } -+ case kLoong64Float32Max: { -+ FPURegister dst = i.OutputSingleRegister(); -+ FPURegister src1 = i.InputSingleRegister(0); -+ FPURegister src2 = i.InputSingleRegister(1); -+ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2); -+ __ Float32Max(dst, src1, src2, ool->entry()); -+ __ bind(ool->exit()); -+ break; -+ } -+ case kLoong64Float64Max: { -+ FPURegister dst = i.OutputDoubleRegister(); -+ FPURegister src1 = i.InputDoubleRegister(0); -+ FPURegister src2 = i.InputDoubleRegister(1); -+ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2); -+ __ Float64Max(dst, src1, src2, ool->entry()); -+ __ bind(ool->exit()); -+ break; -+ } -+ case kLoong64Float32Min: { -+ FPURegister dst = i.OutputSingleRegister(); -+ FPURegister src1 = i.InputSingleRegister(0); -+ FPURegister src2 = i.InputSingleRegister(1); -+ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2); -+ __ Float32Min(dst, src1, src2, ool->entry()); -+ __ bind(ool->exit()); -+ break; -+ } -+ case kLoong64Float64Min: { -+ FPURegister dst = i.OutputDoubleRegister(); -+ FPURegister src1 = i.InputDoubleRegister(0); -+ FPURegister src2 = i.InputDoubleRegister(1); -+ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2); -+ __ Float64Min(dst, src1, src2, ool->entry()); -+ __ bind(ool->exit()); -+ break; -+ } -+ case kLoong64Float64SilenceNaN: -+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64CvtSD: -+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64CvtDS: -+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); -+ break; -+ case kLoong64CvtDW: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ movgr2fr_w(scratch, i.InputRegister(0)); -+ __ ffint_d_w(i.OutputDoubleRegister(), scratch); -+ break; -+ } -+ case kLoong64CvtSW: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ movgr2fr_w(scratch, i.InputRegister(0)); -+ __ 
ffint_s_w(i.OutputDoubleRegister(), scratch); -+ break; -+ } -+ case kLoong64CvtSUw: { -+ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); -+ break; -+ } -+ case kLoong64CvtSL: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ movgr2fr_d(scratch, i.InputRegister(0)); -+ __ ffint_s_l(i.OutputDoubleRegister(), scratch); -+ break; -+ } -+ case kLoong64CvtDL: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ movgr2fr_d(scratch, i.InputRegister(0)); -+ __ ffint_d_l(i.OutputDoubleRegister(), scratch); -+ break; -+ } -+ case kLoong64CvtDUw: { -+ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); -+ break; -+ } -+ case kLoong64CvtDUl: { -+ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); -+ break; -+ } -+ case kLoong64CvtSUl: { -+ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); -+ break; -+ } -+ case kLoong64FloorWD: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrm_w_d(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64CeilWD: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrp_w_d(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64RoundWD: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrne_w_d(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64TruncWD: { -+ FPURegister scratch = kScratchDoubleReg; -+ // Other arches use round to zero here, so we follow. -+ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64FloorWS: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrm_w_s(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64CeilWS: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrp_w_s(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64RoundWS: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrne_w_s(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ break; -+ } -+ case kLoong64TruncWS: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ ftintrz_w_s(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_s(i.OutputRegister(), scratch); -+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, -+ // because INT32_MIN allows easier out-of-bounds detection. -+ __ addi_w(kScratchReg, i.OutputRegister(), 1); -+ __ slt(kScratchReg2, kScratchReg, i.OutputRegister()); -+ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); -+ break; -+ } -+ case kLoong64TruncLS: { -+ FPURegister scratch = kScratchDoubleReg; -+ Register tmp_fcsr = kScratchReg; -+ Register result = kScratchReg2; -+ -+ bool load_status = instr->OutputCount() > 1; -+ if (load_status) { -+ // Save FCSR. -+ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); -+ // Clear FPU flags. -+ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); -+ } -+ // Other arches use round to zero here, so we follow. -+ __ ftintrz_l_s(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_d(i.OutputRegister(), scratch); -+ if (load_status) { -+ __ movfcsr2gr(result); // __ cfc1(result, FCSR); -+ // Check for overflow and NaNs. 
-+ __ And(result, result, -+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); -+ __ Slt(result, zero_reg, result); -+ __ xori(result, result, 1); -+ __ mov(i.OutputRegister(1), result); -+ // Restore FCSR -+ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); -+ } -+ break; -+ } -+ case kLoong64TruncLD: { -+ FPURegister scratch = kScratchDoubleReg; -+ Register tmp_fcsr = kScratchReg; -+ Register result = kScratchReg2; -+ -+ bool load_status = instr->OutputCount() > 1; -+ if (load_status) { -+ // Save FCSR. -+ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); -+ // Clear FPU flags. -+ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); -+ } -+ // Other arches use round to zero here, so we follow. -+ __ ftintrz_l_d(scratch, i.InputDoubleRegister(0)); -+ __ movfr2gr_d(i.OutputRegister(0), scratch); -+ if (load_status) { -+ __ movfcsr2gr(result); // __ cfc1(result, FCSR); -+ // Check for overflow and NaNs. -+ __ And(result, result, -+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); -+ __ Slt(result, zero_reg, result); -+ __ xori(result, result, 1); -+ __ mov(i.OutputRegister(1), result); -+ // Restore FCSR -+ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); -+ } -+ break; -+ } -+ case kLoong64TruncUwD: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); -+ break; -+ } -+ case kLoong64TruncUwS: { -+ FPURegister scratch = kScratchDoubleReg; -+ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); -+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, -+ // because 0 allows easier out-of-bounds detection. -+ __ addi_w(kScratchReg, i.OutputRegister(), 1); -+ __ Movz(i.OutputRegister(), zero_reg, kScratchReg); -+ break; -+ } -+ case kLoong64TruncUlS: { -+ FPURegister scratch = kScratchDoubleReg; -+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; -+ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch, -+ result); -+ break; -+ } -+ case kLoong64TruncUlD: { -+ FPURegister scratch = kScratchDoubleReg; -+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; -+ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch, -+ result); -+ break; -+ } -+ case kLoong64BitcastDL: -+ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64BitcastLD: -+ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0)); -+ break; -+ case kLoong64Float64ExtractLowWord32: -+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64Float64ExtractHighWord32: -+ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0)); -+ break; -+ case kLoong64Float64InsertLowWord32: -+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); -+ break; -+ case kLoong64Float64InsertHighWord32: -+ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1)); -+ break; -+ // ... more basic instructions ... 
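
// The kLoong64TruncWS / kLoong64TruncUwS cases above rely on the FPU's
// saturating behaviour: ftintrz turns out-of-range inputs into INT32_MAX
// (or UINT32_MAX), which the emitted addi_w/slt/Movn sequence rewrites to
// INT32_MIN (or 0) because that sentinel is cheaper for callers to test.
// A standalone sketch of the signed variant (illustration, not patch code):
#include <cstdint>

inline int32_t CanonicalizeTruncResult(int32_t truncated) {
  // addi_w: add 1 with two's-complement wraparound (done in unsigned
  // arithmetic here to avoid signed-overflow UB in C++).
  int32_t plus_one =
      static_cast<int32_t>(static_cast<uint32_t>(truncated) + 1u);
  // slt + Movn: the add wrapped only if truncated was INT32_MAX, and in
  // that case plus_one is exactly INT32_MIN, the desired sentinel.
  return (plus_one < truncated) ? plus_one : truncated;
}
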
-+ -+ case kLoong64Seb: -+ __ ext_w_b(i.OutputRegister(), i.InputRegister(0)); -+ break; -+ case kLoong64Seh: -+ __ ext_w_h(i.OutputRegister(), i.InputRegister(0)); -+ break; -+ case kLoong64Lbu: -+ __ Ld_bu(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Lb: -+ __ Ld_b(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Sb: -+ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Lhu: -+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Ulhu: -+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Lh: -+ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Ulh: -+ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Sh: -+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Ush: -+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Lw: -+ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Ulw: -+ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Lwu: -+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Ulwu: -+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Ld: -+ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Uld: -+ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); -+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); -+ break; -+ case kLoong64Sw: -+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Usw: -+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Sd: -+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Usd: -+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); -+ break; -+ case kLoong64Lwc1: { -+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); -+ break; -+ } -+ case kLoong64Ulwc1: { -+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); -+ break; -+ } -+ case kLoong64Swc1: { -+ size_t index = 0; -+ MemOperand operand = i.MemoryOperand(&index); -+ FPURegister ft = i.InputOrZeroSingleRegister(index); -+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ Fst_s(ft, operand); -+ break; -+ } -+ case kLoong64Uswc1: { -+ size_t index = 0; -+ MemOperand operand = i.MemoryOperand(&index); -+ FPURegister ft = i.InputOrZeroSingleRegister(index); -+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ Fst_s(ft, operand); -+ break; -+ } -+ case kLoong64Ldc1: -+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); -+ break; -+ case kLoong64Uldc1: -+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); -+ break; -+ case kLoong64Sdc1: { -+ FPURegister ft = i.InputOrZeroDoubleRegister(2); -+ if (ft == kDoubleRegZero && !__ 
IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ Fst_d(ft, i.MemoryOperand()); -+ break; -+ } -+ case kLoong64Usdc1: { -+ FPURegister ft = i.InputOrZeroDoubleRegister(2); -+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { -+ __ Move(kDoubleRegZero, 0.0); -+ } -+ -+ __ Fst_d(ft, i.MemoryOperand()); -+ break; -+ } -+ case kLoong64Sync: { -+ __ dbar(0); -+ break; -+ } -+ case kLoong64Push: -+ if (instr->InputAt(0)->IsFPRegister()) { -+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); -+ __ Sub_d(sp, sp, Operand(kDoubleSize)); -+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); -+ } else { -+ __ Push(i.InputRegister(0)); -+ frame_access_state()->IncreaseSPDelta(1); -+ } -+ break; -+ case kLoong64Peek: { -+ // The incoming value is 0-based, but we need a 1-based value. -+ int reverse_slot = i.InputInt32(0) + 1; -+ int offset = -+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); -+ if (instr->OutputAt(0)->IsFPRegister()) { -+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); -+ if (op->representation() == MachineRepresentation::kFloat64) { -+ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset)); -+ } else { -+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32); -+ __ Fld_s( -+ i.OutputSingleRegister(0), -+ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); -+ } -+ } else { -+ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset)); -+ } -+ break; -+ } -+ case kLoong64StackClaim: { -+ __ Sub_d(sp, sp, Operand(i.InputInt32(0))); -+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / -+ kSystemPointerSize); -+ break; -+ } -+ case kLoong64StoreToStackSlot: { -+ if (instr->InputAt(0)->IsFPRegister()) { -+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); -+ } else { -+ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); -+ } -+ break; -+ } -+ case kLoong64ByteSwap64: { -+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8); -+ break; -+ } -+ case kLoong64ByteSwap32: { -+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); -+ break; -+ } -+ case kWord32AtomicLoadInt8: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b); -+ break; -+ case kWord32AtomicLoadUint8: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); -+ break; -+ case kWord32AtomicLoadInt16: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h); -+ break; -+ case kWord32AtomicLoadUint16: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); -+ break; -+ case kWord32AtomicLoadWord32: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w); -+ break; -+ case kLoong64Word64AtomicLoadUint8: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); -+ break; -+ case kLoong64Word64AtomicLoadUint16: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); -+ break; -+ case kLoong64Word64AtomicLoadUint32: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu); -+ break; -+ case kLoong64Word64AtomicLoadUint64: -+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d); -+ break; -+ case kWord32AtomicStoreWord8: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); -+ break; -+ case kWord32AtomicStoreWord16: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); -+ break; -+ case kWord32AtomicStoreWord32: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); -+ break; -+ case kLoong64Word64AtomicStoreWord8: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); -+ break; -+ case kLoong64Word64AtomicStoreWord16: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); -+ break; -+ case kLoong64Word64AtomicStoreWord32: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); -+ break; -+ case kLoong64Word64AtomicStoreWord64: -+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d); -+ break; -+ case 
kWord32AtomicExchangeInt8: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); -+ break; -+ case kWord32AtomicExchangeUint8: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); -+ break; -+ case kWord32AtomicExchangeInt16: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); -+ break; -+ case kWord32AtomicExchangeUint16: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); -+ break; -+ case kWord32AtomicExchangeWord32: -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2), -+ i.TempRegister(0)); -+ break; -+ case kLoong64Word64AtomicExchangeUint8: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); -+ break; -+ case kLoong64Word64AtomicExchangeUint16: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); -+ break; -+ case kLoong64Word64AtomicExchangeUint32: -+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); -+ break; -+ case kLoong64Word64AtomicExchangeUint64: -+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2), -+ i.TempRegister(0)); -+ break; -+ case kWord32AtomicCompareExchangeInt8: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); -+ break; -+ case kWord32AtomicCompareExchangeUint8: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); -+ break; -+ case kWord32AtomicCompareExchangeInt16: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); -+ break; -+ case kWord32AtomicCompareExchangeUint16: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); -+ break; -+ case kWord32AtomicCompareExchangeWord32: -+ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0); -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w); -+ break; -+ case kLoong64Word64AtomicCompareExchangeUint8: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); -+ break; -+ case kLoong64Word64AtomicCompareExchangeUint16: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); -+ break; -+ case kLoong64Word64AtomicCompareExchangeUint32: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); -+ break; -+ case kLoong64Word64AtomicCompareExchangeUint64: -+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d); -+ break; -+ case kWord32AtomicAddWord32: -+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); -+ break; -+ case kWord32AtomicSubWord32: -+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w); -+ break; -+ case kWord32AtomicAndWord32: -+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); -+ break; -+ case kWord32AtomicOrWord32: -+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); -+ break; -+ case kWord32AtomicXorWord32: -+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); -+ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); -+ break; -+#define ATOMIC_BINOP_CASE(op, inst) \ -+ case kWord32Atomic##op##Int8: \ -+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst, 32); \ -+ break; \ -+ case kWord32Atomic##op##Uint8: \ -+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst, 32); \ -+ break; \ -+ case 
kWord32Atomic##op##Int16:                                        \
-+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst, 32);   \
-+    break;                                                       \
-+  case kWord32Atomic##op##Uint16:                                \
-+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst, 32);  \
-+    break;
-+      ATOMIC_BINOP_CASE(Add, Add_w)
-+      ATOMIC_BINOP_CASE(Sub, Sub_w)
-+      ATOMIC_BINOP_CASE(And, And)
-+      ATOMIC_BINOP_CASE(Or, Or)
-+      ATOMIC_BINOP_CASE(Xor, Xor)
-+#undef ATOMIC_BINOP_CASE
-+
-+    case kLoong64Word64AtomicAddUint64:
-+      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
-+      __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
-+      break;
-+    case kLoong64Word64AtomicSubUint64:
-+      ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
-+      break;
-+    case kLoong64Word64AtomicAndUint64:
-+      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
-+      __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
-+      break;
-+    case kLoong64Word64AtomicOrUint64:
-+      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
-+      __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
-+      break;
-+    case kLoong64Word64AtomicXorUint64:
-+      __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
-+      __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
-+      break;
-+#define ATOMIC_BINOP_CASE(op, inst)                              \
-+  case kLoong64Word64Atomic##op##Uint8:                          \
-+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst, 64);   \
-+    break;                                                       \
-+  case kLoong64Word64Atomic##op##Uint16:                         \
-+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst, 64);  \
-+    break;                                                       \
-+  case kLoong64Word64Atomic##op##Uint32:                         \
-+    ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, inst, 64);  \
-+    break;
-+      ATOMIC_BINOP_CASE(Add, Add_d)
-+      ATOMIC_BINOP_CASE(Sub, Sub_d)
-+      ATOMIC_BINOP_CASE(And, And)
-+      ATOMIC_BINOP_CASE(Or, Or)
-+      ATOMIC_BINOP_CASE(Xor, Xor)
-+#undef ATOMIC_BINOP_CASE
-+    case kLoong64AssertEqual:
-+      __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
-+                i.InputRegister(0), Operand(i.InputRegister(1)));
-+      break;
-+    case kLoong64S128Zero:
-+    case kLoong64I32x4Splat:
-+    case kLoong64I32x4ExtractLane:
-+    case kLoong64I32x4AddHoriz:
-+    case kLoong64I32x4Add:
-+    case kLoong64I32x4ReplaceLane:
-+    case kLoong64I32x4Sub:
-+    case kLoong64F64x2Abs:
-+    default:
-+      break;
-+  }
-+  return kSuccess;
-+}  // NOLINT(readability/fn_size)
-+
-+#define UNSUPPORTED_COND(opcode, condition)                                    \
-+  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
-+                 << "\"";                                                      \
-+  UNIMPLEMENTED();
-+
-+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
-+                            Instruction* instr, FlagsCondition condition,
-+                            Label* tlabel, Label* flabel, bool fallthru) {
-+#undef __
-+#define __ tasm->
-+  Loong64OperandConverter i(gen, instr);
-+
-+  Condition cc = kNoCondition;
-+  // LOONG64 does not have condition code flags, so compare and branch are
-+  // implemented differently than on the other arch's. The compare operations
-+  // emit loong64 pseudo-instructions, which are handled here by branch
-+  // instructions that do the actual comparison. Essential that the input
-+  // registers to compare pseudo-op are not modified before this branch op, as
-+  // they are tested here.
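The comment above states the key constraint of this port: with no flags register, a compare "instruction" only parks its operands (or an AND result) in scratch registers, and the branch performs the real test. A minimal sketch of the kLoong64Tst pairing handled below (names illustrative):

#include <cstdint>

// Tst leaves lhs & rhs in a scratch register; the branch then compares
// that scratch value against zero_reg, so eq/ne are the only conditions.
bool TstBranchTaken(uint64_t lhs, uint64_t rhs, bool want_not_equal) {
  uint64_t scratch = lhs & rhs;          // the Tst pseudo-instruction
  return want_not_equal ? scratch != 0   // Branch(tlabel, ne, scratch, zero_reg)
                        : scratch == 0;  // Branch(tlabel, eq, scratch, zero_reg)
}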
-+ -+ if (instr->arch_opcode() == kLoong64Tst) { -+ cc = FlagsConditionToConditionTst(condition); -+ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); -+ } else if (instr->arch_opcode() == kLoong64Dadd || -+ instr->arch_opcode() == kLoong64Dsub) { -+ cc = FlagsConditionToConditionOvf(condition); -+ __ srai_d(kScratchReg, i.OutputRegister(), 32); -+ __ srai_w(kScratchReg2, i.OutputRegister(), 31); -+ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg)); -+ } else if (instr->arch_opcode() == kLoong64DaddOvf || -+ instr->arch_opcode() == kLoong64DsubOvf) { -+ switch (condition) { -+ // Overflow occurs if overflow register is negative -+ case kOverflow: -+ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); -+ break; -+ case kNotOverflow: -+ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); -+ break; -+ default: -+ UNSUPPORTED_COND(instr->arch_opcode(), condition); -+ break; -+ } -+ } else if (instr->arch_opcode() == kLoong64MulOvf) { -+ // Overflow occurs if overflow register is not zero -+ switch (condition) { -+ case kOverflow: -+ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); -+ break; -+ case kNotOverflow: -+ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); -+ break; -+ default: -+ UNSUPPORTED_COND(kLoong64MulOvf, condition); -+ break; -+ } -+ } else if (instr->arch_opcode() == kLoong64Cmp) { -+ cc = FlagsConditionToConditionCmp(condition); -+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); -+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { -+ cc = FlagsConditionToConditionCmp(condition); -+ Register lhs_register = sp; -+ uint32_t offset; -+ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) { -+ lhs_register = i.TempRegister(0); -+ __ Sub_d(lhs_register, sp, offset); -+ } -+ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0))); -+ } else if (instr->arch_opcode() == kLoong64CmpS || -+ instr->arch_opcode() == kLoong64CmpD) { -+ bool predicate; -+ FlagsConditionToConditionCmpFPU(&predicate, condition); -+ if (predicate) { -+ __ BranchTrueF(tlabel); -+ } else { -+ __ BranchFalseF(tlabel); -+ } -+ } else { -+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", -+ instr->arch_opcode()); -+ UNIMPLEMENTED(); -+ } -+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel. -+#undef __ -+#define __ tasm()-> -+} -+ -+// Assembles branches after an instruction. -+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { -+ Label* tlabel = branch->true_label; -+ Label* flabel = branch->false_label; -+ -+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, -+ branch->fallthru); -+} -+ -+void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, -+ Instruction* instr) { -+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
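The srai_d/srai_w pair above (and the srli_d/srli_w variant used for poisoning below) encode a flag-free 32-bit overflow test on a 64-bit result; spelled out as a sketch (names illustrative, arithmetic right shifts assumed as on the target):

#include <cstdint>

// A 32-bit add/sub computed in a 64-bit register overflowed iff bits
// 63..32 of the result disagree with the sign extension of bit 31.
bool Int32OpOverflowed(int64_t wide_result) {
  int64_t high = wide_result >> 32;                        // srai_d ..., 32
  int64_t sign = static_cast<int32_t>(wide_result) >> 31;  // srai_w ..., 31
  return high != sign;  // branch/poison when the two disagree
}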
-+  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
-+    return;
-+  }
-+
-+  Loong64OperandConverter i(this, instr);
-+  condition = NegateFlagsCondition(condition);
-+
-+  switch (instr->arch_opcode()) {
-+    case kLoong64Cmp: {
-+      __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
-+                             i.InputOperand(1),
-+                             FlagsConditionToConditionCmp(condition));
-+    }
-+      return;
-+    case kLoong64Tst: {
-+      switch (condition) {
-+        case kEqual:
-+          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
-+          break;
-+        case kNotEqual:
-+          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
-+                                        kScratchReg);
-+          break;
-+        default:
-+          UNREACHABLE();
-+      }
-+    }
-+      return;
-+    case kLoong64Dadd:
-+    case kLoong64Dsub: {
-+      // Check for overflow creates 1 or 0 for result.
-+      __ srli_d(kScratchReg, i.OutputRegister(), 63);
-+      __ srli_w(kScratchReg2, i.OutputRegister(), 31);
-+      __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
-+      switch (condition) {
-+        case kOverflow:
-+          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
-+                                        kScratchReg2);
-+          break;
-+        case kNotOverflow:
-+          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
-+          break;
-+        default:
-+          UNSUPPORTED_COND(instr->arch_opcode(), condition);
-+      }
-+    }
-+      return;
-+    case kLoong64DaddOvf:
-+    case kLoong64DsubOvf: {
-+      // Overflow occurs if overflow register is negative
-+      __ Slt(kScratchReg2, kScratchReg, zero_reg);
-+      switch (condition) {
-+        case kOverflow:
-+          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
-+                                        kScratchReg2);
-+          break;
-+        case kNotOverflow:
-+          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
-+          break;
-+        default:
-+          UNSUPPORTED_COND(instr->arch_opcode(), condition);
-+      }
-+    }
-+      return;
-+    case kLoong64MulOvf: {
-+      // Overflow occurs if overflow register is not zero
-+      switch (condition) {
-+        case kOverflow:
-+          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
-+                                        kScratchReg);
-+          break;
-+        case kNotOverflow:
-+          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
-+          break;
-+        default:
-+          UNSUPPORTED_COND(instr->arch_opcode(), condition);
-+      }
-+    }
-+      return;
-+    case kLoong64CmpS:
-+    case kLoong64CmpD: {
-+      bool predicate;
-+      FlagsConditionToConditionCmpFPU(&predicate, condition);
-+      if (predicate) {
-+        __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
-+      } else {
-+        __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
-+      }
-+    }
-+      return;
-+    default:
-+      UNREACHABLE();
-+  }
-+}
-+
-+#undef UNSUPPORTED_COND
-+
-+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
-+                                            BranchInfo* branch) {
-+  AssembleArchBranch(instr, branch);
-+}
-+
-+void CodeGenerator::AssembleArchJump(RpoNumber target) {
-+  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
-+}
-+
-+void CodeGenerator::AssembleArchTrap(Instruction* instr,
-+                                     FlagsCondition condition) {
-+  class OutOfLineTrap final : public OutOfLineCode {
-+   public:
-+    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
-+        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
-+    void Generate() final {
-+      Loong64OperandConverter i(gen_, instr_);
-+      TrapId trap_id =
-+          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
-+      GenerateCallToTrap(trap_id);
-+    }
-+
-+   private:
-+    void GenerateCallToTrap(TrapId trap_id) {
-+      if (trap_id == TrapId::kInvalid) {
-+        // We cannot test calls to the runtime in cctest/test-run-wasm.
-+        // Therefore we emit a call to C here instead of a call to the runtime.
-+        // We use the context register as the scratch register, because we do
-+        // not have a context here.
-+        __ PrepareCallCFunction(0, 0, cp);
-+        __ CallCFunction(
-+            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
-+        __ LeaveFrame(StackFrame::WASM);
-+        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
-+        int pop_count =
-+            static_cast<int>(call_descriptor->StackParameterCount());
-+        pop_count += (pop_count & 1);  // align
-+        __ Drop(pop_count);
-+        __ Ret();
-+      } else {
-+        gen_->AssembleSourcePosition(instr_);
-+        // A direct call to a wasm runtime stub defined in this module.
-+        // Just encode the stub index. This will be patched when the code
-+        // is added to the native module and copied into wasm code space.
-+        __ Call(static_cast<Address>
(trap_id), RelocInfo::WASM_STUB_CALL); -+ ReferenceMap* reference_map = -+ new (gen_->zone()) ReferenceMap(gen_->zone()); -+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); -+ if (FLAG_debug_code) { -+ __ stop(); -+ } -+ } -+ } -+ Instruction* instr_; -+ CodeGenerator* gen_; -+ }; -+ auto ool = new (zone()) OutOfLineTrap(this, instr); -+ Label* tlabel = ool->entry(); -+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); -+} -+ -+// Assembles boolean materializations after an instruction. -+void CodeGenerator::AssembleArchBoolean(Instruction* instr, -+ FlagsCondition condition) { -+ Loong64OperandConverter i(this, instr); -+ -+ // Materialize a full 32-bit 1 or 0 value. The result register is always the -+ // last output of the instruction. -+ DCHECK_NE(0u, instr->OutputCount()); -+ Register result = i.OutputRegister(instr->OutputCount() - 1); -+ Condition cc = kNoCondition; -+ // Loong64 does not have condition code flags, so compare and branch are -+ // implemented differently than on the other arch's. The compare operations -+ // emit loong64 pseudo-instructions, which are checked and handled here. -+ -+ if (instr->arch_opcode() == kLoong64Tst) { -+ cc = FlagsConditionToConditionTst(condition); -+ if (cc == eq) { -+ __ Sltu(result, kScratchReg, 1); -+ } else { -+ __ Sltu(result, zero_reg, kScratchReg); -+ } -+ return; -+ } else if (instr->arch_opcode() == kLoong64Dadd || -+ instr->arch_opcode() == kLoong64Dsub) { -+ cc = FlagsConditionToConditionOvf(condition); -+ // Check for overflow creates 1 or 0 for result. -+ __ srli_d(kScratchReg, i.OutputRegister(), 63); -+ __ srli_w(kScratchReg2, i.OutputRegister(), 31); -+ __ xor_(result, kScratchReg, kScratchReg2); -+ if (cc == eq) // Toggle result for not overflow. 
-+      __ xori(result, result, 1);
-+    return;
-+  } else if (instr->arch_opcode() == kLoong64DaddOvf ||
-+             instr->arch_opcode() == kLoong64DsubOvf) {
-+    // Overflow occurs if overflow register is negative
-+    __ slt(result, kScratchReg, zero_reg);
-+  } else if (instr->arch_opcode() == kLoong64MulOvf) {
-+    // Overflow occurs if overflow register is not zero
-+    __ Sgtu(result, kScratchReg, zero_reg);
-+  } else if (instr->arch_opcode() == kLoong64Cmp) {
-+    cc = FlagsConditionToConditionCmp(condition);
-+    switch (cc) {
-+      case eq:
-+      case ne: {
-+        Register left = i.InputRegister(0);
-+        Operand right = i.InputOperand(1);
-+        if (instr->InputAt(1)->IsImmediate()) {
-+          if (is_int12(-right.immediate())) {
-+            if (right.immediate() == 0) {
-+              if (cc == eq) {
-+                __ Sltu(result, left, 1);
-+              } else {
-+                __ Sltu(result, zero_reg, left);
-+              }
-+            } else {
-+              __ Add_d(result, left, Operand(-right.immediate()));
-+              if (cc == eq) {
-+                __ Sltu(result, result, 1);
-+              } else {
-+                __ Sltu(result, zero_reg, result);
-+              }
-+            }
-+          } else {
-+            if (is_uint12(right.immediate())) {
-+              __ Xor(result, left, right);
-+            } else {
-+              __ li(kScratchReg, right);
-+              __ Xor(result, left, kScratchReg);
-+            }
-+            if (cc == eq) {
-+              __ Sltu(result, result, 1);
-+            } else {
-+              __ Sltu(result, zero_reg, result);
-+            }
-+          }
-+        } else {
-+          __ Xor(result, left, right);
-+          if (cc == eq) {
-+            __ Sltu(result, result, 1);
-+          } else {
-+            __ Sltu(result, zero_reg, result);
-+          }
-+        }
-+      } break;
-+      case lt:
-+      case ge: {
-+        Register left = i.InputRegister(0);
-+        Operand right = i.InputOperand(1);
-+        __ Slt(result, left, right);
-+        if (cc == ge) {
-+          __ xori(result, result, 1);
-+        }
-+      } break;
-+      case gt:
-+      case le: {
-+        Register left = i.InputRegister(1);
-+        Operand right = i.InputOperand(0);
-+        __ Slt(result, left, right);
-+        if (cc == le) {
-+          __ xori(result, result, 1);
-+        }
-+      } break;
-+      case lo:
-+      case hs: {
-+        Register left = i.InputRegister(0);
-+        Operand right = i.InputOperand(1);
-+        __ Sltu(result, left, right);
-+        if (cc == hs) {
-+          __ xori(result, result, 1);
-+        }
-+      } break;
-+      case hi:
-+      case ls: {
-+        Register left = i.InputRegister(1);
-+        Operand right = i.InputOperand(0);
-+        __ Sltu(result, left, right);
-+        if (cc == ls) {
-+          __ xori(result, result, 1);
-+        }
-+      } break;
-+      default:
-+        UNREACHABLE();
-+    }
-+    return;
-+  } else if (instr->arch_opcode() == kLoong64CmpD ||
-+             instr->arch_opcode() == kLoong64CmpS) {
-+    FPURegister left = i.InputOrZeroDoubleRegister(0);
-+    FPURegister right = i.InputOrZeroDoubleRegister(1);
-+    if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
-+        !__ IsDoubleZeroRegSet()) {
-+      __ Move(kDoubleRegZero, 0.0);
-+    }
-+    bool predicate;
-+    FlagsConditionToConditionCmpFPU(&predicate, condition);
-+    {
-+      __ movcf2gr(result, FCC0);
-+      if (!predicate) {
-+        __ xori(result, result, 1);
-+      }
-+    }
-+    return;
-+  } else {
-+    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
-+           instr->arch_opcode());
-+    TRACE_UNIMPL();
-+    UNIMPLEMENTED();
-+  }
-+}
-+
-+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
-+  Loong64OperandConverter i(this, instr);
-+  Register input = i.InputRegister(0);
-+  std::vector<std::pair<int32_t, Label*>> cases;
-+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
-+    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
-+  }
-+  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
-+                                      cases.data() + cases.size());
-+}
-+
-+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
-+  Loong64OperandConverter i(this, instr);
-+ Register input = i.InputRegister(0); -+ size_t const case_count = instr->InputCount() - 2; -+ -+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count)); -+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { -+ return GetLabel(i.InputRpo(index + 2)); -+ }); -+} -+ -+void CodeGenerator::FinishFrame(Frame* frame) { -+ auto call_descriptor = linkage()->GetIncomingDescriptor(); -+ -+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); -+ if (saves_fpu != 0) { -+ int count = base::bits::CountPopulation(saves_fpu); -+ DCHECK_EQ(kNumCalleeSavedFPU, count); -+ frame->AllocateSavedCalleeRegisterSlots(count * -+ (kDoubleSize / kSystemPointerSize)); -+ } -+ -+ const RegList saves = call_descriptor->CalleeSavedRegisters(); -+ if (saves != 0) { -+ int count = base::bits::CountPopulation(saves); -+ DCHECK_EQ(kNumCalleeSaved, count + 1); -+ frame->AllocateSavedCalleeRegisterSlots(count); -+ } -+} -+ -+void CodeGenerator::AssembleConstructFrame() { -+ auto call_descriptor = linkage()->GetIncomingDescriptor(); -+ -+ if (frame_access_state()->has_frame()) { -+ if (call_descriptor->IsCFunctionCall()) { -+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { -+ __ StubPrologue(StackFrame::C_WASM_ENTRY); -+ // Reserve stack space for saving the c_entry_fp later. -+ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); -+ } else { -+ __ Push(ra, fp); -+ __ mov(fp, sp); -+ } -+ } else if (call_descriptor->IsJSFunctionCall()) { -+ __ Prologue(); -+ if (call_descriptor->PushArgumentCount()) { -+ __ Push(kJavaScriptCallArgCountRegister); -+ } -+ } else { -+ __ StubPrologue(info()->GetOutputStackFrameType()); -+ if (call_descriptor->IsWasmFunctionCall()) { -+ __ Push(kWasmInstanceRegister); -+ } else if (call_descriptor->IsWasmImportWrapper() || -+ call_descriptor->IsWasmCapiFunction()) { -+ // Wasm import wrappers are passed a tuple in the place of the instance. -+ // Unpack the tuple into the instance and the target callable. -+ // This must be done here in the codegen because it cannot be expressed -+ // properly in the graph. -+ __ Ld_d(kJSFunctionRegister, -+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset)); -+ __ Ld_d(kWasmInstanceRegister, -+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); -+ __ Push(kWasmInstanceRegister); -+ if (call_descriptor->IsWasmCapiFunction()) { -+ // Reserve space for saving the PC later. -+ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); -+ } -+ } -+ } -+ } -+ -+ int required_slots = -+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); -+ -+ if (info()->is_osr()) { -+ // TurboFan OSR-compiled functions cannot be entered directly. -+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); -+ -+ // Unoptimized code jumps directly to this entrypoint while the unoptimized -+ // frame is still on the stack. Optimized code uses OSR values directly from -+ // the unoptimized frame. Thus, all that needs to be done is to allocate the -+ // remaining stack slots. 
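The WebAssembly big-frame check emitted below reduces to the following predicate (a sketch: kSystemPointerSize is 8 on loong64, FLAG_stack_size is in KiB, and the function name is illustrative):

#include <cstdint>

// Frames at least as large as the whole stack trap unconditionally,
// which also avoids integer overflow in the comparison; smaller frames
// trap unless sp stays at or above real_stack_limit + frame size.
bool NeedsStackOverflowTrap(uintptr_t sp, uintptr_t real_stack_limit,
                            int required_slots, int stack_size_kib) {
  const uintptr_t frame_bytes = uintptr_t{8} * required_slots;
  if (frame_bytes >= uintptr_t{1024} * stack_size_kib) return true;
  return sp < real_stack_limit + frame_bytes;  // Branch(&done, uge, ...)
}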
-+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); -+ osr_pc_offset_ = __ pc_offset(); -+ required_slots -= osr_helper()->UnoptimizedFrameSlots(); -+ ResetSpeculationPoison(); -+ } -+ -+ const RegList saves = call_descriptor->CalleeSavedRegisters(); -+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); -+ -+ if (required_slots > 0) { -+ DCHECK(frame_access_state()->has_frame()); -+ if (info()->IsWasm() && required_slots > 128) { -+ // For WebAssembly functions with big frames we have to do the stack -+ // overflow check before we construct the frame. Otherwise we may not -+ // have enough space on the stack to call the runtime for the stack -+ // overflow. -+ Label done; -+ -+ // If the frame is bigger than the stack, we throw the stack overflow -+ // exception unconditionally. Thereby we can avoid the integer overflow -+ // check in the condition code. -+ if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) { -+ __ Ld_d( -+ kScratchReg, -+ FieldMemOperand(kWasmInstanceRegister, -+ WasmInstanceObject::kRealStackLimitAddressOffset)); -+ __ Ld_d(kScratchReg, MemOperand(kScratchReg, 0)); -+ __ Add_d(kScratchReg, kScratchReg, -+ Operand(required_slots * kSystemPointerSize)); -+ __ Branch(&done, uge, sp, Operand(kScratchReg)); -+ } -+ -+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); -+ // We come from WebAssembly, there are no references for the GC. -+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); -+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); -+ if (FLAG_debug_code) { -+ __ stop(); -+ } -+ -+ __ bind(&done); -+ } -+ } -+ -+ const int returns = frame()->GetReturnSlotCount(); -+ -+ // Skip callee-saved and return slots, which are pushed below. -+ required_slots -= base::bits::CountPopulation(saves); -+ required_slots -= base::bits::CountPopulation(saves_fpu); -+ required_slots -= returns; -+ if (required_slots > 0) { -+ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize)); -+ } -+ -+ if (saves_fpu != 0) { -+ // Save callee-saved FPU registers. -+ __ MultiPushFPU(saves_fpu); -+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu)); -+ } -+ -+ if (saves != 0) { -+ // Save callee-saved registers. -+ __ MultiPush(saves); -+ DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1); -+ } -+ -+ if (returns != 0) { -+ // Create space for returns. -+ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize)); -+ } -+} -+ -+void CodeGenerator::AssembleReturn(InstructionOperand* pop) { -+ auto call_descriptor = linkage()->GetIncomingDescriptor(); -+ -+ const int returns = frame()->GetReturnSlotCount(); -+ if (returns != 0) { -+ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize)); -+ } -+ -+ // Restore GP registers. -+ const RegList saves = call_descriptor->CalleeSavedRegisters(); -+ if (saves != 0) { -+ __ MultiPop(saves); -+ } -+ -+ // Restore FPU registers. -+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); -+ if (saves_fpu != 0) { -+ __ MultiPopFPU(saves_fpu); -+ } -+ -+ Loong64OperandConverter g(this, nullptr); -+ if (call_descriptor->IsCFunctionCall()) { -+ AssembleDeconstructFrame(); -+ } else if (frame_access_state()->has_frame()) { -+ // Canonicalize JSFunction return sites for now unless they have an variable -+ // number of stack slot pops. 
-+    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
-+      if (return_label_.is_bound()) {
-+        __ Branch(&return_label_);
-+        return;
-+      } else {
-+        __ bind(&return_label_);
-+        AssembleDeconstructFrame();
-+      }
-+    } else {
-+      AssembleDeconstructFrame();
-+    }
-+  }
-+  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
-+  if (pop->IsImmediate()) {
-+    pop_count += g.ToConstant(pop).ToInt32();
-+  } else {
-+    Register pop_reg = g.ToRegister(pop);
-+    __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
-+    __ Add_d(sp, sp, pop_reg);
-+  }
-+  if (pop_count != 0) {
-+    __ DropAndRet(pop_count);
-+  } else {
-+    __ Ret();
-+  }
-+}
-+
-+void CodeGenerator::FinishCode() {}
-+
-+void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
-+
-+void CodeGenerator::AssembleMove(InstructionOperand* source,
-+                                 InstructionOperand* destination) {
-+  Loong64OperandConverter g(this, nullptr);
-+  // Dispatch on the source and destination operand kinds. Not all
-+  // combinations are possible.
-+  if (source->IsRegister()) {
-+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-+    Register src = g.ToRegister(source);
-+    if (destination->IsRegister()) {
-+      __ mov(g.ToRegister(destination), src);
-+    } else {
-+      __ St_d(src, g.ToMemOperand(destination));
-+    }
-+  } else if (source->IsStackSlot()) {
-+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-+    MemOperand src = g.ToMemOperand(source);
-+    if (destination->IsRegister()) {
-+      __ Ld_d(g.ToRegister(destination), src);
-+    } else {
-+      Register temp = kScratchReg;
-+      __ Ld_d(temp, src);
-+      __ St_d(temp, g.ToMemOperand(destination));
-+    }
-+  } else if (source->IsConstant()) {
-+    Constant src = g.ToConstant(source);
-+    if (destination->IsRegister() || destination->IsStackSlot()) {
-+      Register dst =
-+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
-+      switch (src.type()) {
-+        case Constant::kInt32:
-+          __ li(dst, Operand(src.ToInt32()));
-+          break;
-+        case Constant::kFloat32:
-+          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
-+          break;
-+        case Constant::kInt64:
-+          if (RelocInfo::IsWasmReference(src.rmode())) {
-+            __ li(dst, Operand(src.ToInt64(), src.rmode()));
-+          } else {
-+            __ li(dst, Operand(src.ToInt64()));
-+          }
-+          break;
-+        case Constant::kFloat64:
-+          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
-+          break;
-+        case Constant::kExternalReference:
-+          __ li(dst, src.ToExternalReference());
-+          break;
-+        case Constant::kDelayedStringConstant:
-+          __ li(dst, src.ToDelayedStringConstant());
-+          break;
-+        case Constant::kHeapObject: {
-+          Handle<HeapObject> src_object = src.ToHeapObject();
-+          RootIndex index;
-+          if (IsMaterializableFromRoot(src_object, &index)) {
-+            __ LoadRoot(dst, index);
-+          } else {
-+            __ li(dst, src_object);
-+          }
-+          break;
-+        }
-+        case Constant::kCompressedHeapObject:
-+          UNREACHABLE();
-+        case Constant::kRpoNumber:
-+          UNREACHABLE();  // TODO(titzer): loading RPO numbers on LOONG64.
-+          break;
-+      }
-+      if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
-+    } else if (src.type() == Constant::kFloat32) {
-+      if (destination->IsFPStackSlot()) {
-+        MemOperand dst = g.ToMemOperand(destination);
-+        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
-+          __ St_d(zero_reg, dst);
-+        } else {
-+          __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
-+          __ St_d(kScratchReg, dst);
-+        }
-+      } else {
-+        DCHECK(destination->IsFPRegister());
-+        FloatRegister dst = g.ToSingleRegister(destination);
-+        __ Move(dst, src.ToFloat32());
-+      }
-+    } else {
-+      DCHECK_EQ(Constant::kFloat64, src.type());
-+      DoubleRegister dst = destination->IsFPRegister()
-+                               ? g.ToDoubleRegister(destination)
-+                               : kScratchDoubleReg;
-+      __ Move(dst, src.ToFloat64().value());
-+      if (destination->IsFPStackSlot()) {
-+        __ Fst_d(dst, g.ToMemOperand(destination));
-+      }
-+    }
-+  } else if (source->IsFPRegister()) {
-+    FPURegister src = g.ToDoubleRegister(source);
-+    if (destination->IsFPRegister()) {
-+      FPURegister dst = g.ToDoubleRegister(destination);
-+      __ Move(dst, src);
-+    } else {
-+      DCHECK(destination->IsFPStackSlot());
-+      __ Fst_d(src, g.ToMemOperand(destination));
-+    }
-+  } else if (source->IsFPStackSlot()) {
-+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-+    MemOperand src = g.ToMemOperand(source);
-+    if (destination->IsFPRegister()) {
-+      __ Fld_d(g.ToDoubleRegister(destination), src);
-+    } else {
-+      DCHECK(destination->IsFPStackSlot());
-+      FPURegister temp = kScratchDoubleReg;
-+      __ Fld_d(temp, src);
-+      __ Fst_d(temp, g.ToMemOperand(destination));
-+    }
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void CodeGenerator::AssembleSwap(InstructionOperand* source,
-+                                 InstructionOperand* destination) {
-+  Loong64OperandConverter g(this, nullptr);
-+  // Dispatch on the source and destination operand kinds. Not all
-+  // combinations are possible.
-+  if (source->IsRegister()) {
-+    // Register-register.
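The register-register case below is the classic three-move swap through a scratch register, since the ISA has no swap instruction; equivalently (a sketch):

// Three-move swap through a temporary, mirroring
//   __ Move(temp, src); __ Move(src, dst); __ Move(dst, temp);
template <typename T>
void SwapViaTemp(T& a, T& b) {
  T temp = a;  // Move(temp, src)
  a = b;       // Move(src, dst)
  b = temp;    // Move(dst, temp)
}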
-+ Register temp = kScratchReg; -+ Register src = g.ToRegister(source); -+ if (destination->IsRegister()) { -+ Register dst = g.ToRegister(destination); -+ __ Move(temp, src); -+ __ Move(src, dst); -+ __ Move(dst, temp); -+ } else { -+ DCHECK(destination->IsStackSlot()); -+ MemOperand dst = g.ToMemOperand(destination); -+ __ mov(temp, src); -+ __ Ld_d(src, dst); -+ __ St_d(temp, dst); -+ } -+ } else if (source->IsStackSlot()) { -+ DCHECK(destination->IsStackSlot()); -+ Register temp_0 = kScratchReg; -+ Register temp_1 = kScratchReg2; -+ MemOperand src = g.ToMemOperand(source); -+ MemOperand dst = g.ToMemOperand(destination); -+ __ Ld_d(temp_0, src); -+ __ Ld_d(temp_1, dst); -+ __ St_d(temp_0, dst); -+ __ St_d(temp_1, src); -+ } else if (source->IsFPRegister()) { -+ FPURegister temp = kScratchDoubleReg; -+ FPURegister src = g.ToDoubleRegister(source); -+ if (destination->IsFPRegister()) { -+ FPURegister dst = g.ToDoubleRegister(destination); -+ __ Move(temp, src); -+ __ Move(src, dst); -+ __ Move(dst, temp); -+ } else { -+ DCHECK(destination->IsFPStackSlot()); -+ MemOperand dst = g.ToMemOperand(destination); -+ __ Move(temp, src); -+ __ Fld_d(src, dst); -+ __ Fst_d(temp, dst); -+ } -+ } else if (source->IsFPStackSlot()) { -+ DCHECK(destination->IsFPStackSlot()); -+ Register temp_0 = kScratchReg; -+ MemOperand src0 = g.ToMemOperand(source); -+ MemOperand src1(src0.base(), src0.offset() + kIntSize); -+ MemOperand dst0 = g.ToMemOperand(destination); -+ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize); -+ FPURegister temp_1 = kScratchDoubleReg; -+ __ Fld_d(temp_1, dst0); // Save destination in temp_1. -+ __ Ld_w(temp_0, src0); // Then use temp_0 to copy source to destination. -+ __ St_w(temp_0, dst0); -+ __ Ld_w(temp_0, src1); -+ __ St_w(temp_0, dst1); -+ __ Fst_d(temp_1, src0); -+ } else { -+ // No other combinations are possible. -+ UNREACHABLE(); -+ } -+} -+ -+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { -+ // On 64-bit LOONG64 we emit the jump tables inline. -+ UNREACHABLE(); -+} -+ -+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER -+#undef ASSEMBLE_ATOMIC_STORE_INTEGER -+#undef ASSEMBLE_ATOMIC_BINOP -+#undef ASSEMBLE_ATOMIC_BINOP_EXT -+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER -+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT -+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER -+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT -+#undef ASSEMBLE_IEEE754_BINOP -+#undef ASSEMBLE_IEEE754_UNOP -+ -+#undef TRACE_MSG -+#undef TRACE_UNIMPL -+#undef __ -+ -+} // namespace compiler -+} // namespace internal -+} // namespace v8 -diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h -new file mode 100644 -index 00000000..99328e1e ---- /dev/null -+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h -@@ -0,0 +1,415 @@ -+// Copyright 2014 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ -+#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ -+ -+namespace v8 { -+namespace internal { -+namespace compiler { -+ -+// LOONG64-specific opcodes that specify which assembly sequence to emit. -+// Most opcodes specify a single instruction. 
-+#define TARGET_ARCH_OPCODE_LIST(V) \ -+ V(Loong64Add) \ -+ V(Loong64Dadd) \ -+ V(Loong64DaddOvf) \ -+ V(Loong64Sub) \ -+ V(Loong64Dsub) \ -+ V(Loong64DsubOvf) \ -+ V(Loong64Mul) \ -+ V(Loong64MulOvf) \ -+ V(Loong64MulHigh) \ -+ V(Loong64DMulHigh) \ -+ V(Loong64MulHighU) \ -+ V(Loong64Dmul) \ -+ V(Loong64Div) \ -+ V(Loong64Ddiv) \ -+ V(Loong64DivU) \ -+ V(Loong64DdivU) \ -+ V(Loong64Mod) \ -+ V(Loong64Dmod) \ -+ V(Loong64ModU) \ -+ V(Loong64DmodU) \ -+ V(Loong64And) \ -+ V(Loong64And32) \ -+ V(Loong64Or) \ -+ V(Loong64Or32) \ -+ V(Loong64Nor) \ -+ V(Loong64Nor32) \ -+ V(Loong64Xor) \ -+ V(Loong64Xor32) \ -+ V(Loong64Clz) \ -+ V(Loong64Lsa) \ -+ V(Loong64Dlsa) \ -+ V(Loong64Shl) \ -+ V(Loong64Shr) \ -+ V(Loong64Sar) \ -+ V(Loong64Ext) \ -+ V(Loong64Ins) \ -+ V(Loong64Dext) \ -+ V(Loong64Dins) \ -+ V(Loong64Dclz) \ -+ V(Loong64Ctz) \ -+ V(Loong64Dctz) \ -+ V(Loong64Popcnt) \ -+ V(Loong64Dpopcnt) \ -+ V(Loong64Dshl) \ -+ V(Loong64Dshr) \ -+ V(Loong64Dsar) \ -+ V(Loong64Ror) \ -+ V(Loong64Dror) \ -+ V(Loong64Mov) \ -+ V(Loong64Tst) \ -+ V(Loong64Cmp) \ -+ V(Loong64CmpS) \ -+ V(Loong64AddS) \ -+ V(Loong64SubS) \ -+ V(Loong64MulS) \ -+ V(Loong64DivS) \ -+ V(Loong64ModS) \ -+ V(Loong64AbsS) \ -+ V(Loong64NegS) \ -+ V(Loong64SqrtS) \ -+ V(Loong64MaxS) \ -+ V(Loong64MinS) \ -+ V(Loong64CmpD) \ -+ V(Loong64AddD) \ -+ V(Loong64SubD) \ -+ V(Loong64MulD) \ -+ V(Loong64DivD) \ -+ V(Loong64ModD) \ -+ V(Loong64AbsD) \ -+ V(Loong64NegD) \ -+ V(Loong64SqrtD) \ -+ V(Loong64MaxD) \ -+ V(Loong64MinD) \ -+ V(Loong64Float64RoundDown) \ -+ V(Loong64Float64RoundTruncate) \ -+ V(Loong64Float64RoundUp) \ -+ V(Loong64Float64RoundTiesEven) \ -+ V(Loong64Float32RoundDown) \ -+ V(Loong64Float32RoundTruncate) \ -+ V(Loong64Float32RoundUp) \ -+ V(Loong64Float32RoundTiesEven) \ -+ V(Loong64CvtSD) \ -+ V(Loong64CvtDS) \ -+ V(Loong64TruncWD) \ -+ V(Loong64RoundWD) \ -+ V(Loong64FloorWD) \ -+ V(Loong64CeilWD) \ -+ V(Loong64TruncWS) \ -+ V(Loong64RoundWS) \ -+ V(Loong64FloorWS) \ -+ V(Loong64CeilWS) \ -+ V(Loong64TruncLS) \ -+ V(Loong64TruncLD) \ -+ V(Loong64TruncUwD) \ -+ V(Loong64TruncUwS) \ -+ V(Loong64TruncUlS) \ -+ V(Loong64TruncUlD) \ -+ V(Loong64CvtDW) \ -+ V(Loong64CvtSL) \ -+ V(Loong64CvtSW) \ -+ V(Loong64CvtSUw) \ -+ V(Loong64CvtSUl) \ -+ V(Loong64CvtDL) \ -+ V(Loong64CvtDUw) \ -+ V(Loong64CvtDUl) \ -+ V(Loong64Lb) \ -+ V(Loong64Lbu) \ -+ V(Loong64Sb) \ -+ V(Loong64Lh) \ -+ V(Loong64Ulh) \ -+ V(Loong64Lhu) \ -+ V(Loong64Ulhu) \ -+ V(Loong64Sh) \ -+ V(Loong64Ush) \ -+ V(Loong64Ld) \ -+ V(Loong64Uld) \ -+ V(Loong64Lw) \ -+ V(Loong64Ulw) \ -+ V(Loong64Lwu) \ -+ V(Loong64Ulwu) \ -+ V(Loong64Sw) \ -+ V(Loong64Usw) \ -+ V(Loong64Sd) \ -+ V(Loong64Usd) \ -+ V(Loong64Lwc1) \ -+ V(Loong64Ulwc1) \ -+ V(Loong64Swc1) \ -+ V(Loong64Uswc1) \ -+ V(Loong64Ldc1) \ -+ V(Loong64Uldc1) \ -+ V(Loong64Sdc1) \ -+ V(Loong64Usdc1) \ -+ V(Loong64BitcastDL) \ -+ V(Loong64BitcastLD) \ -+ V(Loong64Float64ExtractLowWord32) \ -+ V(Loong64Float64ExtractHighWord32) \ -+ V(Loong64Float64InsertLowWord32) \ -+ V(Loong64Float64InsertHighWord32) \ -+ V(Loong64Float32Max) \ -+ V(Loong64Float64Max) \ -+ V(Loong64Float32Min) \ -+ V(Loong64Float64Min) \ -+ V(Loong64Float64SilenceNaN) \ -+ V(Loong64Push) \ -+ V(Loong64Peek) \ -+ V(Loong64StoreToStackSlot) \ -+ V(Loong64ByteSwap64) \ -+ V(Loong64ByteSwap32) \ -+ V(Loong64StackClaim) \ -+ V(Loong64Seb) \ -+ V(Loong64Seh) \ -+ V(Loong64Sync) \ -+ V(Loong64AssertEqual) \ -+ V(Loong64S128Zero) \ -+ V(Loong64I32x4Splat) \ -+ V(Loong64I32x4ExtractLane) \ -+ V(Loong64I32x4ReplaceLane) \ -+ V(Loong64I32x4Add) \ -+ 
V(Loong64I32x4AddHoriz) \ -+ V(Loong64I32x4Sub) \ -+ V(Loong64F64x2Abs) \ -+ V(Loong64F64x2Neg) \ -+ V(Loong64F32x4Splat) \ -+ V(Loong64F32x4ExtractLane) \ -+ V(Loong64F32x4ReplaceLane) \ -+ V(Loong64F32x4SConvertI32x4) \ -+ V(Loong64F32x4UConvertI32x4) \ -+ V(Loong64I32x4Mul) \ -+ V(Loong64I32x4MaxS) \ -+ V(Loong64I32x4MinS) \ -+ V(Loong64I32x4Eq) \ -+ V(Loong64I32x4Ne) \ -+ V(Loong64I32x4Shl) \ -+ V(Loong64I32x4ShrS) \ -+ V(Loong64I32x4ShrU) \ -+ V(Loong64I32x4MaxU) \ -+ V(Loong64I32x4MinU) \ -+ V(Loong64F64x2Sqrt) \ -+ V(Loong64F64x2Add) \ -+ V(Loong64F64x2Sub) \ -+ V(Loong64F64x2Mul) \ -+ V(Loong64F64x2Div) \ -+ V(Loong64F64x2Min) \ -+ V(Loong64F64x2Max) \ -+ V(Loong64F64x2Eq) \ -+ V(Loong64F64x2Ne) \ -+ V(Loong64F64x2Lt) \ -+ V(Loong64F64x2Le) \ -+ V(Loong64F64x2Splat) \ -+ V(Loong64F64x2ExtractLane) \ -+ V(Loong64F64x2ReplaceLane) \ -+ V(Loong64I64x2Splat) \ -+ V(Loong64I64x2ExtractLane) \ -+ V(Loong64I64x2ReplaceLane) \ -+ V(Loong64I64x2Add) \ -+ V(Loong64I64x2Sub) \ -+ V(Loong64I64x2Mul) \ -+ V(Loong64I64x2Neg) \ -+ V(Loong64I64x2Shl) \ -+ V(Loong64I64x2ShrS) \ -+ V(Loong64I64x2ShrU) \ -+ V(Loong64F32x4Abs) \ -+ V(Loong64F32x4Neg) \ -+ V(Loong64F32x4Sqrt) \ -+ V(Loong64F32x4RecipApprox) \ -+ V(Loong64F32x4RecipSqrtApprox) \ -+ V(Loong64F32x4Add) \ -+ V(Loong64F32x4AddHoriz) \ -+ V(Loong64F32x4Sub) \ -+ V(Loong64F32x4Mul) \ -+ V(Loong64F32x4Div) \ -+ V(Loong64F32x4Max) \ -+ V(Loong64F32x4Min) \ -+ V(Loong64F32x4Eq) \ -+ V(Loong64F32x4Ne) \ -+ V(Loong64F32x4Lt) \ -+ V(Loong64F32x4Le) \ -+ V(Loong64I32x4SConvertF32x4) \ -+ V(Loong64I32x4UConvertF32x4) \ -+ V(Loong64I32x4Neg) \ -+ V(Loong64I32x4GtS) \ -+ V(Loong64I32x4GeS) \ -+ V(Loong64I32x4GtU) \ -+ V(Loong64I32x4GeU) \ -+ V(Loong64I32x4Abs) \ -+ V(Loong64I16x8Splat) \ -+ V(Loong64I16x8ExtractLaneU) \ -+ V(Loong64I16x8ExtractLaneS) \ -+ V(Loong64I16x8ReplaceLane) \ -+ V(Loong64I16x8Neg) \ -+ V(Loong64I16x8Shl) \ -+ V(Loong64I16x8ShrS) \ -+ V(Loong64I16x8ShrU) \ -+ V(Loong64I16x8Add) \ -+ V(Loong64I16x8AddSaturateS) \ -+ V(Loong64I16x8AddHoriz) \ -+ V(Loong64I16x8Sub) \ -+ V(Loong64I16x8SubSaturateS) \ -+ V(Loong64I16x8Mul) \ -+ V(Loong64I16x8MaxS) \ -+ V(Loong64I16x8MinS) \ -+ V(Loong64I16x8Eq) \ -+ V(Loong64I16x8Ne) \ -+ V(Loong64I16x8GtS) \ -+ V(Loong64I16x8GeS) \ -+ V(Loong64I16x8AddSaturateU) \ -+ V(Loong64I16x8SubSaturateU) \ -+ V(Loong64I16x8MaxU) \ -+ V(Loong64I16x8MinU) \ -+ V(Loong64I16x8GtU) \ -+ V(Loong64I16x8GeU) \ -+ V(Loong64I16x8RoundingAverageU) \ -+ V(Loong64I16x8Abs) \ -+ V(Loong64I8x16Splat) \ -+ V(Loong64I8x16ExtractLaneU) \ -+ V(Loong64I8x16ExtractLaneS) \ -+ V(Loong64I8x16ReplaceLane) \ -+ V(Loong64I8x16Neg) \ -+ V(Loong64I8x16Shl) \ -+ V(Loong64I8x16ShrS) \ -+ V(Loong64I8x16Add) \ -+ V(Loong64I8x16AddSaturateS) \ -+ V(Loong64I8x16Sub) \ -+ V(Loong64I8x16SubSaturateS) \ -+ V(Loong64I8x16Mul) \ -+ V(Loong64I8x16MaxS) \ -+ V(Loong64I8x16MinS) \ -+ V(Loong64I8x16Eq) \ -+ V(Loong64I8x16Ne) \ -+ V(Loong64I8x16GtS) \ -+ V(Loong64I8x16GeS) \ -+ V(Loong64I8x16ShrU) \ -+ V(Loong64I8x16AddSaturateU) \ -+ V(Loong64I8x16SubSaturateU) \ -+ V(Loong64I8x16MaxU) \ -+ V(Loong64I8x16MinU) \ -+ V(Loong64I8x16GtU) \ -+ V(Loong64I8x16GeU) \ -+ V(Loong64I8x16RoundingAverageU) \ -+ V(Loong64I8x16Abs) \ -+ V(Loong64S128And) \ -+ V(Loong64S128Or) \ -+ V(Loong64S128Xor) \ -+ V(Loong64S128Not) \ -+ V(Loong64S128Select) \ -+ V(Loong64S128AndNot) \ -+ V(Loong64S1x4AnyTrue) \ -+ V(Loong64S1x4AllTrue) \ -+ V(Loong64S1x8AnyTrue) \ -+ V(Loong64S1x8AllTrue) \ -+ V(Loong64S1x16AnyTrue) \ -+ V(Loong64S1x16AllTrue) \ -+ 
V(Loong64S32x4InterleaveRight) \ -+ V(Loong64S32x4InterleaveLeft) \ -+ V(Loong64S32x4PackEven) \ -+ V(Loong64S32x4PackOdd) \ -+ V(Loong64S32x4InterleaveEven) \ -+ V(Loong64S32x4InterleaveOdd) \ -+ V(Loong64S32x4Shuffle) \ -+ V(Loong64S16x8InterleaveRight) \ -+ V(Loong64S16x8InterleaveLeft) \ -+ V(Loong64S16x8PackEven) \ -+ V(Loong64S16x8PackOdd) \ -+ V(Loong64S16x8InterleaveEven) \ -+ V(Loong64S16x8InterleaveOdd) \ -+ V(Loong64S16x4Reverse) \ -+ V(Loong64S16x2Reverse) \ -+ V(Loong64S8x16InterleaveRight) \ -+ V(Loong64S8x16InterleaveLeft) \ -+ V(Loong64S8x16PackEven) \ -+ V(Loong64S8x16PackOdd) \ -+ V(Loong64S8x16InterleaveEven) \ -+ V(Loong64S8x16InterleaveOdd) \ -+ V(Loong64S8x16Shuffle) \ -+ V(Loong64S8x16Swizzle) \ -+ V(Loong64S8x16Concat) \ -+ V(Loong64S8x8Reverse) \ -+ V(Loong64S8x4Reverse) \ -+ V(Loong64S8x2Reverse) \ -+ V(Loong64S8x16LoadSplat) \ -+ V(Loong64S16x8LoadSplat) \ -+ V(Loong64S32x4LoadSplat) \ -+ V(Loong64S64x2LoadSplat) \ -+ V(Loong64I16x8Load8x8S) \ -+ V(Loong64I16x8Load8x8U) \ -+ V(Loong64I32x4Load16x4S) \ -+ V(Loong64I32x4Load16x4U) \ -+ V(Loong64I64x2Load32x2S) \ -+ V(Loong64I64x2Load32x2U) \ -+ V(Loong64I32x4SConvertI16x8Low) \ -+ V(Loong64I32x4SConvertI16x8High) \ -+ V(Loong64I32x4UConvertI16x8Low) \ -+ V(Loong64I32x4UConvertI16x8High) \ -+ V(Loong64I16x8SConvertI8x16Low) \ -+ V(Loong64I16x8SConvertI8x16High) \ -+ V(Loong64I16x8SConvertI32x4) \ -+ V(Loong64I16x8UConvertI32x4) \ -+ V(Loong64I16x8UConvertI8x16Low) \ -+ V(Loong64I16x8UConvertI8x16High) \ -+ V(Loong64I8x16SConvertI16x8) \ -+ V(Loong64I8x16UConvertI16x8) \ -+ V(Loong64Word64AtomicLoadUint8) \ -+ V(Loong64Word64AtomicLoadUint16) \ -+ V(Loong64Word64AtomicLoadUint32) \ -+ V(Loong64Word64AtomicLoadUint64) \ -+ V(Loong64Word64AtomicStoreWord8) \ -+ V(Loong64Word64AtomicStoreWord16) \ -+ V(Loong64Word64AtomicStoreWord32) \ -+ V(Loong64Word64AtomicStoreWord64) \ -+ V(Loong64Word64AtomicAddUint8) \ -+ V(Loong64Word64AtomicAddUint16) \ -+ V(Loong64Word64AtomicAddUint32) \ -+ V(Loong64Word64AtomicAddUint64) \ -+ V(Loong64Word64AtomicSubUint8) \ -+ V(Loong64Word64AtomicSubUint16) \ -+ V(Loong64Word64AtomicSubUint32) \ -+ V(Loong64Word64AtomicSubUint64) \ -+ V(Loong64Word64AtomicAndUint8) \ -+ V(Loong64Word64AtomicAndUint16) \ -+ V(Loong64Word64AtomicAndUint32) \ -+ V(Loong64Word64AtomicAndUint64) \ -+ V(Loong64Word64AtomicOrUint8) \ -+ V(Loong64Word64AtomicOrUint16) \ -+ V(Loong64Word64AtomicOrUint32) \ -+ V(Loong64Word64AtomicOrUint64) \ -+ V(Loong64Word64AtomicXorUint8) \ -+ V(Loong64Word64AtomicXorUint16) \ -+ V(Loong64Word64AtomicXorUint32) \ -+ V(Loong64Word64AtomicXorUint64) \ -+ V(Loong64Word64AtomicExchangeUint8) \ -+ V(Loong64Word64AtomicExchangeUint16) \ -+ V(Loong64Word64AtomicExchangeUint32) \ -+ V(Loong64Word64AtomicExchangeUint64) \ -+ V(Loong64Word64AtomicCompareExchangeUint8) \ -+ V(Loong64Word64AtomicCompareExchangeUint16) \ -+ V(Loong64Word64AtomicCompareExchangeUint32) \ -+ V(Loong64Word64AtomicCompareExchangeUint64) -+ -+// Addressing modes represent the "shape" of inputs to an instruction. -+// Many instructions support multiple addressing modes. Addressing modes -+// are encoded into the InstructionCode of the instruction and tell the -+// code generator after register allocation which assembler method to call. 
-+// -+// We use the following local notation for addressing modes: -+// -+// R = register -+// O = register or stack slot -+// D = double register -+// I = immediate (handle, external, int32) -+// MRI = [register + immediate] -+// MRR = [register + register] -+// TODO(plind): Add the new r6 address modes. -+#define TARGET_ADDRESSING_MODE_LIST(V) \ -+ V(MRI) /* [%r0 + K] */ \ -+ V(MRR) /* [%r0 + %r1] */ -+ -+} // namespace compiler -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ -diff --git a/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc -new file mode 100644 -index 00000000..8437aa25 ---- /dev/null -+++ b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc -@@ -0,0 +1,1537 @@ -+// Copyright 2015 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#include "src/codegen/macro-assembler.h" -+#include "src/compiler/backend/instruction-scheduler.h" -+ -+namespace v8 { -+namespace internal { -+namespace compiler { -+ -+bool InstructionScheduler::SchedulerSupported() { return true; } -+ -+int InstructionScheduler::GetTargetInstructionFlags( -+ const Instruction* instr) const { -+ switch (instr->arch_opcode()) { -+ case kLoong64AbsD: -+ case kLoong64AbsS: -+ case kLoong64Add: -+ case kLoong64AddD: -+ case kLoong64AddS: -+ case kLoong64And: -+ case kLoong64And32: -+ case kLoong64AssertEqual: -+ case kLoong64BitcastDL: -+ case kLoong64BitcastLD: -+ case kLoong64ByteSwap32: -+ case kLoong64ByteSwap64: -+ case kLoong64CeilWD: -+ case kLoong64CeilWS: -+ case kLoong64Clz: -+ case kLoong64Cmp: -+ case kLoong64CmpD: -+ case kLoong64CmpS: -+ case kLoong64Ctz: -+ case kLoong64CvtDL: -+ case kLoong64CvtDS: -+ case kLoong64CvtDUl: -+ case kLoong64CvtDUw: -+ case kLoong64CvtDW: -+ case kLoong64CvtSD: -+ case kLoong64CvtSL: -+ case kLoong64CvtSUl: -+ case kLoong64CvtSUw: -+ case kLoong64CvtSW: -+ case kLoong64DMulHigh: -+ case kLoong64MulHighU: -+ case kLoong64Dadd: -+ case kLoong64DaddOvf: -+ case kLoong64Dclz: -+ case kLoong64Dctz: -+ case kLoong64Ddiv: -+ case kLoong64DdivU: -+ case kLoong64Dext: -+ case kLoong64Dins: -+ case kLoong64Div: -+ case kLoong64DivD: -+ case kLoong64DivS: -+ case kLoong64DivU: -+ case kLoong64Dlsa: -+ case kLoong64Dmod: -+ case kLoong64DmodU: -+ case kLoong64Dmul: -+ case kLoong64Dpopcnt: -+ case kLoong64Dror: -+ case kLoong64Dsar: -+ case kLoong64Dshl: -+ case kLoong64Dshr: -+ case kLoong64Dsub: -+ case kLoong64DsubOvf: -+ case kLoong64Ext: -+ case kLoong64F64x2Abs: -+ case kLoong64F64x2Neg: -+ case kLoong64F64x2Sqrt: -+ case kLoong64F64x2Add: -+ case kLoong64F64x2Sub: -+ case kLoong64F64x2Mul: -+ case kLoong64F64x2Div: -+ case kLoong64F64x2Min: -+ case kLoong64F64x2Max: -+ case kLoong64F64x2Eq: -+ case kLoong64F64x2Ne: -+ case kLoong64F64x2Lt: -+ case kLoong64F64x2Le: -+ case kLoong64I64x2Splat: -+ case kLoong64I64x2ExtractLane: -+ case kLoong64I64x2ReplaceLane: -+ case kLoong64I64x2Add: -+ case kLoong64I64x2Sub: -+ case kLoong64I64x2Mul: -+ case kLoong64I64x2Neg: -+ case kLoong64I64x2Shl: -+ case kLoong64I64x2ShrS: -+ case kLoong64I64x2ShrU: -+ case kLoong64F32x4Abs: -+ case kLoong64F32x4Add: -+ case kLoong64F32x4AddHoriz: -+ case kLoong64F32x4Eq: -+ case kLoong64F32x4ExtractLane: -+ case kLoong64F32x4Lt: -+ case kLoong64F32x4Le: -+ case kLoong64F32x4Max: -+ case kLoong64F32x4Min: -+ case 
kLoong64F32x4Mul: -+ case kLoong64F32x4Div: -+ case kLoong64F32x4Ne: -+ case kLoong64F32x4Neg: -+ case kLoong64F32x4Sqrt: -+ case kLoong64F32x4RecipApprox: -+ case kLoong64F32x4RecipSqrtApprox: -+ case kLoong64F32x4ReplaceLane: -+ case kLoong64F32x4SConvertI32x4: -+ case kLoong64F32x4Splat: -+ case kLoong64F32x4Sub: -+ case kLoong64F32x4UConvertI32x4: -+ case kLoong64F64x2Splat: -+ case kLoong64F64x2ExtractLane: -+ case kLoong64F64x2ReplaceLane: -+ case kLoong64Float32Max: -+ case kLoong64Float32Min: -+ case kLoong64Float32RoundDown: -+ case kLoong64Float32RoundTiesEven: -+ case kLoong64Float32RoundTruncate: -+ case kLoong64Float32RoundUp: -+ case kLoong64Float64ExtractLowWord32: -+ case kLoong64Float64ExtractHighWord32: -+ case kLoong64Float64InsertLowWord32: -+ case kLoong64Float64InsertHighWord32: -+ case kLoong64Float64Max: -+ case kLoong64Float64Min: -+ case kLoong64Float64RoundDown: -+ case kLoong64Float64RoundTiesEven: -+ case kLoong64Float64RoundTruncate: -+ case kLoong64Float64RoundUp: -+ case kLoong64Float64SilenceNaN: -+ case kLoong64FloorWD: -+ case kLoong64FloorWS: -+ case kLoong64I16x8Add: -+ case kLoong64I16x8AddHoriz: -+ case kLoong64I16x8AddSaturateS: -+ case kLoong64I16x8AddSaturateU: -+ case kLoong64I16x8Eq: -+ case kLoong64I16x8ExtractLaneU: -+ case kLoong64I16x8ExtractLaneS: -+ case kLoong64I16x8GeS: -+ case kLoong64I16x8GeU: -+ case kLoong64I16x8GtS: -+ case kLoong64I16x8GtU: -+ case kLoong64I16x8MaxS: -+ case kLoong64I16x8MaxU: -+ case kLoong64I16x8MinS: -+ case kLoong64I16x8MinU: -+ case kLoong64I16x8Mul: -+ case kLoong64I16x8Ne: -+ case kLoong64I16x8Neg: -+ case kLoong64I16x8ReplaceLane: -+ case kLoong64I8x16SConvertI16x8: -+ case kLoong64I16x8SConvertI32x4: -+ case kLoong64I16x8SConvertI8x16High: -+ case kLoong64I16x8SConvertI8x16Low: -+ case kLoong64I16x8Shl: -+ case kLoong64I16x8ShrS: -+ case kLoong64I16x8ShrU: -+ case kLoong64I16x8Splat: -+ case kLoong64I16x8Sub: -+ case kLoong64I16x8SubSaturateS: -+ case kLoong64I16x8SubSaturateU: -+ case kLoong64I8x16UConvertI16x8: -+ case kLoong64I16x8UConvertI32x4: -+ case kLoong64I16x8UConvertI8x16High: -+ case kLoong64I16x8UConvertI8x16Low: -+ case kLoong64I16x8RoundingAverageU: -+ case kLoong64I16x8Abs: -+ case kLoong64I32x4Add: -+ case kLoong64I32x4AddHoriz: -+ case kLoong64I32x4Eq: -+ case kLoong64I32x4ExtractLane: -+ case kLoong64I32x4GeS: -+ case kLoong64I32x4GeU: -+ case kLoong64I32x4GtS: -+ case kLoong64I32x4GtU: -+ case kLoong64I32x4MaxS: -+ case kLoong64I32x4MaxU: -+ case kLoong64I32x4MinS: -+ case kLoong64I32x4MinU: -+ case kLoong64I32x4Mul: -+ case kLoong64I32x4Ne: -+ case kLoong64I32x4Neg: -+ case kLoong64I32x4ReplaceLane: -+ case kLoong64I32x4SConvertF32x4: -+ case kLoong64I32x4SConvertI16x8High: -+ case kLoong64I32x4SConvertI16x8Low: -+ case kLoong64I32x4Shl: -+ case kLoong64I32x4ShrS: -+ case kLoong64I32x4ShrU: -+ case kLoong64I32x4Splat: -+ case kLoong64I32x4Sub: -+ case kLoong64I32x4UConvertF32x4: -+ case kLoong64I32x4UConvertI16x8High: -+ case kLoong64I32x4UConvertI16x8Low: -+ case kLoong64I32x4Abs: -+ case kLoong64I8x16Add: -+ case kLoong64I8x16AddSaturateS: -+ case kLoong64I8x16AddSaturateU: -+ case kLoong64I8x16Eq: -+ case kLoong64I8x16ExtractLaneU: -+ case kLoong64I8x16ExtractLaneS: -+ case kLoong64I8x16GeS: -+ case kLoong64I8x16GeU: -+ case kLoong64I8x16GtS: -+ case kLoong64I8x16GtU: -+ case kLoong64I8x16MaxS: -+ case kLoong64I8x16MaxU: -+ case kLoong64I8x16MinS: -+ case kLoong64I8x16MinU: -+ case kLoong64I8x16Mul: -+ case kLoong64I8x16Ne: -+ case kLoong64I8x16Neg: -+ case kLoong64I8x16ReplaceLane: 
-+ case kLoong64I8x16Shl: -+ case kLoong64I8x16ShrS: -+ case kLoong64I8x16ShrU: -+ case kLoong64I8x16Splat: -+ case kLoong64I8x16Sub: -+ case kLoong64I8x16SubSaturateS: -+ case kLoong64I8x16SubSaturateU: -+ case kLoong64I8x16RoundingAverageU: -+ case kLoong64I8x16Abs: -+ case kLoong64Ins: -+ case kLoong64Lsa: -+ case kLoong64MaxD: -+ case kLoong64MaxS: -+ case kLoong64MinD: -+ case kLoong64MinS: -+ case kLoong64Mod: -+ case kLoong64ModU: -+ case kLoong64Mov: -+ case kLoong64Mul: -+ case kLoong64MulD: -+ case kLoong64MulHigh: -+ case kLoong64MulOvf: -+ case kLoong64MulS: -+ case kLoong64NegD: -+ case kLoong64NegS: -+ case kLoong64Nor: -+ case kLoong64Nor32: -+ case kLoong64Or: -+ case kLoong64Or32: -+ case kLoong64Popcnt: -+ case kLoong64Ror: -+ case kLoong64RoundWD: -+ case kLoong64RoundWS: -+ case kLoong64S128And: -+ case kLoong64S128Or: -+ case kLoong64S128Not: -+ case kLoong64S128Select: -+ case kLoong64S128AndNot: -+ case kLoong64S128Xor: -+ case kLoong64S128Zero: -+ case kLoong64S16x8InterleaveEven: -+ case kLoong64S16x8InterleaveOdd: -+ case kLoong64S16x8InterleaveLeft: -+ case kLoong64S16x8InterleaveRight: -+ case kLoong64S16x8PackEven: -+ case kLoong64S16x8PackOdd: -+ case kLoong64S16x2Reverse: -+ case kLoong64S16x4Reverse: -+ case kLoong64S1x16AllTrue: -+ case kLoong64S1x16AnyTrue: -+ case kLoong64S1x4AllTrue: -+ case kLoong64S1x4AnyTrue: -+ case kLoong64S1x8AllTrue: -+ case kLoong64S1x8AnyTrue: -+ case kLoong64S32x4InterleaveEven: -+ case kLoong64S32x4InterleaveOdd: -+ case kLoong64S32x4InterleaveLeft: -+ case kLoong64S32x4InterleaveRight: -+ case kLoong64S32x4PackEven: -+ case kLoong64S32x4PackOdd: -+ case kLoong64S32x4Shuffle: -+ case kLoong64S8x16Concat: -+ case kLoong64S8x16InterleaveEven: -+ case kLoong64S8x16InterleaveOdd: -+ case kLoong64S8x16InterleaveLeft: -+ case kLoong64S8x16InterleaveRight: -+ case kLoong64S8x16PackEven: -+ case kLoong64S8x16PackOdd: -+ case kLoong64S8x2Reverse: -+ case kLoong64S8x4Reverse: -+ case kLoong64S8x8Reverse: -+ case kLoong64S8x16Shuffle: -+ case kLoong64S8x16Swizzle: -+ case kLoong64Sar: -+ case kLoong64Seb: -+ case kLoong64Seh: -+ case kLoong64Shl: -+ case kLoong64Shr: -+ case kLoong64SqrtD: -+ case kLoong64SqrtS: -+ case kLoong64Sub: -+ case kLoong64SubD: -+ case kLoong64SubS: -+ case kLoong64TruncLD: -+ case kLoong64TruncLS: -+ case kLoong64TruncUlD: -+ case kLoong64TruncUlS: -+ case kLoong64TruncUwD: -+ case kLoong64TruncUwS: -+ case kLoong64TruncWD: -+ case kLoong64TruncWS: -+ case kLoong64Tst: -+ case kLoong64Xor: -+ case kLoong64Xor32: -+ return kNoOpcodeFlags; -+ -+ case kLoong64Lb: -+ case kLoong64Lbu: -+ case kLoong64Ld: -+ case kLoong64Ldc1: -+ case kLoong64Lh: -+ case kLoong64Lhu: -+ case kLoong64Lw: -+ case kLoong64Lwc1: -+ case kLoong64Lwu: -+ case kLoong64Peek: -+ case kLoong64Uld: -+ case kLoong64Uldc1: -+ case kLoong64Ulh: -+ case kLoong64Ulhu: -+ case kLoong64Ulw: -+ case kLoong64Ulwu: -+ case kLoong64Ulwc1: -+ case kLoong64S8x16LoadSplat: -+ case kLoong64S16x8LoadSplat: -+ case kLoong64S32x4LoadSplat: -+ case kLoong64S64x2LoadSplat: -+ case kLoong64I16x8Load8x8S: -+ case kLoong64I16x8Load8x8U: -+ case kLoong64I32x4Load16x4S: -+ case kLoong64I32x4Load16x4U: -+ case kLoong64I64x2Load32x2S: -+ case kLoong64I64x2Load32x2U: -+ case kLoong64Word64AtomicLoadUint8: -+ case kLoong64Word64AtomicLoadUint16: -+ case kLoong64Word64AtomicLoadUint32: -+ case kLoong64Word64AtomicLoadUint64: -+ -+ return kIsLoadOperation; -+ -+ case kLoong64ModD: -+ case kLoong64ModS: -+ case kLoong64Push: -+ case kLoong64Sb: -+ case kLoong64Sd: -+ case 
kLoong64Sdc1: -+ case kLoong64Sh: -+ case kLoong64StackClaim: -+ case kLoong64StoreToStackSlot: -+ case kLoong64Sw: -+ case kLoong64Swc1: -+ case kLoong64Usd: -+ case kLoong64Usdc1: -+ case kLoong64Ush: -+ case kLoong64Usw: -+ case kLoong64Uswc1: -+ case kLoong64Sync: -+ case kLoong64Word64AtomicStoreWord8: -+ case kLoong64Word64AtomicStoreWord16: -+ case kLoong64Word64AtomicStoreWord32: -+ case kLoong64Word64AtomicStoreWord64: -+ case kLoong64Word64AtomicAddUint8: -+ case kLoong64Word64AtomicAddUint16: -+ case kLoong64Word64AtomicAddUint32: -+ case kLoong64Word64AtomicAddUint64: -+ case kLoong64Word64AtomicSubUint8: -+ case kLoong64Word64AtomicSubUint16: -+ case kLoong64Word64AtomicSubUint32: -+ case kLoong64Word64AtomicSubUint64: -+ case kLoong64Word64AtomicAndUint8: -+ case kLoong64Word64AtomicAndUint16: -+ case kLoong64Word64AtomicAndUint32: -+ case kLoong64Word64AtomicAndUint64: -+ case kLoong64Word64AtomicOrUint8: -+ case kLoong64Word64AtomicOrUint16: -+ case kLoong64Word64AtomicOrUint32: -+ case kLoong64Word64AtomicOrUint64: -+ case kLoong64Word64AtomicXorUint8: -+ case kLoong64Word64AtomicXorUint16: -+ case kLoong64Word64AtomicXorUint32: -+ case kLoong64Word64AtomicXorUint64: -+ case kLoong64Word64AtomicExchangeUint8: -+ case kLoong64Word64AtomicExchangeUint16: -+ case kLoong64Word64AtomicExchangeUint32: -+ case kLoong64Word64AtomicExchangeUint64: -+ case kLoong64Word64AtomicCompareExchangeUint8: -+ case kLoong64Word64AtomicCompareExchangeUint16: -+ case kLoong64Word64AtomicCompareExchangeUint32: -+ case kLoong64Word64AtomicCompareExchangeUint64: -+ return kHasSideEffect; -+ -+#define CASE(Name) case k##Name: -+ COMMON_ARCH_OPCODE_LIST(CASE) -+#undef CASE -+ // Already covered in architecture independent code. -+ UNREACHABLE(); -+ } -+ -+ UNREACHABLE(); -+} -+ -+enum Latency { -+ BRANCH = 4, // Estimated max. -+ RINT_S = 4, // Estimated. -+ RINT_D = 4, // Estimated. -+ -+ MULT = 4, -+ MULTU = 4, -+ DMULT = 4, -+ DMULTU = 4, -+ -+ MUL = 7, -+ DMUL = 7, -+ MUH = 7, -+ MUHU = 7, -+ DMUH = 7, -+ DMUHU = 7, -+ -+ DIV = 50, // Min:11 Max:50 -+ DDIV = 50, -+ DIVU = 50, -+ DDIVU = 50, -+ -+ ABS_S = 4, -+ ABS_D = 4, -+ NEG_S = 4, -+ NEG_D = 4, -+ ADD_S = 4, -+ ADD_D = 4, -+ SUB_S = 4, -+ SUB_D = 4, -+ MAX_S = 4, // Estimated. -+ MIN_S = 4, -+ MAX_D = 4, // Estimated. 
-+ MIN_D = 4, -+ C_cond_S = 4, -+ C_cond_D = 4, -+ MUL_S = 4, -+ -+ MADD_S = 4, -+ MSUB_S = 4, -+ NMADD_S = 4, -+ NMSUB_S = 4, -+ -+ CABS_cond_S = 4, -+ CABS_cond_D = 4, -+ -+ CVT_D_S = 4, -+ CVT_PS_PW = 4, -+ -+ CVT_S_W = 4, -+ CVT_S_L = 4, -+ CVT_D_W = 4, -+ CVT_D_L = 4, -+ -+ CVT_S_D = 4, -+ -+ CVT_W_S = 4, -+ CVT_W_D = 4, -+ CVT_L_S = 4, -+ CVT_L_D = 4, -+ -+ CEIL_W_S = 4, -+ CEIL_W_D = 4, -+ CEIL_L_S = 4, -+ CEIL_L_D = 4, -+ -+ FLOOR_W_S = 4, -+ FLOOR_W_D = 4, -+ FLOOR_L_S = 4, -+ FLOOR_L_D = 4, -+ -+ ROUND_W_S = 4, -+ ROUND_W_D = 4, -+ ROUND_L_S = 4, -+ ROUND_L_D = 4, -+ -+ TRUNC_W_S = 4, -+ TRUNC_W_D = 4, -+ TRUNC_L_S = 4, -+ TRUNC_L_D = 4, -+ -+ MOV_S = 4, -+ MOV_D = 4, -+ -+ MOVF_S = 4, -+ MOVF_D = 4, -+ -+ MOVN_S = 4, -+ MOVN_D = 4, -+ -+ MOVT_S = 4, -+ MOVT_D = 4, -+ -+ MOVZ_S = 4, -+ MOVZ_D = 4, -+ -+ MUL_D = 5, -+ MADD_D = 5, -+ MSUB_D = 5, -+ NMADD_D = 5, -+ NMSUB_D = 5, -+ -+ RECIP_S = 13, -+ RECIP_D = 26, -+ -+ RSQRT_S = 17, -+ RSQRT_D = 36, -+ -+ DIV_S = 17, -+ SQRT_S = 17, -+ -+ DIV_D = 32, -+ SQRT_D = 32, -+ -+ MTC1 = 4, -+ MTHC1 = 4, -+ DMTC1 = 4, -+ LWC1 = 4, -+ LDC1 = 4, -+ -+ MFC1 = 1, -+ MFHC1 = 1, -+ DMFC1 = 1, -+ MFHI = 1, -+ MFLO = 1, -+ SWC1 = 1, -+ SDC1 = 1, -+}; -+ -+int DadduLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return 1; -+ } else { -+ return 2; // Estimated max. -+ } -+} -+ -+int DsubuLatency(bool is_operand_register = true) { -+ return DadduLatency(is_operand_register); -+} -+ -+int AndLatency(bool is_operand_register = true) { -+ return DadduLatency(is_operand_register); -+} -+ -+int OrLatency(bool is_operand_register = true) { -+ return DadduLatency(is_operand_register); -+} -+ -+int NorLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return 1; -+ } else { -+ return 2; // Estimated max. 
-+ } -+} -+ -+int XorLatency(bool is_operand_register = true) { -+ return DadduLatency(is_operand_register); -+} -+ -+int MulLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return Latency::MUL; -+ } else { -+ return Latency::MUL + 1; -+ } -+} -+ -+int DmulLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::DMUL; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int MulhLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::MUH; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int MulhuLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::MUH; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int DMulhLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::DMUH; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int DivLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return Latency::DIV; -+ } else { -+ return Latency::DIV + 1; -+ } -+} -+ -+int DivuLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return Latency::DIVU; -+ } else { -+ return Latency::DIVU + 1; -+ } -+} -+ -+int DdivLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::DDIV; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int DdivuLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = Latency::DDIVU; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int ModLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = 1; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int ModuLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = 1; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int DmodLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = 1; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int DmoduLatency(bool is_operand_register = true) { -+ int latency = 0; -+ latency = 1; -+ if (!is_operand_register) { -+ latency += 1; -+ } -+ return latency; -+} -+ -+int MovzLatency() { return Latency::BRANCH + 1; } -+ -+int MovnLatency() { return Latency::BRANCH + 1; } -+ -+int DlsaLatency() { -+ // Estimated max. -+ return DadduLatency() + 1; -+} -+ -+int CallLatency() { -+ // Estimated. -+ return DadduLatency(false) + Latency::BRANCH + 5; -+} -+ -+int JumpLatency() { -+ // Estimated max. -+ return 1 + DadduLatency() + Latency::BRANCH + 2; -+} -+ -+int SmiUntagLatency() { return 1; } -+ -+int PrepareForTailCallLatency() { -+ // Estimated max. -+ return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH + -+ Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1; -+} -+ -+int AssemblePopArgumentsAdoptFrameLatency() { -+ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() + -+ PrepareForTailCallLatency(); -+} -+ -+int AssertLatency() { return 1; } -+ -+int PrepareCallCFunctionLatency() { -+ int frame_alignment = TurboAssembler::ActivationFrameAlignment(); -+ if (frame_alignment > kSystemPointerSize) { -+ return 1 + DsubuLatency(false) + AndLatency(false) + 1; -+ } else { -+ return DsubuLatency(false); -+ } -+} -+ -+int AdjustBaseAndOffsetLatency() { -+ return 3; // Estimated max. 
-+} -+ -+int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; } -+ -+int UlhuLatency() { return AlignedMemoryLatency(); } -+ -+int UlwLatency() { return AlignedMemoryLatency(); } -+ -+int UlwuLatency() { return AlignedMemoryLatency(); } -+ -+int UldLatency() { return AlignedMemoryLatency(); } -+ -+int Ulwc1Latency() { return AlignedMemoryLatency(); } -+ -+int Uldc1Latency() { return AlignedMemoryLatency(); } -+ -+int UshLatency() { return AlignedMemoryLatency(); } -+ -+int UswLatency() { return AlignedMemoryLatency(); } -+ -+int UsdLatency() { return AlignedMemoryLatency(); } -+ -+int Uswc1Latency() { return AlignedMemoryLatency(); } -+ -+int Usdc1Latency() { return AlignedMemoryLatency(); } -+ -+int Lwc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LWC1; } -+ -+int Swc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SWC1; } -+ -+int Sdc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SDC1; } -+ -+int Ldc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LDC1; } -+ -+int MultiPushLatency() { -+ int latency = DsubuLatency(false); -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ latency++; -+ } -+ return latency; -+} -+ -+int MultiPushFPULatency() { -+ int latency = DsubuLatency(false); -+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { -+ latency += Sdc1Latency(); -+ } -+ return latency; -+} -+ -+int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { -+ int latency = MultiPushLatency(); -+ if (fp_mode == kSaveFPRegs) { -+ latency += MultiPushFPULatency(); -+ } -+ return latency; -+} -+ -+int MultiPopLatency() { -+ int latency = DadduLatency(false); -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ latency++; -+ } -+ return latency; -+} -+ -+int MultiPopFPULatency() { -+ int latency = DadduLatency(false); -+ for (int16_t i = 0; i < kNumRegisters; i++) { -+ latency += Ldc1Latency(); -+ } -+ return latency; -+} -+ -+int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { -+ int latency = MultiPopLatency(); -+ if (fp_mode == kSaveFPRegs) { -+ latency += MultiPopFPULatency(); -+ } -+ return latency; -+} -+ -+int CallCFunctionHelperLatency() { -+ // Estimated. -+ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); -+ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { -+ latency++; -+ } else { -+ latency += DadduLatency(false); -+ } -+ return latency; -+} -+ -+int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } -+ -+int AssembleArchJumpLatency() { -+ // Estimated max. -+ return Latency::BRANCH; -+} -+ -+int GenerateSwitchTableLatency() { -+ int latency = 0; -+ latency = DlsaLatency() + 2; -+ latency += 2; -+ return latency; -+} -+ -+int AssembleArchTableSwitchLatency() { -+ return Latency::BRANCH + GenerateSwitchTableLatency(); -+} -+ -+int DropAndRetLatency() { -+ // Estimated max. -+ return DadduLatency(false) + JumpLatency(); -+} -+ -+int AssemblerReturnLatency() { -+ // Estimated max. -+ return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() + -+ Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency(); -+} -+ -+int TryInlineTruncateDoubleToILatency() { -+ return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) + -+ Latency::BRANCH; -+} -+ -+int CallStubDelayedLatency() { return 1 + CallLatency(); } -+ -+int TruncateDoubleToIDelayedLatency() { -+ // TODO(loong64): This no longer reflects how TruncateDoubleToI is called. 
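-+  // Rough breakdown of the old sequence this estimate models: the inline
-+  // truncate attempt, a stack adjustment, spilling the input double, the
-+  // stub call itself, and restoring the stack afterwards.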
-+ return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) + -+ Sdc1Latency() + CallStubDelayedLatency() + DadduLatency(false) + 1; -+} -+ -+int CheckPageFlagLatency() { -+ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) + -+ Latency::BRANCH; -+} -+ -+int SltuLatency(bool is_operand_register = true) { -+ if (is_operand_register) { -+ return 1; -+ } else { -+ return 2; // Estimated max. -+ } -+} -+ -+int BranchShortHelperLatency() { -+ return 2; // Estimated max. -+} -+ -+int BranchShortLatency() { return BranchShortHelperLatency(); } -+ -+int MoveLatency() { return 1; } -+ -+int MovToFloatParametersLatency() { return 2 * MoveLatency(); } -+ -+int MovFromFloatResultLatency() { return MoveLatency(); } -+ -+int DaddOverflowLatency() { -+ // Estimated max. -+ return 6; -+} -+ -+int DsubOverflowLatency() { -+ // Estimated max. -+ return 6; -+} -+ -+int MulOverflowLatency() { -+ // Estimated max. -+ return MulLatency() + MulhLatency() + 2; -+} -+ -+int DclzLatency() { return 1; } -+ -+int CtzLatency() { return 3 + DclzLatency(); } -+ -+int DctzLatency() { return 4; } -+ -+int PopcntLatency() { -+ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + -+ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + -+ 1 + MulLatency() + 1; -+} -+ -+int DpopcntLatency() { -+ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + -+ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + -+ 1 + DmulLatency() + 1; -+} -+ -+int CompareFLatency() { return Latency::C_cond_S; } -+ -+int CompareF32Latency() { return CompareFLatency(); } -+ -+int CompareF64Latency() { return CompareFLatency(); } -+ -+int CompareIsNanFLatency() { return CompareFLatency(); } -+ -+int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } -+ -+int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } -+ -+int NegsLatency() { return Latency::NEG_S; } -+ -+int NegdLatency() { return Latency::NEG_D; } -+ -+int Float64RoundLatency() { return Latency::RINT_D + 4; } -+ -+int Float32RoundLatency() { return Latency::RINT_S + 4; } -+ -+int Float32MaxLatency() { -+ // Estimated max. -+ int latency = CompareIsNanF32Latency() + Latency::BRANCH; -+ return latency + Latency::MAX_S; -+} -+ -+int Float64MaxLatency() { -+ // Estimated max. -+ int latency = CompareIsNanF64Latency() + Latency::BRANCH; -+ return latency + Latency::MAX_D; -+} -+ -+int Float32MinLatency() { -+ // Estimated max. -+ int latency = CompareIsNanF32Latency() + Latency::BRANCH; -+ return latency + Latency::MIN_S; -+} -+ -+int Float64MinLatency() { -+ // Estimated max. -+ int latency = CompareIsNanF64Latency() + Latency::BRANCH; -+ return latency + Latency::MIN_D; -+} -+ -+int TruncLSLatency(bool load_status) { -+ int latency = Latency::TRUNC_L_S + Latency::DMFC1; -+ if (load_status) { -+ latency += SltuLatency() + 7; -+ } -+ return latency; -+} -+ -+int TruncLDLatency(bool load_status) { -+ int latency = Latency::TRUNC_L_D + Latency::DMFC1; -+ if (load_status) { -+ latency += SltuLatency() + 7; -+ } -+ return latency; -+} -+ -+int TruncUlSLatency() { -+ // Estimated max. -+ return 2 * CompareF32Latency() + CompareIsNanF32Latency() + -+ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S + -+ 3 * Latency::DMFC1 + OrLatency() + Latency::MTC1 + Latency::MOV_S + -+ SltuLatency() + 4; -+} -+ -+int TruncUlDLatency() { -+ // Estimated max. 
-+  return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
-+         4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
-+         3 * Latency::DMFC1 + OrLatency() + Latency::DMTC1 + Latency::MOV_D +
-+         SltuLatency() + 4;
-+}
-+
-+int PushLatency() { return DadduLatency() + AlignedMemoryLatency(); }
-+
-+int ByteSwapSignedLatency() { return 2; }
-+
-+int LlLatency(int offset) {
-+  bool is_one_instruction = is_int14(offset);
-+  if (is_one_instruction) {
-+    return 1;
-+  } else {
-+    return 3;
-+  }
-+}
-+
-+int ExtractBitsLatency(bool sign_extend, int size) {
-+  int latency = 2;
-+  if (sign_extend) {
-+    switch (size) {
-+      case 8:
-+      case 16:
-+      case 32:
-+        latency += 1;
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+  return latency;
-+}
-+
-+int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; }
-+
-+int ScLatency(int offset) {
-+  bool is_one_instruction = is_int14(offset);
-+  if (is_one_instruction) {
-+    return 1;
-+  } else {
-+    return 3;
-+  }
-+}
-+
-+int Word32AtomicExchangeLatency(bool sign_extend, int size) {
-+  return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) +
-+         ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
-+         ScLatency(0) + BranchShortLatency() + 1;
-+}
-+
-+int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
-+  return 2 + DsubuLatency() + 2 + LlLatency(0) +
-+         ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
-+         ScLatency(0) + BranchShortLatency() + 1;
-+}
-+
-+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
-+  // Basic latency modeling for LOONG64 instructions. The values have been
-+  // determined empirically.
-+  switch (instr->arch_opcode()) {
-+    case kArchCallCodeObject:
-+    case kArchCallWasmFunction:
-+      return CallLatency();
-+    case kArchTailCallCodeObjectFromJSFunction:
-+    case kArchTailCallCodeObject: {
-+      int latency = 0;
-+      if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
-+        latency = AssemblePopArgumentsAdoptFrameLatency();
-+      }
-+      return latency + JumpLatency();
-+    }
-+    case kArchTailCallWasm:
-+    case kArchTailCallAddress:
-+      return JumpLatency();
-+    case kArchCallJSFunction: {
-+      int latency = 0;
-+      if (FLAG_debug_code) {
-+        latency = 1 + AssertLatency();
-+      }
-+      return latency + 1 + DadduLatency(false) + CallLatency();
-+    }
-+    case kArchPrepareCallCFunction:
-+      return PrepareCallCFunctionLatency();
-+    case kArchSaveCallerRegisters: {
-+      auto fp_mode =
-+          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-+      return PushCallerSavedLatency(fp_mode);
-+    }
-+    case kArchRestoreCallerRegisters: {
-+      auto fp_mode =
-+          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-+      return PopCallerSavedLatency(fp_mode);
-+    }
-+    case kArchPrepareTailCall:
-+      return 2;
-+    case kArchCallCFunction:
-+      return CallCFunctionLatency();
-+    case kArchJmp:
-+      return AssembleArchJumpLatency();
-+    case kArchTableSwitch:
-+      return AssembleArchTableSwitchLatency();
-+    case kArchAbortCSAAssert:
-+      return CallLatency() + 1;
-+    case kArchDebugBreak:
-+      return 1;
-+    case kArchComment:
-+    case kArchNop:
-+    case kArchThrowTerminator:
-+    case kArchDeoptimize:
-+      return 0;
-+    case kArchRet:
-+      return AssemblerReturnLatency();
-+    case kArchFramePointer:
-+      return 1;
-+    case kArchParentFramePointer:
-+      // Estimated max.
-+      return AlignedMemoryLatency();
-+    case kArchTruncateDoubleToI:
-+      return TruncateDoubleToIDelayedLatency();
-+    case kArchStoreWithWriteBarrier:
-+      return DadduLatency() + 1 + CheckPageFlagLatency();
-+    case kArchStackSlot:
-+      // Estimated max.
-+ return DadduLatency(false) + AndLatency(false) + AssertLatency() + -+ DadduLatency(false) + AndLatency(false) + BranchShortLatency() + -+ 1 + DsubuLatency() + DadduLatency(); -+ case kArchWordPoisonOnSpeculation: -+ return AndLatency(); -+ case kIeee754Float64Acos: -+ case kIeee754Float64Acosh: -+ case kIeee754Float64Asin: -+ case kIeee754Float64Asinh: -+ case kIeee754Float64Atan: -+ case kIeee754Float64Atanh: -+ case kIeee754Float64Atan2: -+ case kIeee754Float64Cos: -+ case kIeee754Float64Cosh: -+ case kIeee754Float64Cbrt: -+ case kIeee754Float64Exp: -+ case kIeee754Float64Expm1: -+ case kIeee754Float64Log: -+ case kIeee754Float64Log1p: -+ case kIeee754Float64Log10: -+ case kIeee754Float64Log2: -+ case kIeee754Float64Pow: -+ case kIeee754Float64Sin: -+ case kIeee754Float64Sinh: -+ case kIeee754Float64Tan: -+ case kIeee754Float64Tanh: -+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + -+ CallCFunctionLatency() + MovFromFloatResultLatency(); -+ case kLoong64Add: -+ case kLoong64Dadd: -+ return DadduLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64DaddOvf: -+ return DaddOverflowLatency(); -+ case kLoong64Sub: -+ case kLoong64Dsub: -+ return DsubuLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64DsubOvf: -+ return DsubOverflowLatency(); -+ case kLoong64Mul: -+ return MulLatency(); -+ case kLoong64MulOvf: -+ return MulOverflowLatency(); -+ case kLoong64MulHigh: -+ return MulhLatency(); -+ case kLoong64MulHighU: -+ return MulhuLatency(); -+ case kLoong64DMulHigh: -+ return DMulhLatency(); -+ case kLoong64Div: { -+ int latency = DivLatency(instr->InputAt(1)->IsRegister()); -+ return latency++; -+ } -+ case kLoong64DivU: { -+ int latency = DivuLatency(instr->InputAt(1)->IsRegister()); -+ return latency++; -+ } -+ case kLoong64Mod: -+ return ModLatency(); -+ case kLoong64ModU: -+ return ModuLatency(); -+ case kLoong64Dmul: -+ return DmulLatency(); -+ case kLoong64Ddiv: { -+ int latency = DdivLatency(); -+ return latency++; -+ } -+ case kLoong64DdivU: { -+ int latency = DdivuLatency(); -+ return latency++; -+ } -+ case kLoong64Dmod: -+ return DmodLatency(); -+ case kLoong64DmodU: -+ return DmoduLatency(); -+ case kLoong64Dlsa: -+ case kLoong64Lsa: -+ return DlsaLatency(); -+ case kLoong64And: -+ return AndLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64And32: { -+ bool is_operand_register = instr->InputAt(1)->IsRegister(); -+ int latency = AndLatency(is_operand_register); -+ if (is_operand_register) { -+ return latency + 2; -+ } else { -+ return latency + 1; -+ } -+ } -+ case kLoong64Or: -+ return OrLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64Or32: { -+ bool is_operand_register = instr->InputAt(1)->IsRegister(); -+ int latency = OrLatency(is_operand_register); -+ if (is_operand_register) { -+ return latency + 2; -+ } else { -+ return latency + 1; -+ } -+ } -+ case kLoong64Nor: -+ return NorLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64Nor32: { -+ bool is_operand_register = instr->InputAt(1)->IsRegister(); -+ int latency = NorLatency(is_operand_register); -+ if (is_operand_register) { -+ return latency + 2; -+ } else { -+ return latency + 1; -+ } -+ } -+ case kLoong64Xor: -+ return XorLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64Xor32: { -+ bool is_operand_register = instr->InputAt(1)->IsRegister(); -+ int latency = XorLatency(is_operand_register); -+ if (is_operand_register) { -+ return latency + 2; -+ } else { -+ return latency + 1; -+ } -+ } -+ case kLoong64Clz: -+ case kLoong64Dclz: -+ return 
DclzLatency(); -+ case kLoong64Ctz: -+ return CtzLatency(); -+ case kLoong64Dctz: -+ return DctzLatency(); -+ case kLoong64Popcnt: -+ return PopcntLatency(); -+ case kLoong64Dpopcnt: -+ return DpopcntLatency(); -+ case kLoong64Shl: -+ return 1; -+ case kLoong64Shr: -+ case kLoong64Sar: -+ return 2; -+ case kLoong64Ext: -+ case kLoong64Ins: -+ case kLoong64Dext: -+ case kLoong64Dins: -+ case kLoong64Dshl: -+ case kLoong64Dshr: -+ case kLoong64Dsar: -+ case kLoong64Ror: -+ case kLoong64Dror: -+ return 1; -+ case kLoong64Tst: -+ return AndLatency(instr->InputAt(1)->IsRegister()); -+ case kLoong64Mov: -+ return 1; -+ case kLoong64CmpS: -+ return MoveLatency() + CompareF32Latency(); -+ case kLoong64AddS: -+ return Latency::ADD_S; -+ case kLoong64SubS: -+ return Latency::SUB_S; -+ case kLoong64MulS: -+ return Latency::MUL_S; -+ case kLoong64DivS: -+ return Latency::DIV_S; -+ case kLoong64ModS: -+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + -+ CallCFunctionLatency() + MovFromFloatResultLatency(); -+ case kLoong64AbsS: -+ return Latency::ABS_S; -+ case kLoong64NegS: -+ return NegdLatency(); -+ case kLoong64SqrtS: -+ return Latency::SQRT_S; -+ case kLoong64MaxS: -+ return Latency::MAX_S; -+ case kLoong64MinS: -+ return Latency::MIN_S; -+ case kLoong64CmpD: -+ return MoveLatency() + CompareF64Latency(); -+ case kLoong64AddD: -+ return Latency::ADD_D; -+ case kLoong64SubD: -+ return Latency::SUB_D; -+ case kLoong64MulD: -+ return Latency::MUL_D; -+ case kLoong64DivD: -+ return Latency::DIV_D; -+ case kLoong64ModD: -+ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + -+ CallCFunctionLatency() + MovFromFloatResultLatency(); -+ case kLoong64AbsD: -+ return Latency::ABS_D; -+ case kLoong64NegD: -+ return NegdLatency(); -+ case kLoong64SqrtD: -+ return Latency::SQRT_D; -+ case kLoong64MaxD: -+ return Latency::MAX_D; -+ case kLoong64MinD: -+ return Latency::MIN_D; -+ case kLoong64Float64RoundDown: -+ case kLoong64Float64RoundTruncate: -+ case kLoong64Float64RoundUp: -+ case kLoong64Float64RoundTiesEven: -+ return Float64RoundLatency(); -+ case kLoong64Float32RoundDown: -+ case kLoong64Float32RoundTruncate: -+ case kLoong64Float32RoundUp: -+ case kLoong64Float32RoundTiesEven: -+ return Float32RoundLatency(); -+ case kLoong64Float32Max: -+ return Float32MaxLatency(); -+ case kLoong64Float64Max: -+ return Float64MaxLatency(); -+ case kLoong64Float32Min: -+ return Float32MinLatency(); -+ case kLoong64Float64Min: -+ return Float64MinLatency(); -+ case kLoong64Float64SilenceNaN: -+ return Latency::SUB_D; -+ case kLoong64CvtSD: -+ return Latency::CVT_S_D; -+ case kLoong64CvtDS: -+ return Latency::CVT_D_S; -+ case kLoong64CvtDW: -+ return Latency::MTC1 + Latency::CVT_D_W; -+ case kLoong64CvtSW: -+ return Latency::MTC1 + Latency::CVT_S_W; -+ case kLoong64CvtSUw: -+ return 1 + Latency::DMTC1 + Latency::CVT_S_L; -+ case kLoong64CvtSL: -+ return Latency::DMTC1 + Latency::CVT_S_L; -+ case kLoong64CvtDL: -+ return Latency::DMTC1 + Latency::CVT_D_L; -+ case kLoong64CvtDUw: -+ return 1 + Latency::DMTC1 + Latency::CVT_D_L; -+ case kLoong64CvtDUl: -+ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + -+ 2 * Latency::CVT_D_L + Latency::ADD_D; -+ case kLoong64CvtSUl: -+ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + -+ 2 * Latency::CVT_S_L + Latency::ADD_S; -+ case kLoong64FloorWD: -+ return Latency::FLOOR_W_D + Latency::MFC1; -+ case kLoong64CeilWD: -+ return Latency::CEIL_W_D + Latency::MFC1; -+ case kLoong64RoundWD: -+ return Latency::ROUND_W_D + Latency::MFC1; -+ 
case kLoong64TruncWD: -+ return Latency::TRUNC_W_D + Latency::MFC1; -+ case kLoong64FloorWS: -+ return Latency::FLOOR_W_S + Latency::MFC1; -+ case kLoong64CeilWS: -+ return Latency::CEIL_W_S + Latency::MFC1; -+ case kLoong64RoundWS: -+ return Latency::ROUND_W_S + Latency::MFC1; -+ case kLoong64TruncWS: -+ return Latency::TRUNC_W_S + Latency::MFC1 + 2 + MovnLatency(); -+ case kLoong64TruncLS: -+ return TruncLSLatency(instr->OutputCount() > 1); -+ case kLoong64TruncLD: -+ return TruncLDLatency(instr->OutputCount() > 1); -+ case kLoong64TruncUwD: -+ // Estimated max. -+ return CompareF64Latency() + 2 * Latency::BRANCH + -+ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() + -+ Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1; -+ case kLoong64TruncUwS: -+ // Estimated max. -+ return CompareF32Latency() + 2 * Latency::BRANCH + -+ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() + -+ Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency(); -+ case kLoong64TruncUlS: -+ return TruncUlSLatency(); -+ case kLoong64TruncUlD: -+ return TruncUlDLatency(); -+ case kLoong64BitcastDL: -+ return Latency::DMFC1; -+ case kLoong64BitcastLD: -+ return Latency::DMTC1; -+ case kLoong64Float64ExtractLowWord32: -+ return Latency::MFC1; -+ case kLoong64Float64InsertLowWord32: -+ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; -+ case kLoong64Float64ExtractHighWord32: -+ return Latency::MFHC1; -+ case kLoong64Float64InsertHighWord32: -+ return Latency::MTHC1; -+ case kLoong64Seb: -+ case kLoong64Seh: -+ return 1; -+ case kLoong64Lbu: -+ case kLoong64Lb: -+ case kLoong64Lhu: -+ case kLoong64Lh: -+ case kLoong64Lwu: -+ case kLoong64Lw: -+ case kLoong64Ld: -+ case kLoong64Sb: -+ case kLoong64Sh: -+ case kLoong64Sw: -+ case kLoong64Sd: -+ return AlignedMemoryLatency(); -+ case kLoong64Lwc1: -+ return Lwc1Latency(); -+ case kLoong64Ldc1: -+ return Ldc1Latency(); -+ case kLoong64Swc1: -+ return Swc1Latency(); -+ case kLoong64Sdc1: -+ return Sdc1Latency(); -+ case kLoong64Ulhu: -+ case kLoong64Ulh: -+ return UlhuLatency(); -+ case kLoong64Ulwu: -+ return UlwuLatency(); -+ case kLoong64Ulw: -+ return UlwLatency(); -+ case kLoong64Uld: -+ return UldLatency(); -+ case kLoong64Ulwc1: -+ return Ulwc1Latency(); -+ case kLoong64Uldc1: -+ return Uldc1Latency(); -+ case kLoong64Ush: -+ return UshLatency(); -+ case kLoong64Usw: -+ return UswLatency(); -+ case kLoong64Usd: -+ return UsdLatency(); -+ case kLoong64Uswc1: -+ return Uswc1Latency(); -+ case kLoong64Usdc1: -+ return Usdc1Latency(); -+ case kLoong64Push: { -+ int latency = 0; -+ if (instr->InputAt(0)->IsFPRegister()) { -+ latency = Sdc1Latency() + DsubuLatency(false); -+ } else { -+ latency = PushLatency(); -+ } -+ return latency; -+ } -+ case kLoong64Peek: { -+ int latency = 0; -+ if (instr->OutputAt(0)->IsFPRegister()) { -+ auto op = LocationOperand::cast(instr->OutputAt(0)); -+ switch (op->representation()) { -+ case MachineRepresentation::kFloat64: -+ latency = Ldc1Latency(); -+ break; -+ case MachineRepresentation::kFloat32: -+ latency = Latency::LWC1; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ } else { -+ latency = AlignedMemoryLatency(); -+ } -+ return latency; -+ } -+ case kLoong64StackClaim: -+ return DsubuLatency(false); -+ case kLoong64StoreToStackSlot: { -+ int latency = 0; -+ if (instr->InputAt(0)->IsFPRegister()) { -+ if (instr->InputAt(0)->IsSimd128Register()) { -+ latency = 1; // Estimated value. 
-+ } else { -+ latency = Sdc1Latency(); -+ } -+ } else { -+ latency = AlignedMemoryLatency(); -+ } -+ return latency; -+ } -+ case kLoong64ByteSwap64: -+ return ByteSwapSignedLatency(); -+ case kLoong64ByteSwap32: -+ return ByteSwapSignedLatency(); -+ case kWord32AtomicLoadInt8: -+ case kWord32AtomicLoadUint8: -+ case kWord32AtomicLoadInt16: -+ case kWord32AtomicLoadUint16: -+ case kWord32AtomicLoadWord32: -+ return 2; -+ case kWord32AtomicStoreWord8: -+ case kWord32AtomicStoreWord16: -+ case kWord32AtomicStoreWord32: -+ return 3; -+ case kWord32AtomicExchangeInt8: -+ return Word32AtomicExchangeLatency(true, 8); -+ case kWord32AtomicExchangeUint8: -+ return Word32AtomicExchangeLatency(false, 8); -+ case kWord32AtomicExchangeInt16: -+ return Word32AtomicExchangeLatency(true, 16); -+ case kWord32AtomicExchangeUint16: -+ return Word32AtomicExchangeLatency(false, 16); -+ case kWord32AtomicExchangeWord32: -+ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1; -+ case kWord32AtomicCompareExchangeInt8: -+ return Word32AtomicCompareExchangeLatency(true, 8); -+ case kWord32AtomicCompareExchangeUint8: -+ return Word32AtomicCompareExchangeLatency(false, 8); -+ case kWord32AtomicCompareExchangeInt16: -+ return Word32AtomicCompareExchangeLatency(true, 16); -+ case kWord32AtomicCompareExchangeUint16: -+ return Word32AtomicCompareExchangeLatency(false, 16); -+ case kWord32AtomicCompareExchangeWord32: -+ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) + -+ BranchShortLatency() + 1; -+ case kLoong64AssertEqual: -+ return AssertLatency(); -+ default: -+ return 1; -+ } -+} -+ -+} // namespace compiler -+} // namespace internal -+} // namespace v8 -diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc -new file mode 100644 -index 00000000..deb7d220 ---- /dev/null -+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc -@@ -0,0 +1,3101 @@ -+// Copyright 2014 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#include "src/base/bits.h" -+#include "src/compiler/backend/instruction-selector-impl.h" -+#include "src/compiler/node-matchers.h" -+#include "src/compiler/node-properties.h" -+ -+namespace v8 { -+namespace internal { -+namespace compiler { -+ -+#define TRACE_UNIMPL() \ -+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) -+ -+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) -+ -+// Adds loong64-specific methods for generating InstructionOperands. -+class Loong64OperandGenerator final : public OperandGenerator { -+ public: -+ explicit Loong64OperandGenerator(InstructionSelector* selector) -+ : OperandGenerator(selector) {} -+ -+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) { -+ if (CanBeImmediate(node, opcode)) { -+ return UseImmediate(node); -+ } -+ return UseRegister(node); -+ } -+ -+ // Use the zero register if the node has the immediate value zero, otherwise -+ // assign a register. 
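-+  // (For example, VisitStore routes its value operand through this helper,
-+  // so a store of constant 0 or 0.0 needs no scratch register.)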
-+  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
-+    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
-+        (IsFloatConstant(node) &&
-+         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
-+      return UseImmediate(node);
-+    }
-+    return UseRegister(node);
-+  }
-+
-+  bool IsIntegerConstant(Node* node) {
-+    return (node->opcode() == IrOpcode::kInt32Constant) ||
-+           (node->opcode() == IrOpcode::kInt64Constant);
-+  }
-+
-+  int64_t GetIntegerConstantValue(Node* node) {
-+    if (node->opcode() == IrOpcode::kInt32Constant) {
-+      return OpParameter<int32_t>(node->op());
-+    }
-+    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
-+    return OpParameter<int64_t>(node->op());
-+  }
-+
-+  bool IsFloatConstant(Node* node) {
-+    return (node->opcode() == IrOpcode::kFloat32Constant) ||
-+           (node->opcode() == IrOpcode::kFloat64Constant);
-+  }
-+
-+  double GetFloatConstantValue(Node* node) {
-+    if (node->opcode() == IrOpcode::kFloat32Constant) {
-+      return OpParameter<float>(node->op());
-+    }
-+    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
-+    return OpParameter<double>(node->op());
-+  }
-+
-+  bool CanBeImmediate(Node* node, InstructionCode mode) {
-+    return IsIntegerConstant(node) &&
-+           CanBeImmediate(GetIntegerConstantValue(node), mode);
-+  }
-+
-+  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
-+    switch (ArchOpcodeField::decode(opcode)) {
-+      case kLoong64Shl:
-+      case kLoong64Sar:
-+      case kLoong64Shr:
-+        return is_uint5(value);
-+      case kLoong64Dshl:
-+      case kLoong64Dsar:
-+      case kLoong64Dshr:
-+        return is_uint6(value);
-+      case kLoong64Add:
-+      case kLoong64And32:
-+      case kLoong64And:
-+      case kLoong64Dadd:
-+      case kLoong64Or32:
-+      case kLoong64Or:
-+      case kLoong64Tst:
-+      case kLoong64Xor:
-+        return is_uint12(value);
-+      case kLoong64Lb:
-+      case kLoong64Lbu:
-+      case kLoong64Sb:
-+      case kLoong64Lh:
-+      case kLoong64Lhu:
-+      case kLoong64Sh:
-+      case kLoong64Lw:
-+      case kLoong64Sw:
-+      case kLoong64Ld:
-+      case kLoong64Sd:
-+      case kLoong64Lwc1:
-+      case kLoong64Swc1:
-+      case kLoong64Ldc1:
-+      case kLoong64Sdc1:
-+        return is_int12(value);
-+      default:
-+        return is_int12(value);
-+    }
-+  }
-+
-+ private:
-+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
-+    TRACE_UNIMPL();
-+    return false;
-+  }
-+};
-+
-+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
-+                    Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  selector->Emit(opcode, g.DefineAsRegister(node),
-+                 g.UseRegister(node->InputAt(0)));
-+}
-+
-+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
-+                     Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  int32_t imm = OpParameter<int32_t>(node->op());
-+  selector->Emit(opcode, g.DefineAsRegister(node),
-+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
-+}
-+
-+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
-+                           Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  if (g.IsIntegerConstant(node->InputAt(1))) {
-+    selector->Emit(opcode, g.DefineAsRegister(node),
-+                   g.UseRegister(node->InputAt(0)),
-+                   g.UseImmediate(node->InputAt(1)));
-+  } else {
-+    selector->Emit(opcode, g.DefineAsRegister(node),
-+                   g.UseRegister(node->InputAt(0)),
-+                   g.UseRegister(node->InputAt(1)));
-+  }
-+}
-+
-+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
-+                      Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  int32_t imm = OpParameter<int32_t>(node->op());
-+  selector->Emit(opcode, g.DefineAsRegister(node),
-+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
-+                 g.UseRegister(node->InputAt(1)));
-+}
-+
-+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
-+                     Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  selector->Emit(opcode, g.DefineAsRegister(node),
-+                 g.UseRegister(node->InputAt(0)),
-+                 g.UseRegister(node->InputAt(1)));
-+}
-+
-+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  selector->Emit(
-+      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
-+      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-+}
-+
-+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
-+                     Node* node) {
-+  Loong64OperandGenerator g(selector);
-+  selector->Emit(opcode, g.DefineAsRegister(node),
-+                 g.UseRegister(node->InputAt(0)),
-+                 g.UseOperand(node->InputAt(1), opcode));
-+}
-+
-+struct ExtendingLoadMatcher {
-+  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
-+      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
-+    Initialize(node);
-+  }
-+
-+  bool Matches() const { return matches_; }
-+
-+  Node* base() const {
-+    DCHECK(Matches());
-+    return base_;
-+  }
-+  int64_t immediate() const {
-+    DCHECK(Matches());
-+    return immediate_;
-+  }
-+  ArchOpcode opcode() const {
-+    DCHECK(Matches());
-+    return opcode_;
-+  }
-+
-+ private:
-+  bool matches_;
-+  InstructionSelector* selector_;
-+  Node* base_;
-+  int64_t immediate_;
-+  ArchOpcode opcode_;
-+
-+  void Initialize(Node* node) {
-+    Int64BinopMatcher m(node);
-+    // When loading a 64-bit value and shifting by 32, we should
-+    // just load and sign-extend the interesting 4 bytes instead.
-+    // This happens, for example, when we're loading and untagging SMIs.
-+    DCHECK(m.IsWord64Sar());
-+    if (m.left().IsLoad() && m.right().Is(32) &&
-+        selector_->CanCover(m.node(), m.left().node())) {
-+      DCHECK_EQ(selector_->GetEffectLevel(node),
-+                selector_->GetEffectLevel(m.left().node()));
-+      MachineRepresentation rep =
-+          LoadRepresentationOf(m.left().node()->op()).representation();
-+      DCHECK_EQ(3, ElementSizeLog2Of(rep));
-+      if (rep != MachineRepresentation::kTaggedSigned &&
-+          rep != MachineRepresentation::kTaggedPointer &&
-+          rep != MachineRepresentation::kTagged &&
-+          rep != MachineRepresentation::kWord64) {
-+        return;
-+      }
-+
-+      Loong64OperandGenerator g(selector_);
-+      Node* load = m.left().node();
-+      Node* offset = load->InputAt(1);
-+      base_ = load->InputAt(0);
-+      opcode_ = kLoong64Lw;
-+      if (g.CanBeImmediate(offset, opcode_)) {
-+        immediate_ = g.GetIntegerConstantValue(offset) + 4;
-+        matches_ = g.CanBeImmediate(immediate_, kLoong64Lw);
-+      }
-+    }
-+  }
-+};
-+
-+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
-+                          Node* output_node) {
-+  ExtendingLoadMatcher m(node, selector);
-+  Loong64OperandGenerator g(selector);
-+  if (m.Matches()) {
-+    InstructionOperand inputs[2];
-+    inputs[0] = g.UseRegister(m.base());
-+    InstructionCode opcode =
-+        m.opcode() | AddressingModeField::encode(kMode_MRI);
-+    DCHECK(is_int32(m.immediate()));
-+    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
-+    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
-+    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
-+                   inputs);
-+    return true;
-+  }
-+  return false;
-+}
-+
-+bool TryMatchImmediate(InstructionSelector* selector,
-+                       InstructionCode* opcode_return, Node* node,
-+                       size_t* input_count_return,
-+                       InstructionOperand* inputs) {
-+  Loong64OperandGenerator g(selector);
-+  if (g.CanBeImmediate(node, *opcode_return)) {
-+    *opcode_return |=
AddressingModeField::encode(kMode_MRI); -+ inputs[0] = g.UseImmediate(node); -+ *input_count_return = 1; -+ return true; -+ } -+ return false; -+} -+ -+static void VisitBinop(InstructionSelector* selector, Node* node, -+ InstructionCode opcode, bool has_reverse_opcode, -+ InstructionCode reverse_opcode, -+ FlagsContinuation* cont) { -+ Loong64OperandGenerator g(selector); -+ Int32BinopMatcher m(node); -+ InstructionOperand inputs[2]; -+ size_t input_count = 0; -+ InstructionOperand outputs[1]; -+ size_t output_count = 0; -+ -+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count, -+ &inputs[1])) { -+ inputs[0] = g.UseRegister(m.left().node()); -+ input_count++; -+ } else if (has_reverse_opcode && -+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(), -+ &input_count, &inputs[1])) { -+ inputs[0] = g.UseRegister(m.right().node()); -+ opcode = reverse_opcode; -+ input_count++; -+ } else { -+ inputs[input_count++] = g.UseRegister(m.left().node()); -+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode); -+ } -+ -+ if (cont->IsDeoptimize()) { -+ // If we can deoptimize as a result of the binop, we need to make sure that -+ // the deopt inputs are not overwritten by the binop result. One way -+ // to achieve that is to declare the output register as same-as-first. -+ outputs[output_count++] = g.DefineSameAsFirst(node); -+ } else { -+ outputs[output_count++] = g.DefineAsRegister(node); -+ } -+ -+ DCHECK_NE(0u, input_count); -+ DCHECK_EQ(1u, output_count); -+ DCHECK_GE(arraysize(inputs), input_count); -+ DCHECK_GE(arraysize(outputs), output_count); -+ -+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count, -+ inputs, cont); -+} -+ -+static void VisitBinop(InstructionSelector* selector, Node* node, -+ InstructionCode opcode, bool has_reverse_opcode, -+ InstructionCode reverse_opcode) { -+ FlagsContinuation cont; -+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); -+} -+ -+static void VisitBinop(InstructionSelector* selector, Node* node, -+ InstructionCode opcode, FlagsContinuation* cont) { -+ VisitBinop(selector, node, opcode, false, kArchNop, cont); -+} -+ -+static void VisitBinop(InstructionSelector* selector, Node* node, -+ InstructionCode opcode) { -+ VisitBinop(selector, node, opcode, false, kArchNop); -+} -+ -+void InstructionSelector::VisitStackSlot(Node* node) { -+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); -+ int alignment = rep.alignment(); -+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment); -+ OperandGenerator g(this); -+ -+ Emit(kArchStackSlot, g.DefineAsRegister(node), -+ sequence()->AddImmediate(Constant(slot)), -+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr); -+} -+ -+void InstructionSelector::VisitAbortCSAAssert(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); -+} -+ -+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, -+ Node* output = nullptr) { -+ Loong64OperandGenerator g(selector); -+ Node* base = node->InputAt(0); -+ Node* index = node->InputAt(1); -+ -+ if (g.CanBeImmediate(index, opcode)) { -+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.DefineAsRegister(output == nullptr ? node : output), -+ g.UseRegister(base), g.UseImmediate(index)); -+ } else { -+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR), -+ g.DefineAsRegister(output == nullptr ? 
node : output), -+ g.UseRegister(base), g.UseRegister(index)); -+ } -+} -+ -+void InstructionSelector::VisitLoadTransform(Node* node) { -+ LoadTransformParameters params = LoadTransformParametersOf(node->op()); -+ -+ InstructionCode opcode = kArchNop; -+ switch (params.transformation) { -+ case LoadTransformation::kS8x16LoadSplat: -+ opcode = kLoong64S8x16LoadSplat; -+ break; -+ case LoadTransformation::kS16x8LoadSplat: -+ opcode = kLoong64S16x8LoadSplat; -+ break; -+ case LoadTransformation::kS32x4LoadSplat: -+ opcode = kLoong64S32x4LoadSplat; -+ break; -+ case LoadTransformation::kS64x2LoadSplat: -+ opcode = kLoong64S64x2LoadSplat; -+ break; -+ case LoadTransformation::kI16x8Load8x8S: -+ opcode = kLoong64I16x8Load8x8S; -+ break; -+ case LoadTransformation::kI16x8Load8x8U: -+ opcode = kLoong64I16x8Load8x8U; -+ break; -+ case LoadTransformation::kI32x4Load16x4S: -+ opcode = kLoong64I32x4Load16x4S; -+ break; -+ case LoadTransformation::kI32x4Load16x4U: -+ opcode = kLoong64I32x4Load16x4U; -+ break; -+ case LoadTransformation::kI64x2Load32x2S: -+ opcode = kLoong64I64x2Load32x2S; -+ break; -+ case LoadTransformation::kI64x2Load32x2U: -+ opcode = kLoong64I64x2Load32x2U; -+ break; -+ default: -+ UNIMPLEMENTED(); -+ } -+ -+ EmitLoad(this, node, opcode); -+} -+ -+void InstructionSelector::VisitLoad(Node* node) { -+ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); -+ -+ InstructionCode opcode = kArchNop; -+ switch (load_rep.representation()) { -+ case MachineRepresentation::kFloat32: -+ opcode = kLoong64Lwc1; -+ break; -+ case MachineRepresentation::kFloat64: -+ opcode = kLoong64Ldc1; -+ break; -+ case MachineRepresentation::kBit: // Fall through. -+ case MachineRepresentation::kWord8: -+ opcode = load_rep.IsUnsigned() ? kLoong64Lbu : kLoong64Lb; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = load_rep.IsUnsigned() ? kLoong64Lhu : kLoong64Lh; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = load_rep.IsUnsigned() ? kLoong64Lwu : kLoong64Lw; -+ break; -+ case MachineRepresentation::kTaggedSigned: // Fall through. -+ case MachineRepresentation::kTaggedPointer: // Fall through. -+ case MachineRepresentation::kTagged: // Fall through. -+ case MachineRepresentation::kWord64: -+ opcode = kLoong64Ld; -+ break; -+ case MachineRepresentation::kCompressedPointer: // Fall through. -+ case MachineRepresentation::kCompressed: // Fall through. -+ case MachineRepresentation::kNone: -+ case MachineRepresentation::kSimd128: -+ UNREACHABLE(); -+ } -+ if (node->opcode() == IrOpcode::kPoisonedLoad) { -+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); -+ opcode |= MiscField::encode(kMemoryAccessPoisoned); -+ } -+ -+ EmitLoad(this, node, opcode); -+} -+ -+void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } -+ -+void InstructionSelector::VisitProtectedLoad(Node* node) { -+ // TODO(eholk) -+ UNIMPLEMENTED(); -+} -+ -+void InstructionSelector::VisitStore(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* base = node->InputAt(0); -+ Node* index = node->InputAt(1); -+ Node* value = node->InputAt(2); -+ -+ StoreRepresentation store_rep = StoreRepresentationOf(node->op()); -+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); -+ MachineRepresentation rep = store_rep.representation(); -+ -+ // TODO(loong64): I guess this could be done in a better way. 
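-+  // Stores that need a write barrier are emitted as a single
-+  // kArchStoreWithWriteBarrier with base, index and value in unique
-+  // registers; all other stores fall through to a width-specific opcode.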
-+  if (write_barrier_kind != kNoWriteBarrier &&
-+      V8_LIKELY(!FLAG_disable_write_barriers)) {
-+    DCHECK(CanBeTaggedPointer(rep));
-+    InstructionOperand inputs[3];
-+    size_t input_count = 0;
-+    inputs[input_count++] = g.UseUniqueRegister(base);
-+    inputs[input_count++] = g.UseUniqueRegister(index);
-+    inputs[input_count++] = g.UseUniqueRegister(value);
-+    RecordWriteMode record_write_mode =
-+        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
-+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
-+    size_t const temp_count = arraysize(temps);
-+    InstructionCode code = kArchStoreWithWriteBarrier;
-+    code |= MiscField::encode(static_cast<int>(record_write_mode));
-+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
-+  } else {
-+    ArchOpcode opcode = kArchNop;
-+    switch (rep) {
-+      case MachineRepresentation::kFloat32:
-+        opcode = kLoong64Swc1;
-+        break;
-+      case MachineRepresentation::kFloat64:
-+        opcode = kLoong64Sdc1;
-+        break;
-+      case MachineRepresentation::kBit:  // Fall through.
-+      case MachineRepresentation::kWord8:
-+        opcode = kLoong64Sb;
-+        break;
-+      case MachineRepresentation::kWord16:
-+        opcode = kLoong64Sh;
-+        break;
-+      case MachineRepresentation::kWord32:
-+        opcode = kLoong64Sw;
-+        break;
-+      case MachineRepresentation::kTaggedSigned:   // Fall through.
-+      case MachineRepresentation::kTaggedPointer:  // Fall through.
-+      case MachineRepresentation::kTagged:         // Fall through.
-+      case MachineRepresentation::kWord64:
-+        opcode = kLoong64Sd;
-+        break;
-+      case MachineRepresentation::kCompressedPointer:  // Fall through.
-+      case MachineRepresentation::kCompressed:         // Fall through.
-+      case MachineRepresentation::kNone:
-+      case MachineRepresentation::kSimd128:
-+        UNREACHABLE();
-+        return;
-+    }
-+
-+    if (g.CanBeImmediate(index, opcode)) {
-+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-+           g.UseRegister(base), g.UseImmediate(index),
-+           g.UseRegisterOrImmediateZero(value));
-+    } else {
-+      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
-+           g.UseRegister(base), g.UseRegister(index),
-+           g.UseRegisterOrImmediateZero(value));
-+    }
-+  }
-+}
-+
-+void InstructionSelector::VisitProtectedStore(Node* node) {
-+  // TODO(eholk)
-+  UNIMPLEMENTED();
-+}
-+
-+void InstructionSelector::VisitWord32And(Node* node) {
-+  Loong64OperandGenerator g(this);
-+  Int32BinopMatcher m(node);
-+  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
-+      m.right().HasValue()) {
-+    uint32_t mask = m.right().Value();
-+    uint32_t mask_width = base::bits::CountPopulation(mask);
-+    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
-+    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
-+      // The mask must be contiguous, and occupy the least-significant bits.
-+      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
-+
-+      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
-+      // significant bits.
-+      Int32BinopMatcher mleft(m.left().node());
-+      if (mleft.right().HasValue()) {
-+        // Any shift value can match; int32 shifts use `value % 32`.
-+        uint32_t lsb = mleft.right().Value() & 0x1F;
-+
-+        // Ext cannot extract bits past the register size, however since
-+        // shifting the original value would have introduced some zeros we can
-+        // still use Ext with a smaller mask and the remaining bits will be
-+        // zeros.
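-+        // For example, And(Shr(x, 28), 0xff) has lsb == 28 and
-+        // mask_width == 8; 28 + 8 > 32, so the width is clamped to 4 and
-+        // Ext(x, 28, 4) extracts just the surviving bits [28..31].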
-+ if (lsb + mask_width > 32) mask_width = 32 - lsb; -+ -+ Emit(kLoong64Ext, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), -+ g.TempImmediate(mask_width)); -+ return; -+ } -+ // Other cases fall through to the normal And operation. -+ } -+ } -+ if (m.right().HasValue()) { -+ uint32_t mask = m.right().Value(); -+ uint32_t shift = base::bits::CountPopulation(~mask); -+ uint32_t msb = base::bits::CountLeadingZeros32(~mask); -+ if (shift != 0 && shift != 32 && msb + shift == 32) { -+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction -+ // and remove constant loading of inverted mask. -+ Emit(kLoong64Ins, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.TempImmediate(0), g.TempImmediate(shift)); -+ return; -+ } -+ } -+ VisitBinop(this, node, kLoong64And32, true, kLoong64And32); -+} -+ -+void InstructionSelector::VisitWord64And(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && -+ m.right().HasValue()) { -+ uint64_t mask = m.right().Value(); -+ uint32_t mask_width = base::bits::CountPopulation(mask); -+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); -+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { -+ // The mask must be contiguous, and occupy the least-significant bits. -+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); -+ -+ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least -+ // significant bits. -+ Int64BinopMatcher mleft(m.left().node()); -+ if (mleft.right().HasValue()) { -+ // Any shift value can match; int64 shifts use `value % 64`. -+ uint32_t lsb = static_cast(mleft.right().Value() & 0x3F); -+ -+ // Dext cannot extract bits past the register size, however since -+ // shifting the original value would have introduced some zeros we can -+ // still use Dext with a smaller mask and the remaining bits will be -+ // zeros. -+ if (lsb + mask_width > 64) mask_width = 64 - lsb; -+ -+ if (lsb == 0 && mask_width == 64) { -+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node())); -+ } else { -+ Emit(kLoong64Dext, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), -+ g.TempImmediate(static_cast(mask_width))); -+ } -+ return; -+ } -+ // Other cases fall through to the normal And operation. -+ } -+ } -+ if (m.right().HasValue()) { -+ uint64_t mask = m.right().Value(); -+ uint32_t shift = base::bits::CountPopulation(~mask); -+ uint32_t msb = base::bits::CountLeadingZeros64(~mask); -+ if (shift != 0 && shift < 32 && msb + shift == 64) { -+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction -+ // and remove constant loading of inverted mask. Dins cannot insert bits -+ // past word size, so shifts smaller than 32 are covered. 
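-+      // For example, for x & ~0xffff the inverted mask is 0xffff, so
-+      // shift == 16 and a single Dins zeroes bits [0..15] of x in place.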
-+ Emit(kLoong64Dins, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.TempImmediate(0), g.TempImmediate(shift)); -+ return; -+ } -+ } -+ VisitBinop(this, node, kLoong64And, true, kLoong64And); -+} -+ -+void InstructionSelector::VisitWord32Or(Node* node) { -+ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32); -+} -+ -+void InstructionSelector::VisitWord64Or(Node* node) { -+ VisitBinop(this, node, kLoong64Or, true, kLoong64Or); -+} -+ -+void InstructionSelector::VisitWord32Xor(Node* node) { -+ Int32BinopMatcher m(node); -+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) && -+ m.right().Is(-1)) { -+ Int32BinopMatcher mleft(m.left().node()); -+ if (!mleft.right().HasValue()) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Nor32, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), -+ g.UseRegister(mleft.right().node())); -+ return; -+ } -+ } -+ if (m.right().Is(-1)) { -+ // Use Nor for bit negation and eliminate constant loading for xori. -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.TempImmediate(0)); -+ return; -+ } -+ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32); -+} -+ -+void InstructionSelector::VisitWord64Xor(Node* node) { -+ Int64BinopMatcher m(node); -+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) && -+ m.right().Is(-1)) { -+ Int64BinopMatcher mleft(m.left().node()); -+ if (!mleft.right().HasValue()) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Nor, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), -+ g.UseRegister(mleft.right().node())); -+ return; -+ } -+ } -+ if (m.right().Is(-1)) { -+ // Use Nor for bit negation and eliminate constant loading for xori. -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.TempImmediate(0)); -+ return; -+ } -+ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor); -+} -+ -+void InstructionSelector::VisitWord32Shl(Node* node) { -+ Int32BinopMatcher m(node); -+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) && -+ m.right().IsInRange(1, 31)) { -+ Loong64OperandGenerator g(this); -+ Int32BinopMatcher mleft(m.left().node()); -+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is -+ // contiguous, and the shift immediate non-zero. -+ if (mleft.right().HasValue()) { -+ uint32_t mask = mleft.right().Value(); -+ uint32_t mask_width = base::bits::CountPopulation(mask); -+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); -+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { -+ uint32_t shift = m.right().Value(); -+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); -+ DCHECK_NE(0u, shift); -+ if ((shift + mask_width) >= 32) { -+ // If the mask is contiguous and reaches or extends beyond the top -+ // bit, only the shift is needed. -+ Emit(kLoong64Shl, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), -+ g.UseImmediate(m.right().node())); -+ return; -+ } -+ } -+ } -+ } -+ VisitRRO(this, kLoong64Shl, node); -+} -+ -+void InstructionSelector::VisitWord32Shr(Node* node) { -+ Int32BinopMatcher m(node); -+ if (m.left().IsWord32And() && m.right().HasValue()) { -+ uint32_t lsb = m.right().Value() & 0x1F; -+ Int32BinopMatcher mleft(m.left().node()); -+ if (mleft.right().HasValue() && mleft.right().Value() != 0) { -+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is -+ // shifted into the least-significant bits. 
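-+      // For example, Shr(And(x, 0xff00), 8) has lsb == 8 and a residual
-+      // mask of 0xff00, so the whole pattern reduces to Ext(x, 8, 8).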
-+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb; -+ unsigned mask_width = base::bits::CountPopulation(mask); -+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask); -+ if ((mask_msb + mask_width + lsb) == 32) { -+ Loong64OperandGenerator g(this); -+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); -+ Emit(kLoong64Ext, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), -+ g.TempImmediate(mask_width)); -+ return; -+ } -+ } -+ } -+ VisitRRO(this, kLoong64Shr, node); -+} -+ -+void InstructionSelector::VisitWord32Sar(Node* node) { -+ Int32BinopMatcher m(node); -+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) { -+ Int32BinopMatcher mleft(m.left().node()); -+ if (m.right().HasValue() && mleft.right().HasValue()) { -+ Loong64OperandGenerator g(this); -+ uint32_t sar = m.right().Value(); -+ uint32_t shl = mleft.right().Value(); -+ if ((sar == shl) && (sar == 16)) { -+ Emit(kLoong64Seh, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node())); -+ return; -+ } else if ((sar == shl) && (sar == 24)) { -+ Emit(kLoong64Seb, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node())); -+ return; -+ } else if ((sar == shl) && (sar == 32)) { -+ Emit(kLoong64Shl, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), g.TempImmediate(0)); -+ return; -+ } -+ } -+ } -+ VisitRRO(this, kLoong64Sar, node); -+} -+ -+void InstructionSelector::VisitWord64Shl(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && -+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { -+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper -+ // 32 bits anyway. -+ Emit(kLoong64Dshl, g.DefineSameAsFirst(node), -+ g.UseRegister(m.left().node()->InputAt(0)), -+ g.UseImmediate(m.right().node())); -+ return; -+ } -+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) && -+ m.right().IsInRange(1, 63)) { -+ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is -+ // contiguous, and the shift immediate non-zero. -+ Int64BinopMatcher mleft(m.left().node()); -+ if (mleft.right().HasValue()) { -+ uint64_t mask = mleft.right().Value(); -+ uint32_t mask_width = base::bits::CountPopulation(mask); -+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); -+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { -+ uint64_t shift = m.right().Value(); -+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); -+ DCHECK_NE(0u, shift); -+ -+ if ((shift + mask_width) >= 64) { -+ // If the mask is contiguous and reaches or extends beyond the top -+ // bit, only the shift is needed. -+ Emit(kLoong64Dshl, g.DefineAsRegister(node), -+ g.UseRegister(mleft.left().node()), -+ g.UseImmediate(m.right().node())); -+ return; -+ } -+ } -+ } -+ } -+ VisitRRO(this, kLoong64Dshl, node); -+} -+ -+void InstructionSelector::VisitWord64Shr(Node* node) { -+ Int64BinopMatcher m(node); -+ if (m.left().IsWord64And() && m.right().HasValue()) { -+ uint32_t lsb = m.right().Value() & 0x3F; -+ Int64BinopMatcher mleft(m.left().node()); -+ if (mleft.right().HasValue() && mleft.right().Value() != 0) { -+ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is -+ // shifted into the least-significant bits. 
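-+      // This is the 64-bit analogue of the Word32Shr pattern above; e.g.
-+      // Shr(And(x, 0xffff0000), 16) reduces to Dext(x, 16, 16).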
-+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
-+ unsigned mask_width = base::bits::CountPopulation(mask);
-+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
-+ if ((mask_msb + mask_width + lsb) == 64) {
-+ Loong64OperandGenerator g(this);
-+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
-+ Emit(kLoong64Dext, g.DefineAsRegister(node),
-+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-+ g.TempImmediate(mask_width));
-+ return;
-+ }
-+ }
-+ }
-+ VisitRRO(this, kLoong64Dshr, node);
-+}
-+
-+void InstructionSelector::VisitWord64Sar(Node* node) {
-+ if (TryEmitExtendingLoad(this, node, node)) return;
-+ VisitRRO(this, kLoong64Dsar, node);
-+}
-+
-+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
-+
-+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
-+
-+void InstructionSelector::VisitWord32Ror(Node* node) {
-+ VisitRRO(this, kLoong64Ror, node);
-+}
-+
-+void InstructionSelector::VisitWord32Clz(Node* node) {
-+ VisitRR(this, kLoong64Clz, node);
-+}
-+
-+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-+
-+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
-+
-+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
-+ g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
-+ g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
-+ UNREACHABLE();
-+}
-+
-+void InstructionSelector::VisitWord32Ctz(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitWord64Ctz(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitWord32Popcnt(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64Popcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitWord64Popcnt(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Emit(kLoong64Dpopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-+}
-+
-+void InstructionSelector::VisitWord64Ror(Node* node) {
-+ VisitRRO(this, kLoong64Dror, node);
-+}
-+
-+void InstructionSelector::VisitWord64Clz(Node* node) {
-+ VisitRR(this, kLoong64Dclz, node);
-+}
-+
-+void InstructionSelector::VisitInt32Add(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Int32BinopMatcher m(node);
-+
-+ // Select Lsa for (left + (left_of_right << imm)).
-+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
-+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
-+ Int32BinopMatcher mright(m.right().node());
-+ if (mright.right().HasValue() && !m.left().HasValue()) {
-+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
-+ if (shift_value > 0 && shift_value <= 31) {
-+ Emit(kLoong64Lsa, g.DefineAsRegister(node),
-+ g.UseRegister(mright.left().node()),
-+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
-+ return;
-+ }
-+ }
-+ }
-+
-+ // Select Lsa for ((left_of_left << imm) + right).
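-+ // Illustrative example (hypothetical values): x + (y << 2) matches the
-+ // first pattern above, so instead of a shift followed by an add, a
-+ // single Lsa with shift_value == 2 computes (y << 2) + x directly.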
-+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
-+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
-+ Int32BinopMatcher mleft(m.left().node());
-+ if (mleft.right().HasValue() && !m.right().HasValue()) {
-+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
-+ if (shift_value > 0 && shift_value <= 31) {
-+ Emit(kLoong64Lsa, g.DefineAsRegister(node),
-+ g.UseRegister(mleft.left().node()),
-+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
-+ return;
-+ }
-+ }
-+ }
-+
-+ VisitBinop(this, node, kLoong64Add, true, kLoong64Add);
-+}
-+
-+void InstructionSelector::VisitInt64Add(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Int64BinopMatcher m(node);
-+
-+ // Select Dlsa for (left + (left_of_right << imm)).
-+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
-+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
-+ Int64BinopMatcher mright(m.right().node());
-+ if (mright.right().HasValue() && !m.left().HasValue()) {
-+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
-+ if (shift_value > 0 && shift_value <= 31) {
-+ Emit(kLoong64Dlsa, g.DefineAsRegister(node),
-+ g.UseRegister(mright.left().node()),
-+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
-+ return;
-+ }
-+ }
-+ }
-+
-+ // Select Dlsa for ((left_of_left << imm) + right).
-+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
-+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
-+ Int64BinopMatcher mleft(m.left().node());
-+ if (mleft.right().HasValue() && !m.right().HasValue()) {
-+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
-+ if (shift_value > 0 && shift_value <= 31) {
-+ Emit(kLoong64Dlsa, g.DefineAsRegister(node),
-+ g.UseRegister(mleft.left().node()),
-+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
-+ return;
-+ }
-+ }
-+ }
-+
-+ VisitBinop(this, node, kLoong64Dadd, true, kLoong64Dadd);
-+}
-+
-+void InstructionSelector::VisitInt32Sub(Node* node) {
-+ VisitBinop(this, node, kLoong64Sub);
-+}
-+
-+void InstructionSelector::VisitInt64Sub(Node* node) {
-+ VisitBinop(this, node, kLoong64Dsub);
-+}
-+
-+void InstructionSelector::VisitInt32Mul(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Int32BinopMatcher m(node);
-+ if (m.right().HasValue() && m.right().Value() > 0) {
-+ uint32_t value = static_cast<uint32_t>(m.right().Value());
-+ if (base::bits::IsPowerOfTwo(value)) {
-+ Emit(kLoong64Shl | AddressingModeField::encode(kMode_None),
-+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
-+ return;
-+ }
-+ if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLoong64r6 &&*/
-+ value - 1 > 0 && value - 1 <= 31) {
-+ Emit(kLoong64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-+ g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
-+ return;
-+ }
-+ if (base::bits::IsPowerOfTwo(value + 1)) {
-+ InstructionOperand temp = g.TempRegister();
-+ Emit(kLoong64Shl | AddressingModeField::encode(kMode_None), temp,
-+ g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
-+ Emit(kLoong64Sub | AddressingModeField::encode(kMode_None),
-+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
-+ return;
-+ }
-+ }
-+ Node* left = node->InputAt(0);
-+ Node* right = node->InputAt(1);
-+ if (CanCover(node, left) && CanCover(node, right)) {
-+ if (left->opcode() == IrOpcode::kWord64Sar &&
-+ right->opcode() == IrOpcode::kWord64Sar) {
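-+ // Why this is sound (sketch, assuming the low 32 bits of both inputs
-+ // are zero, as with Smi-untagged values): if x = a * 2^32 and
-+ // y = b * 2^32, then x * y = a * b * 2^64, so the high 64 bits of the
-+ // 128-bit product of x and y are exactly a * b = (x >> 32) * (y >> 32).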
-+ Int64BinopMatcher leftInput(left), rightInput(right);
-+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
-+ // Combine untagging shifts with Dmul high.
-+ Emit(kLoong64DMulHigh, g.DefineSameAsFirst(node),
-+ g.UseRegister(leftInput.left().node()),
-+ g.UseRegister(rightInput.left().node()));
-+ return;
-+ }
-+ }
-+ }
-+ VisitRRR(this, kLoong64Mul, node);
-+}
-+
-+void InstructionSelector::VisitInt32MulHigh(Node* node) {
-+ VisitRRR(this, kLoong64MulHigh, node);
-+}
-+
-+void InstructionSelector::VisitUint32MulHigh(Node* node) {
-+ VisitRRR(this, kLoong64MulHighU, node);
-+}
-+
-+void InstructionSelector::VisitInt64Mul(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Int64BinopMatcher m(node);
-+ // TODO(dusmil): Add optimization for shifts larger than 32.
-+ if (m.right().HasValue() && m.right().Value() > 0) {
-+ uint32_t value = static_cast<uint32_t>(m.right().Value());
-+ if (base::bits::IsPowerOfTwo(value)) {
-+ Emit(kLoong64Dshl | AddressingModeField::encode(kMode_None),
-+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
-+ return;
-+ }
-+ if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLoong64r6 &&*/
-+ value - 1 > 0 && value - 1 <= 31) {
-+ // The Dlsa macro handles shift amounts that fall out of bounds.
-+ Emit(kLoong64Dlsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-+ g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
-+ return;
-+ }
-+ if (base::bits::IsPowerOfTwo(value + 1)) {
-+ InstructionOperand temp = g.TempRegister();
-+ Emit(kLoong64Dshl | AddressingModeField::encode(kMode_None), temp,
-+ g.UseRegister(m.left().node()),
-+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
-+ Emit(kLoong64Dsub | AddressingModeField::encode(kMode_None),
-+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
-+ return;
-+ }
-+ }
-+ Emit(kLoong64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-+ g.UseRegister(m.right().node()));
-+}
-+
-+void InstructionSelector::VisitInt32Div(Node* node) {
-+ Loong64OperandGenerator g(this);
-+ Int32BinopMatcher m(node);
-+ Node* left = node->InputAt(0);
-+ Node* right = node->InputAt(1);
-+ if (CanCover(node, left) && CanCover(node, right)) {
-+ if (left->opcode() == IrOpcode::kWord64Sar &&
-+ right->opcode() == IrOpcode::kWord64Sar) {
-+ Int64BinopMatcher rightInput(right), leftInput(left);
-+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
-+ // Combine both shifted operands with Ddiv.
-+ Emit(kLoong64Ddiv, g.DefineSameAsFirst(node), -+ g.UseRegister(leftInput.left().node()), -+ g.UseRegister(rightInput.left().node())); -+ return; -+ } -+ } -+ } -+ Emit(kLoong64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitUint32Div(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int32BinopMatcher m(node); -+ Emit(kLoong64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitInt32Mod(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int32BinopMatcher m(node); -+ Node* left = node->InputAt(0); -+ Node* right = node->InputAt(1); -+ if (CanCover(node, left) && CanCover(node, right)) { -+ if (left->opcode() == IrOpcode::kWord64Sar && -+ right->opcode() == IrOpcode::kWord64Sar) { -+ Int64BinopMatcher rightInput(right), leftInput(left); -+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { -+ // Combine both shifted operands with Dmod. -+ Emit(kLoong64Dmod, g.DefineSameAsFirst(node), -+ g.UseRegister(leftInput.left().node()), -+ g.UseRegister(rightInput.left().node())); -+ return; -+ } -+ } -+ } -+ Emit(kLoong64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitUint32Mod(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int32BinopMatcher m(node); -+ Emit(kLoong64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitInt64Div(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ Emit(kLoong64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitUint64Div(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ Emit(kLoong64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitInt64Mod(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ Emit(kLoong64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitUint64Mod(Node* node) { -+ Loong64OperandGenerator g(this); -+ Int64BinopMatcher m(node); -+ Emit(kLoong64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), -+ g.UseRegister(m.right().node())); -+} -+ -+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDS, node); -+} -+ -+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { -+ VisitRR(this, kLoong64CvtSW, node); -+} -+ -+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { -+ VisitRR(this, kLoong64CvtSUw, node); -+} -+ -+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDW, node); -+} -+ -+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDL, node); -+} -+ -+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDUw, node); -+} -+ -+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { -+ VisitRR(this, kLoong64TruncWS, node); -+} -+ -+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { -+ VisitRR(this, kLoong64TruncUwS, node); -+} -+ -+void InstructionSelector::VisitChangeFloat64ToInt32(Node* 
node) { -+ Loong64OperandGenerator g(this); -+ Node* value = node->InputAt(0); -+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction -+ // which does rounding and conversion to integer format. -+ if (CanCover(node, value)) { -+ switch (value->opcode()) { -+ case IrOpcode::kFloat64RoundDown: -+ Emit(kLoong64FloorWD, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ case IrOpcode::kFloat64RoundUp: -+ Emit(kLoong64CeilWD, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ case IrOpcode::kFloat64RoundTiesEven: -+ Emit(kLoong64RoundWD, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ case IrOpcode::kFloat64RoundTruncate: -+ Emit(kLoong64TruncWD, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ default: -+ break; -+ } -+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { -+ Node* next = value->InputAt(0); -+ if (CanCover(value, next)) { -+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) -+ switch (next->opcode()) { -+ case IrOpcode::kFloat32RoundDown: -+ Emit(kLoong64FloorWS, g.DefineAsRegister(node), -+ g.UseRegister(next->InputAt(0))); -+ return; -+ case IrOpcode::kFloat32RoundUp: -+ Emit(kLoong64CeilWS, g.DefineAsRegister(node), -+ g.UseRegister(next->InputAt(0))); -+ return; -+ case IrOpcode::kFloat32RoundTiesEven: -+ Emit(kLoong64RoundWS, g.DefineAsRegister(node), -+ g.UseRegister(next->InputAt(0))); -+ return; -+ case IrOpcode::kFloat32RoundTruncate: -+ Emit(kLoong64TruncWS, g.DefineAsRegister(node), -+ g.UseRegister(next->InputAt(0))); -+ return; -+ default: -+ Emit(kLoong64TruncWS, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ } -+ } else { -+ // Match float32 -> float64 -> int32 representation change path. 
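-+ // (Truncating the original float32 directly is safe: float32 -> float64
-+ // conversion is exact, so kLoong64TruncWS on the float32 input yields
-+ // the same int32 as truncating the widened float64 would.)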
-+ Emit(kLoong64TruncWS, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ } -+ } -+ } -+ VisitRR(this, kLoong64TruncWD, node); -+} -+ -+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) { -+ VisitRR(this, kLoong64TruncLD, node); -+} -+ -+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { -+ VisitRR(this, kLoong64TruncUwD, node); -+} -+ -+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) { -+ VisitRR(this, kLoong64TruncUlD, node); -+} -+ -+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { -+ VisitRR(this, kLoong64TruncUwD, node); -+} -+ -+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) { -+ VisitRR(this, kLoong64TruncLD, node); -+} -+ -+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { -+ Loong64OperandGenerator g(this); -+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; -+ InstructionOperand outputs[2]; -+ size_t output_count = 0; -+ outputs[output_count++] = g.DefineAsRegister(node); -+ -+ Node* success_output = NodeProperties::FindProjection(node, 1); -+ if (success_output) { -+ outputs[output_count++] = g.DefineAsRegister(success_output); -+ } -+ -+ this->Emit(kLoong64TruncLS, output_count, outputs, 1, inputs); -+} -+ -+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { -+ Loong64OperandGenerator g(this); -+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; -+ InstructionOperand outputs[2]; -+ size_t output_count = 0; -+ outputs[output_count++] = g.DefineAsRegister(node); -+ -+ Node* success_output = NodeProperties::FindProjection(node, 1); -+ if (success_output) { -+ outputs[output_count++] = g.DefineAsRegister(success_output); -+ } -+ -+ Emit(kLoong64TruncLD, output_count, outputs, 1, inputs); -+} -+ -+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { -+ Loong64OperandGenerator g(this); -+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; -+ InstructionOperand outputs[2]; -+ size_t output_count = 0; -+ outputs[output_count++] = g.DefineAsRegister(node); -+ -+ Node* success_output = NodeProperties::FindProjection(node, 1); -+ if (success_output) { -+ outputs[output_count++] = g.DefineAsRegister(success_output); -+ } -+ -+ Emit(kLoong64TruncUlS, output_count, outputs, 1, inputs); -+} -+ -+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { -+ Loong64OperandGenerator g(this); -+ -+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; -+ InstructionOperand outputs[2]; -+ size_t output_count = 0; -+ outputs[output_count++] = g.DefineAsRegister(node); -+ -+ Node* success_output = NodeProperties::FindProjection(node, 1); -+ if (success_output) { -+ outputs[output_count++] = g.DefineAsRegister(success_output); -+ } -+ -+ Emit(kLoong64TruncUlD, output_count, outputs, 1, inputs); -+} -+ -+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { -+ UNIMPLEMENTED(); -+} -+ -+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { -+ Node* value = node->InputAt(0); -+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) { -+ // Generate sign-extending load. -+ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); -+ InstructionCode opcode = kArchNop; -+ switch (load_rep.representation()) { -+ case MachineRepresentation::kBit: // Fall through. -+ case MachineRepresentation::kWord8: -+ opcode = load_rep.IsUnsigned() ? 
kLoong64Lbu : kLoong64Lb; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = load_rep.IsUnsigned() ? kLoong64Lhu : kLoong64Lh; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kLoong64Lw; -+ break; -+ default: -+ UNREACHABLE(); -+ return; -+ } -+ EmitLoad(this, value, opcode, node); -+ } else { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), -+ g.TempImmediate(0)); -+ } -+} -+ -+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* value = node->InputAt(0); -+ switch (value->opcode()) { -+ // 32-bit operations will write their result in a 64 bit register, -+ // clearing the top 32 bits of the destination register. -+ case IrOpcode::kUint32Div: -+ case IrOpcode::kUint32Mod: -+ case IrOpcode::kUint32MulHigh: { -+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); -+ return; -+ } -+ case IrOpcode::kLoad: { -+ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); -+ if (load_rep.IsUnsigned()) { -+ switch (load_rep.representation()) { -+ case MachineRepresentation::kWord8: -+ case MachineRepresentation::kWord16: -+ case MachineRepresentation::kWord32: -+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); -+ return; -+ default: -+ break; -+ } -+ } -+ break; -+ } -+ default: -+ break; -+ } -+ Emit(kLoong64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), -+ g.TempImmediate(0), g.TempImmediate(32)); -+} -+ -+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* value = node->InputAt(0); -+ if (CanCover(node, value)) { -+ switch (value->opcode()) { -+ case IrOpcode::kWord64Sar: { -+ if (CanCoverTransitively(node, value, value->InputAt(0)) && -+ TryEmitExtendingLoad(this, value, node)) { -+ return; -+ } else { -+ Int64BinopMatcher m(value); -+ if (m.right().IsInRange(32, 63)) { -+ // After smi untagging no need for truncate. Combine sequence. -+ Emit(kLoong64Dsar, g.DefineSameAsFirst(node), -+ g.UseRegister(m.left().node()), -+ g.UseImmediate(m.right().node())); -+ return; -+ } -+ } -+ break; -+ } -+ default: -+ break; -+ } -+ } -+ Emit(kLoong64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), -+ g.TempImmediate(0), g.TempImmediate(32)); -+} -+ -+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* value = node->InputAt(0); -+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding -+ // instruction. 
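-+ // (This is safe because int32 -> float64 conversion is exact, so
-+ // converting the int32 straight to float32 rounds exactly once, the
-+ // same as narrowing the intermediate float64 to float32 would.)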
-+ if (CanCover(node, value) && -+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) { -+ Emit(kLoong64CvtSW, g.DefineAsRegister(node), -+ g.UseRegister(value->InputAt(0))); -+ return; -+ } -+ VisitRR(this, kLoong64CvtSD, node); -+} -+ -+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { -+ VisitRR(this, kArchTruncateDoubleToI, node); -+} -+ -+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { -+ VisitRR(this, kLoong64TruncWD, node); -+} -+ -+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) { -+ VisitRR(this, kLoong64CvtSL, node); -+} -+ -+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDL, node); -+} -+ -+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) { -+ VisitRR(this, kLoong64CvtSUl, node); -+} -+ -+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) { -+ VisitRR(this, kLoong64CvtDUl, node); -+} -+ -+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { -+ VisitRR(this, kLoong64Float64ExtractLowWord32, node); -+} -+ -+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { -+ VisitRR(this, kLoong64BitcastDL, node); -+} -+ -+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node), -+ ImmediateOperand(ImmediateOperand::INLINE, 0), -+ g.UseRegister(node->InputAt(0))); -+} -+ -+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { -+ VisitRR(this, kLoong64BitcastLD, node); -+} -+ -+void InstructionSelector::VisitFloat32Add(Node* node) { -+ // Optimization with Madd.S(z, x, y) is intentionally removed. -+ // See explanation for madd_s in assembler-loong64.cc. -+ VisitRRR(this, kLoong64AddS, node); -+} -+ -+void InstructionSelector::VisitFloat64Add(Node* node) { -+ // Optimization with Madd.D(z, x, y) is intentionally removed. -+ // See explanation for madd_d in assembler-loong64.cc. -+ VisitRRR(this, kLoong64AddD, node); -+} -+ -+void InstructionSelector::VisitFloat32Sub(Node* node) { -+ // Optimization with Msub.S(z, x, y) is intentionally removed. -+ // See explanation for madd_s in assembler-loong64.cc. -+ VisitRRR(this, kLoong64SubS, node); -+} -+ -+void InstructionSelector::VisitFloat64Sub(Node* node) { -+ // Optimization with Msub.D(z, x, y) is intentionally removed. -+ // See explanation for madd_d in assembler-loong64.cc. 
-+ VisitRRR(this, kLoong64SubD, node); -+} -+ -+void InstructionSelector::VisitFloat32Mul(Node* node) { -+ VisitRRR(this, kLoong64MulS, node); -+} -+ -+void InstructionSelector::VisitFloat64Mul(Node* node) { -+ VisitRRR(this, kLoong64MulD, node); -+} -+ -+void InstructionSelector::VisitFloat32Div(Node* node) { -+ VisitRRR(this, kLoong64DivS, node); -+} -+ -+void InstructionSelector::VisitFloat64Div(Node* node) { -+ VisitRRR(this, kLoong64DivD, node); -+} -+ -+void InstructionSelector::VisitFloat64Mod(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64ModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0), -+ g.UseFixed(node->InputAt(1), f1)) -+ ->MarkAsCall(); -+} -+ -+void InstructionSelector::VisitFloat32Max(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Float32Max, g.DefineAsRegister(node), -+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -+} -+ -+void InstructionSelector::VisitFloat64Max(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Float64Max, g.DefineAsRegister(node), -+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -+} -+ -+void InstructionSelector::VisitFloat32Min(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Float32Min, g.DefineAsRegister(node), -+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -+} -+ -+void InstructionSelector::VisitFloat64Min(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Float64Min, g.DefineAsRegister(node), -+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -+} -+ -+void InstructionSelector::VisitFloat32Abs(Node* node) { -+ VisitRR(this, kLoong64AbsS, node); -+} -+ -+void InstructionSelector::VisitFloat64Abs(Node* node) { -+ VisitRR(this, kLoong64AbsD, node); -+} -+ -+void InstructionSelector::VisitFloat32Sqrt(Node* node) { -+ VisitRR(this, kLoong64SqrtS, node); -+} -+ -+void InstructionSelector::VisitFloat64Sqrt(Node* node) { -+ VisitRR(this, kLoong64SqrtD, node); -+} -+ -+void InstructionSelector::VisitFloat32RoundDown(Node* node) { -+ VisitRR(this, kLoong64Float32RoundDown, node); -+} -+ -+void InstructionSelector::VisitFloat64RoundDown(Node* node) { -+ VisitRR(this, kLoong64Float64RoundDown, node); -+} -+ -+void InstructionSelector::VisitFloat32RoundUp(Node* node) { -+ VisitRR(this, kLoong64Float32RoundUp, node); -+} -+ -+void InstructionSelector::VisitFloat64RoundUp(Node* node) { -+ VisitRR(this, kLoong64Float64RoundUp, node); -+} -+ -+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { -+ VisitRR(this, kLoong64Float32RoundTruncate, node); -+} -+ -+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { -+ VisitRR(this, kLoong64Float64RoundTruncate, node); -+} -+ -+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { -+ UNREACHABLE(); -+} -+ -+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { -+ VisitRR(this, kLoong64Float32RoundTiesEven, node); -+} -+ -+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { -+ VisitRR(this, kLoong64Float64RoundTiesEven, node); -+} -+ -+void InstructionSelector::VisitFloat32Neg(Node* node) { -+ VisitRR(this, kLoong64NegS, node); -+} -+ -+void InstructionSelector::VisitFloat64Neg(Node* node) { -+ VisitRR(this, kLoong64NegD, node); -+} -+ -+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, -+ InstructionCode opcode) { -+ Loong64OperandGenerator g(this); -+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), -+ g.UseFixed(node->InputAt(1), f4)) 
-+ ->MarkAsCall();
-+}
-+
-+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
-+ InstructionCode opcode) {
-+ Loong64OperandGenerator g(this);
-+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
-+ ->MarkAsCall();
-+}
-+
-+void InstructionSelector::EmitPrepareArguments(
-+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
-+ Node* node) {
-+ Loong64OperandGenerator g(this);
-+
-+ // Prepare for C function call.
-+ if (call_descriptor->IsCFunctionCall()) {
-+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
-+ call_descriptor->ParameterCount())),
-+ 0, nullptr, 0, nullptr);
-+
-+ // Poke any stack arguments.
-+ int slot = kCArgSlotCount;
-+ for (PushParameter input : (*arguments)) {
-+ Emit(kLoong64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
-+ g.TempImmediate(slot << kSystemPointerSizeLog2));
-+ ++slot;
-+ }
-+ } else {
-+ int push_count = static_cast<int>(call_descriptor->StackParameterCount());
-+ if (push_count > 0) {
-+ // Calculate needed space
-+ int stack_size = 0;
-+ for (PushParameter input : (*arguments)) {
-+ if (input.node) {
-+ stack_size += input.location.GetSizeInPointers();
-+ }
-+ }
-+ Emit(kLoong64StackClaim, g.NoOutput(),
-+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
-+ }
-+ for (size_t n = 0; n < arguments->size(); ++n) {
-+ PushParameter input = (*arguments)[n];
-+ if (input.node) {
-+ Emit(kLoong64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
-+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
-+ }
-+ }
-+ }
-+}
-+
-+void InstructionSelector::EmitPrepareResults(
-+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
-+ Node* node) {
-+ Loong64OperandGenerator g(this);
-+
-+ int reverse_slot = 0;
-+ for (PushParameter output : *results) {
-+ if (!output.location.IsCallerFrameSlot()) continue;
-+ // Skip any alignment holes in nodes.
-+ if (output.node != nullptr) {
-+ DCHECK(!call_descriptor->IsCFunctionCall());
-+ if (output.location.GetType() == MachineType::Float32()) {
-+ MarkAsFloat32(output.node);
-+ } else if (output.location.GetType() == MachineType::Float64()) {
-+ MarkAsFloat64(output.node);
-+ }
-+ Emit(kLoong64Peek, g.DefineAsRegister(output.node),
-+ g.UseImmediate(reverse_slot));
-+ }
-+ reverse_slot += output.location.GetSizeInPointers();
-+ }
-+}
-+
-+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
-+
-+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
-+
-+void InstructionSelector::VisitUnalignedLoad(Node* node) {
-+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-+ Loong64OperandGenerator g(this);
-+ Node* base = node->InputAt(0);
-+ Node* index = node->InputAt(1);
-+
-+ ArchOpcode opcode = kArchNop;
-+ switch (load_rep.representation()) {
-+ case MachineRepresentation::kFloat32:
-+ opcode = kLoong64Ulwc1;
-+ break;
-+ case MachineRepresentation::kFloat64:
-+ opcode = kLoong64Uldc1;
-+ break;
-+ case MachineRepresentation::kBit: // Fall through.
-+ case MachineRepresentation::kWord8:
-+ UNREACHABLE();
-+ case MachineRepresentation::kWord16:
-+ opcode = load_rep.IsUnsigned() ? kLoong64Ulhu : kLoong64Ulh;
-+ break;
-+ case MachineRepresentation::kWord32:
-+ opcode = load_rep.IsUnsigned() ? kLoong64Ulwu : kLoong64Ulw;
-+ break;
-+ case MachineRepresentation::kTaggedSigned: // Fall through.
-+ case MachineRepresentation::kTaggedPointer: // Fall through.
-+ case MachineRepresentation::kTagged: // Fall through.
-+ case MachineRepresentation::kWord64: -+ opcode = kLoong64Uld; -+ break; -+ case MachineRepresentation::kCompressedPointer: // Fall through. -+ case MachineRepresentation::kCompressed: // Fall through. -+ case MachineRepresentation::kNone: -+ case MachineRepresentation::kSimd128: -+ UNREACHABLE(); -+ } -+ -+ if (g.CanBeImmediate(index, opcode)) { -+ Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); -+ } else { -+ InstructionOperand addr_reg = g.TempRegister(); -+ Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), addr_reg, -+ g.UseRegister(index), g.UseRegister(base)); -+ // Emit desired load opcode, using temp addr_reg. -+ Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); -+ } -+} -+ -+void InstructionSelector::VisitUnalignedStore(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* base = node->InputAt(0); -+ Node* index = node->InputAt(1); -+ Node* value = node->InputAt(2); -+ -+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); -+ ArchOpcode opcode = kArchNop; -+ switch (rep) { -+ case MachineRepresentation::kFloat32: -+ opcode = kLoong64Uswc1; -+ break; -+ case MachineRepresentation::kFloat64: -+ opcode = kLoong64Usdc1; -+ break; -+ case MachineRepresentation::kBit: // Fall through. -+ case MachineRepresentation::kWord8: -+ UNREACHABLE(); -+ case MachineRepresentation::kWord16: -+ opcode = kLoong64Ush; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kLoong64Usw; -+ break; -+ case MachineRepresentation::kTaggedSigned: // Fall through. -+ case MachineRepresentation::kTaggedPointer: // Fall through. -+ case MachineRepresentation::kTagged: // Fall through. -+ case MachineRepresentation::kWord64: -+ opcode = kLoong64Usd; -+ break; -+ case MachineRepresentation::kCompressedPointer: // Fall through. -+ case MachineRepresentation::kCompressed: // Fall through. -+ case MachineRepresentation::kNone: -+ case MachineRepresentation::kSimd128: -+ UNREACHABLE(); -+ } -+ -+ if (g.CanBeImmediate(index, opcode)) { -+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), -+ g.UseRegister(base), g.UseImmediate(index), -+ g.UseRegisterOrImmediateZero(value)); -+ } else { -+ InstructionOperand addr_reg = g.TempRegister(); -+ Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), addr_reg, -+ g.UseRegister(index), g.UseRegister(base)); -+ // Emit desired store opcode, using temp addr_reg. -+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), -+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); -+ } -+} -+ -+namespace { -+ -+// Shared routine for multiple compare operations. -+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, -+ InstructionOperand left, InstructionOperand right, -+ FlagsContinuation* cont) { -+ selector->EmitWithContinuation(opcode, left, right, cont); -+} -+ -+// Shared routine for multiple float32 compare operations. -+void VisitFloat32Compare(InstructionSelector* selector, Node* node, -+ FlagsContinuation* cont) { -+ Loong64OperandGenerator g(selector); -+ Float32BinopMatcher m(node); -+ InstructionOperand lhs, rhs; -+ -+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) -+ : g.UseRegister(m.left().node()); -+ rhs = m.right().IsZero() ? 
g.UseImmediate(m.right().node()) -+ : g.UseRegister(m.right().node()); -+ VisitCompare(selector, kLoong64CmpS, lhs, rhs, cont); -+} -+ -+// Shared routine for multiple float64 compare operations. -+void VisitFloat64Compare(InstructionSelector* selector, Node* node, -+ FlagsContinuation* cont) { -+ Loong64OperandGenerator g(selector); -+ Float64BinopMatcher m(node); -+ InstructionOperand lhs, rhs; -+ -+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) -+ : g.UseRegister(m.left().node()); -+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) -+ : g.UseRegister(m.right().node()); -+ VisitCompare(selector, kLoong64CmpD, lhs, rhs, cont); -+} -+ -+// Shared routine for multiple word compare operations. -+void VisitWordCompare(InstructionSelector* selector, Node* node, -+ InstructionCode opcode, FlagsContinuation* cont, -+ bool commutative) { -+ Loong64OperandGenerator g(selector); -+ Node* left = node->InputAt(0); -+ Node* right = node->InputAt(1); -+ -+ // Match immediates on left or right side of comparison. -+ if (g.CanBeImmediate(right, opcode)) { -+ if (opcode == kLoong64Tst) { -+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), -+ cont); -+ } else { -+ switch (cont->condition()) { -+ case kEqual: -+ case kNotEqual: -+ if (cont->IsSet()) { -+ VisitCompare(selector, opcode, g.UseRegister(left), -+ g.UseImmediate(right), cont); -+ } else { -+ VisitCompare(selector, opcode, g.UseRegister(left), -+ g.UseRegister(right), cont); -+ } -+ break; -+ case kSignedLessThan: -+ case kSignedGreaterThanOrEqual: -+ case kUnsignedLessThan: -+ case kUnsignedGreaterThanOrEqual: -+ VisitCompare(selector, opcode, g.UseRegister(left), -+ g.UseImmediate(right), cont); -+ break; -+ default: -+ VisitCompare(selector, opcode, g.UseRegister(left), -+ g.UseRegister(right), cont); -+ } -+ } -+ } else if (g.CanBeImmediate(left, opcode)) { -+ if (!commutative) cont->Commute(); -+ if (opcode == kLoong64Tst) { -+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), -+ cont); -+ } else { -+ switch (cont->condition()) { -+ case kEqual: -+ case kNotEqual: -+ if (cont->IsSet()) { -+ VisitCompare(selector, opcode, g.UseRegister(right), -+ g.UseImmediate(left), cont); -+ } else { -+ VisitCompare(selector, opcode, g.UseRegister(right), -+ g.UseRegister(left), cont); -+ } -+ break; -+ case kSignedLessThan: -+ case kSignedGreaterThanOrEqual: -+ case kUnsignedLessThan: -+ case kUnsignedGreaterThanOrEqual: -+ VisitCompare(selector, opcode, g.UseRegister(right), -+ g.UseImmediate(left), cont); -+ break; -+ default: -+ VisitCompare(selector, opcode, g.UseRegister(right), -+ g.UseRegister(left), cont); -+ } -+ } -+ } else { -+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), -+ cont); -+ } -+} -+ -+bool IsNodeUnsigned(Node* n) { -+ NodeMatcher m(n); -+ -+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || -+ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { -+ LoadRepresentation load_rep = LoadRepresentationOf(n->op()); -+ return load_rep.IsUnsigned(); -+ } else { -+ return m.IsUint32Div() || m.IsUint32LessThan() || -+ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() || -+ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() || -+ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32(); -+ } -+} -+ -+// Shared routine for multiple word compare operations. 
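-+ // Note on the "full" compare below: shifting both operands left by 32
-+ // makes the 64-bit comparison depend only on the low 32 bits of each
-+ // input, regardless of how their upper 32 bits were extended.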
-+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
-+ InstructionCode opcode, FlagsContinuation* cont) {
-+ Loong64OperandGenerator g(selector);
-+ InstructionOperand leftOp = g.TempRegister();
-+ InstructionOperand rightOp = g.TempRegister();
-+
-+ selector->Emit(kLoong64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
-+ g.TempImmediate(32));
-+ selector->Emit(kLoong64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
-+ g.TempImmediate(32));
-+
-+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
-+}
-+
-+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
-+ InstructionCode opcode,
-+ FlagsContinuation* cont) {
-+ if (FLAG_debug_code) {
-+ Loong64OperandGenerator g(selector);
-+ InstructionOperand leftOp = g.TempRegister();
-+ InstructionOperand rightOp = g.TempRegister();
-+ InstructionOperand optimizedResult = g.TempRegister();
-+ InstructionOperand fullResult = g.TempRegister();
-+ FlagsCondition condition = cont->condition();
-+ InstructionCode testOpcode = opcode |
-+ FlagsConditionField::encode(condition) |
-+ FlagsModeField::encode(kFlags_set);
-+
-+ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
-+ g.UseRegister(node->InputAt(1)));
-+
-+ selector->Emit(kLoong64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
-+ g.TempImmediate(32));
-+ selector->Emit(kLoong64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
-+ g.TempImmediate(32));
-+ selector->Emit(testOpcode, fullResult, leftOp, rightOp);
-+
-+ selector->Emit(kLoong64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
-+ g.TempImmediate(static_cast<int>(
-+ AbortReason::kUnsupportedNonPrimitiveCompare)));
-+ }
-+
-+ VisitWordCompare(selector, node, opcode, cont, false);
-+}
-+
-+void VisitWord32Compare(InstructionSelector* selector, Node* node,
-+ FlagsContinuation* cont) {
-+ // LOONG64 doesn't support Word32 compare instructions. Instead it relies
-+ // on the values in registers being correctly sign-extended, and uses a
-+ // Word64 comparison instead. This behavior is correct in most cases,
-+ // but doesn't work when comparing signed with unsigned operands.
-+ // We could simulate a full Word32 compare in all cases, but that would
-+ // create unnecessary overhead, since unsigned integers are rarely
-+ // used in JavaScript.
-+ // The solution proposed here tries to match a comparison of a signed
-+ // with an unsigned operand, and performs the full Word32Compare only
-+ // in those cases. Unfortunately, the solution is not complete, because
-+ // it might skip cases where a full Word32 compare is needed, so
-+ // basically it is a hack.
-+ // When calling a host function in the simulator, if the function returns
-+ // an int32 value, the simulator does not sign-extend it to int64,
-+ // because the simulator cannot know whether the function returns an
-+ // int32 or an int64. So we need to do a full word32 compare in this
-+ // case.
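-+ // Illustrative mismatch (hypothetical values): 0x80000000 loaded as a
-+ // zero-extended uint32 is 0x00000000'80000000 in a register, while the
-+ // same value sign-extended as int32 is 0xFFFFFFFF'80000000; a plain
-+ // 64-bit compare sees them as different, so the full compare is needed.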
-+#ifndef USE_SIMULATOR -+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { -+#else -+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || -+ node->InputAt(0)->opcode() == IrOpcode::kCall || -+ node->InputAt(1)->opcode() == IrOpcode::kCall) { -+#endif -+ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont); -+ } else { -+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont); -+ } -+} -+ -+void VisitWord64Compare(InstructionSelector* selector, Node* node, -+ FlagsContinuation* cont) { -+ VisitWordCompare(selector, node, kLoong64Cmp, cont, false); -+} -+ -+void EmitWordCompareZero(InstructionSelector* selector, Node* value, -+ FlagsContinuation* cont) { -+ Loong64OperandGenerator g(selector); -+ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value), -+ g.TempImmediate(0), cont); -+} -+ -+void VisitAtomicLoad(InstructionSelector* selector, Node* node, -+ ArchOpcode opcode) { -+ Loong64OperandGenerator g(selector); -+ Node* base = node->InputAt(0); -+ Node* index = node->InputAt(1); -+ if (g.CanBeImmediate(index, opcode)) { -+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.DefineAsRegister(node), g.UseRegister(base), -+ g.UseImmediate(index)); -+ } else { -+ InstructionOperand addr_reg = g.TempRegister(); -+ selector->Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), -+ addr_reg, g.UseRegister(index), g.UseRegister(base)); -+ // Emit desired load opcode, using temp addr_reg. -+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); -+ } -+} -+ -+void VisitAtomicStore(InstructionSelector* selector, Node* node, -+ ArchOpcode opcode) { -+ Loong64OperandGenerator g(selector); -+ Node* base = node->InputAt(0); -+ Node* index = node->InputAt(1); -+ Node* value = node->InputAt(2); -+ -+ if (g.CanBeImmediate(index, opcode)) { -+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), -+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index), -+ g.UseRegisterOrImmediateZero(value)); -+ } else { -+ InstructionOperand addr_reg = g.TempRegister(); -+ selector->Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), -+ addr_reg, g.UseRegister(index), g.UseRegister(base)); -+ // Emit desired store opcode, using temp addr_reg. 
-+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
-+ g.NoOutput(), addr_reg, g.TempImmediate(0),
-+ g.UseRegisterOrImmediateZero(value));
-+ }
-+}
-+
-+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
-+ ArchOpcode opcode) {
-+ Loong64OperandGenerator g(selector);
-+ Node* base = node->InputAt(0);
-+ Node* index = node->InputAt(1);
-+ Node* value = node->InputAt(2);
-+
-+ AddressingMode addressing_mode = kMode_MRI;
-+ InstructionOperand inputs[3];
-+ size_t input_count = 0;
-+ inputs[input_count++] = g.UseUniqueRegister(base);
-+ inputs[input_count++] = g.UseUniqueRegister(index);
-+ inputs[input_count++] = g.UseUniqueRegister(value);
-+ InstructionOperand outputs[1];
-+ outputs[0] = g.UseUniqueRegister(node);
-+ InstructionOperand temp[3];
-+ temp[0] = g.TempRegister();
-+ temp[1] = g.TempRegister();
-+ temp[2] = g.TempRegister();
-+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
-+}
-+
-+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
-+ ArchOpcode opcode) {
-+ Loong64OperandGenerator g(selector);
-+ Node* base = node->InputAt(0);
-+ Node* index = node->InputAt(1);
-+ Node* old_value = node->InputAt(2);
-+ Node* new_value = node->InputAt(3);
-+
-+ AddressingMode addressing_mode = kMode_MRI;
-+ InstructionOperand inputs[4];
-+ size_t input_count = 0;
-+ inputs[input_count++] = g.UseUniqueRegister(base);
-+ inputs[input_count++] = g.UseUniqueRegister(index);
-+ inputs[input_count++] = g.UseUniqueRegister(old_value);
-+ inputs[input_count++] = g.UseUniqueRegister(new_value);
-+ InstructionOperand outputs[1];
-+ outputs[0] = g.UseUniqueRegister(node);
-+ InstructionOperand temp[3];
-+ temp[0] = g.TempRegister();
-+ temp[1] = g.TempRegister();
-+ temp[2] = g.TempRegister();
-+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
-+}
-+
-+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
-+ ArchOpcode opcode) {
-+ Loong64OperandGenerator g(selector);
-+ Node* base = node->InputAt(0);
-+ Node* index = node->InputAt(1);
-+ Node* value = node->InputAt(2);
-+
-+ AddressingMode addressing_mode = kMode_MRI;
-+ InstructionOperand inputs[3];
-+ size_t input_count = 0;
-+ inputs[input_count++] = g.UseUniqueRegister(base);
-+ inputs[input_count++] = g.UseUniqueRegister(index);
-+ inputs[input_count++] = g.UseUniqueRegister(value);
-+ InstructionOperand outputs[1];
-+ outputs[0] = g.UseUniqueRegister(node);
-+ InstructionOperand temps[4];
-+ temps[0] = g.TempRegister();
-+ temps[1] = g.TempRegister();
-+ temps[2] = g.TempRegister();
-+ temps[3] = g.TempRegister();
-+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
-+}
-+
-+} // namespace
-+
-+void InstructionSelector::VisitStackPointerGreaterThan(
-+ Node* node, FlagsContinuation* cont) {
-+ StackCheckKind kind = StackCheckKindOf(node->op());
-+ InstructionCode opcode =
-+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
-+
-+ Loong64OperandGenerator g(this);
-+
-+ // No outputs.
-+ InstructionOperand* const outputs = nullptr;
-+ const int output_count = 0;
-+
-+ // Applying an offset to this stack check requires a temp register. Offsets
-+ // are only applied to the first stack check.
If applying an offset, we must -+ // ensure the input and temp registers do not alias, thus kUniqueRegister. -+ InstructionOperand temps[] = {g.TempRegister()}; -+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0); -+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) -+ ? OperandGenerator::kUniqueRegister -+ : OperandGenerator::kRegister; -+ -+ Node* const value = node->InputAt(0); -+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; -+ static constexpr int input_count = arraysize(inputs); -+ -+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, -+ temp_count, temps, cont); -+} -+ -+// Shared routine for word comparisons against zero. -+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, -+ FlagsContinuation* cont) { -+ // Try to combine with comparisons against 0 by simply inverting the branch. -+ while (CanCover(user, value)) { -+ if (value->opcode() == IrOpcode::kWord32Equal) { -+ Int32BinopMatcher m(value); -+ if (!m.right().Is(0)) break; -+ user = value; -+ value = m.left().node(); -+ } else if (value->opcode() == IrOpcode::kWord64Equal) { -+ Int64BinopMatcher m(value); -+ if (!m.right().Is(0)) break; -+ user = value; -+ value = m.left().node(); -+ } else { -+ break; -+ } -+ -+ cont->Negate(); -+ } -+ -+ if (CanCover(user, value)) { -+ switch (value->opcode()) { -+ case IrOpcode::kWord32Equal: -+ cont->OverwriteAndNegateIfEqual(kEqual); -+ return VisitWord32Compare(this, value, cont); -+ case IrOpcode::kInt32LessThan: -+ cont->OverwriteAndNegateIfEqual(kSignedLessThan); -+ return VisitWord32Compare(this, value, cont); -+ case IrOpcode::kInt32LessThanOrEqual: -+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); -+ return VisitWord32Compare(this, value, cont); -+ case IrOpcode::kUint32LessThan: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); -+ return VisitWord32Compare(this, value, cont); -+ case IrOpcode::kUint32LessThanOrEqual: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); -+ return VisitWord32Compare(this, value, cont); -+ case IrOpcode::kWord64Equal: -+ cont->OverwriteAndNegateIfEqual(kEqual); -+ return VisitWord64Compare(this, value, cont); -+ case IrOpcode::kInt64LessThan: -+ cont->OverwriteAndNegateIfEqual(kSignedLessThan); -+ return VisitWord64Compare(this, value, cont); -+ case IrOpcode::kInt64LessThanOrEqual: -+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); -+ return VisitWord64Compare(this, value, cont); -+ case IrOpcode::kUint64LessThan: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); -+ return VisitWord64Compare(this, value, cont); -+ case IrOpcode::kUint64LessThanOrEqual: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); -+ return VisitWord64Compare(this, value, cont); -+ case IrOpcode::kFloat32Equal: -+ cont->OverwriteAndNegateIfEqual(kEqual); -+ return VisitFloat32Compare(this, value, cont); -+ case IrOpcode::kFloat32LessThan: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); -+ return VisitFloat32Compare(this, value, cont); -+ case IrOpcode::kFloat32LessThanOrEqual: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); -+ return VisitFloat32Compare(this, value, cont); -+ case IrOpcode::kFloat64Equal: -+ cont->OverwriteAndNegateIfEqual(kEqual); -+ return VisitFloat64Compare(this, value, cont); -+ case IrOpcode::kFloat64LessThan: -+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); -+ return VisitFloat64Compare(this, value, cont); -+ case IrOpcode::kFloat64LessThanOrEqual: -+ 
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-+ return VisitFloat64Compare(this, value, cont);
-+ case IrOpcode::kProjection:
-+ // Check if this is the overflow output projection of an
-+ // <Operation>WithOverflow node.
-+ if (ProjectionIndexOf(value->op()) == 1u) {
-+ // We cannot combine the <Operation>WithOverflow with this branch
-+ // unless the 0th projection (the use of the actual value of the
-+ // <Operation> is either nullptr, which means there's no use of the
-+ // actual value, or was already defined, which means it is scheduled
-+ // *AFTER* this branch).
-+ Node* const node = value->InputAt(0);
-+ Node* const result = NodeProperties::FindProjection(node, 0);
-+ if (result == nullptr || IsDefined(result)) {
-+ switch (node->opcode()) {
-+ case IrOpcode::kInt32AddWithOverflow:
-+ cont->OverwriteAndNegateIfEqual(kOverflow);
-+ return VisitBinop(this, node, kLoong64Dadd, cont);
-+ case IrOpcode::kInt32SubWithOverflow:
-+ cont->OverwriteAndNegateIfEqual(kOverflow);
-+ return VisitBinop(this, node, kLoong64Dsub, cont);
-+ case IrOpcode::kInt32MulWithOverflow:
-+ cont->OverwriteAndNegateIfEqual(kOverflow);
-+ return VisitBinop(this, node, kLoong64MulOvf, cont);
-+ case IrOpcode::kInt64AddWithOverflow:
-+ cont->OverwriteAndNegateIfEqual(kOverflow);
-+ return VisitBinop(this, node, kLoong64DaddOvf, cont);
-+ case IrOpcode::kInt64SubWithOverflow:
-+ cont->OverwriteAndNegateIfEqual(kOverflow);
-+ return VisitBinop(this, node, kLoong64DsubOvf, cont);
-+ default:
-+ break;
-+ }
-+ }
-+ }
-+ break;
-+ case IrOpcode::kWord32And:
-+ case IrOpcode::kWord64And:
-+ return VisitWordCompare(this, value, kLoong64Tst, cont, true);
-+ case IrOpcode::kStackPointerGreaterThan:
-+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
-+ return VisitStackPointerGreaterThan(value, cont);
-+ default:
-+ break;
-+ }
-+ }
-+
-+ // Continuation could not be combined with a compare, emit compare against 0.
-+ EmitWordCompareZero(this, value, cont);
-+}
-+
-+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
-+ Loong64OperandGenerator g(this);
-+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
-+
-+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
-+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
-+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
-+ size_t table_space_cost = 10 + 2 * sw.value_range();
-+ size_t table_time_cost = 3;
-+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
-+ size_t lookup_time_cost = sw.case_count();
-+ if (sw.case_count() > 0 &&
-+ table_space_cost + 3 * table_time_cost <=
-+ lookup_space_cost + 3 * lookup_time_cost &&
-+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
-+ sw.value_range() <= kMaxTableSwitchValueRange) {
-+ InstructionOperand index_operand = value_operand;
-+ if (sw.min_value()) {
-+ index_operand = g.TempRegister();
-+ Emit(kLoong64Sub, index_operand, value_operand,
-+ g.TempImmediate(sw.min_value()));
-+ }
-+ // Generate a table lookup.
-+ return EmitTableSwitch(sw, index_operand);
-+ }
-+ }
-+
-+ // Generate a tree of conditional jumps.
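-+ // Worked example of the heuristic above (hypothetical switch): for
-+ // 8 cases spread over a value range of 16, the table path costs
-+ // 10 + 2 * 16 + 3 * 3 = 51 versus 2 + 2 * 8 + 3 * 8 = 42 for the
-+ // lookup path, so the binary search below is chosen.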
-+ return EmitBinarySearchSwitch(sw, value_operand); -+} -+ -+void InstructionSelector::VisitWord32Equal(Node* const node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); -+ Int32BinopMatcher m(node); -+ if (m.right().Is(0)) { -+ return VisitWordCompareZero(m.node(), m.left().node(), &cont); -+ } -+ -+ VisitWord32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitInt32LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); -+ VisitWord32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); -+ VisitWord32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitUint32LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); -+ VisitWord32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); -+ VisitWord32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { -+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); -+ return VisitBinop(this, node, kLoong64Dadd, &cont); -+ } -+ FlagsContinuation cont; -+ VisitBinop(this, node, kLoong64Dadd, &cont); -+} -+ -+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { -+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); -+ return VisitBinop(this, node, kLoong64Dsub, &cont); -+ } -+ FlagsContinuation cont; -+ VisitBinop(this, node, kLoong64Dsub, &cont); -+} -+ -+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { -+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); -+ return VisitBinop(this, node, kLoong64MulOvf, &cont); -+ } -+ FlagsContinuation cont; -+ VisitBinop(this, node, kLoong64MulOvf, &cont); -+} -+ -+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { -+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); -+ return VisitBinop(this, node, kLoong64DaddOvf, &cont); -+ } -+ FlagsContinuation cont; -+ VisitBinop(this, node, kLoong64DaddOvf, &cont); -+} -+ -+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { -+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); -+ return VisitBinop(this, node, kLoong64DsubOvf, &cont); -+ } -+ FlagsContinuation cont; -+ VisitBinop(this, node, kLoong64DsubOvf, &cont); -+} -+ -+void InstructionSelector::VisitWord64Equal(Node* const node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); -+ Int64BinopMatcher m(node); -+ if (m.right().Is(0)) { -+ return VisitWordCompareZero(m.node(), m.left().node(), &cont); -+ } -+ -+ VisitWord64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitInt64LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); -+ VisitWord64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); 
-+ VisitWord64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitUint64LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); -+ VisitWord64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); -+ VisitWord64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat32Equal(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); -+ VisitFloat32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat32LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); -+ VisitFloat32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); -+ VisitFloat32Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat64Equal(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); -+ VisitFloat64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat64LessThan(Node* node) { -+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); -+ VisitFloat64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { -+ FlagsContinuation cont = -+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); -+ VisitFloat64Compare(this, node, &cont); -+} -+ -+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { -+ VisitRR(this, kLoong64Float64ExtractLowWord32, node); -+} -+ -+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { -+ VisitRR(this, kLoong64Float64ExtractHighWord32, node); -+} -+ -+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { -+ VisitRR(this, kLoong64Float64SilenceNaN, node); -+} -+ -+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* left = node->InputAt(0); -+ Node* right = node->InputAt(1); -+ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node), -+ g.UseRegister(left), g.UseRegister(right)); -+} -+ -+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Node* left = node->InputAt(0); -+ Node* right = node->InputAt(1); -+ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node), -+ g.UseRegister(left), g.UseRegister(right)); -+} -+ -+void InstructionSelector::VisitMemoryBarrier(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Sync, g.NoOutput()); -+} -+ -+void InstructionSelector::VisitWord32AtomicLoad(Node* node) { -+ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); -+ ArchOpcode opcode = kArchNop; -+ switch (load_rep.representation()) { -+ case MachineRepresentation::kWord8: -+ opcode = -+ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = load_rep.IsSigned() ? 
kWord32AtomicLoadInt16 -+ : kWord32AtomicLoadUint16; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kWord32AtomicLoadWord32; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ VisitAtomicLoad(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord32AtomicStore(Node* node) { -+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); -+ ArchOpcode opcode = kArchNop; -+ switch (rep) { -+ case MachineRepresentation::kWord8: -+ opcode = kWord32AtomicStoreWord8; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = kWord32AtomicStoreWord16; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kWord32AtomicStoreWord32; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ -+ VisitAtomicStore(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord64AtomicLoad(Node* node) { -+ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); -+ ArchOpcode opcode = kArchNop; -+ switch (load_rep.representation()) { -+ case MachineRepresentation::kWord8: -+ opcode = kLoong64Word64AtomicLoadUint8; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = kLoong64Word64AtomicLoadUint16; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kLoong64Word64AtomicLoadUint32; -+ break; -+ case MachineRepresentation::kWord64: -+ opcode = kLoong64Word64AtomicLoadUint64; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ VisitAtomicLoad(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord64AtomicStore(Node* node) { -+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); -+ ArchOpcode opcode = kArchNop; -+ switch (rep) { -+ case MachineRepresentation::kWord8: -+ opcode = kLoong64Word64AtomicStoreWord8; -+ break; -+ case MachineRepresentation::kWord16: -+ opcode = kLoong64Word64AtomicStoreWord16; -+ break; -+ case MachineRepresentation::kWord32: -+ opcode = kLoong64Word64AtomicStoreWord32; -+ break; -+ case MachineRepresentation::kWord64: -+ opcode = kLoong64Word64AtomicStoreWord64; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ -+ VisitAtomicStore(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord32AtomicExchange(Node* node) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = AtomicOpType(node->op()); -+ if (type == MachineType::Int8()) { -+ opcode = kWord32AtomicExchangeInt8; -+ } else if (type == MachineType::Uint8()) { -+ opcode = kWord32AtomicExchangeUint8; -+ } else if (type == MachineType::Int16()) { -+ opcode = kWord32AtomicExchangeInt16; -+ } else if (type == MachineType::Uint16()) { -+ opcode = kWord32AtomicExchangeUint16; -+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { -+ opcode = kWord32AtomicExchangeWord32; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ -+ VisitAtomicExchange(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord64AtomicExchange(Node* node) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = AtomicOpType(node->op()); -+ if (type == MachineType::Uint8()) { -+ opcode = kLoong64Word64AtomicExchangeUint8; -+ } else if (type == MachineType::Uint16()) { -+ opcode = kLoong64Word64AtomicExchangeUint16; -+ } else if (type == MachineType::Uint32()) { -+ opcode = kLoong64Word64AtomicExchangeUint32; -+ } else if (type == MachineType::Uint64()) { -+ opcode = kLoong64Word64AtomicExchangeUint64; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ VisitAtomicExchange(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = 
AtomicOpType(node->op()); -+ if (type == MachineType::Int8()) { -+ opcode = kWord32AtomicCompareExchangeInt8; -+ } else if (type == MachineType::Uint8()) { -+ opcode = kWord32AtomicCompareExchangeUint8; -+ } else if (type == MachineType::Int16()) { -+ opcode = kWord32AtomicCompareExchangeInt16; -+ } else if (type == MachineType::Uint16()) { -+ opcode = kWord32AtomicCompareExchangeUint16; -+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { -+ opcode = kWord32AtomicCompareExchangeWord32; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ -+ VisitAtomicCompareExchange(this, node, opcode); -+} -+ -+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = AtomicOpType(node->op()); -+ if (type == MachineType::Uint8()) { -+ opcode = kLoong64Word64AtomicCompareExchangeUint8; -+ } else if (type == MachineType::Uint16()) { -+ opcode = kLoong64Word64AtomicCompareExchangeUint16; -+ } else if (type == MachineType::Uint32()) { -+ opcode = kLoong64Word64AtomicCompareExchangeUint32; -+ } else if (type == MachineType::Uint64()) { -+ opcode = kLoong64Word64AtomicCompareExchangeUint64; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ VisitAtomicCompareExchange(this, node, opcode); -+} -+void InstructionSelector::VisitWord32AtomicBinaryOperation( -+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, -+ ArchOpcode uint16_op, ArchOpcode word32_op) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = AtomicOpType(node->op()); -+ if (type == MachineType::Int8()) { -+ opcode = int8_op; -+ } else if (type == MachineType::Uint8()) { -+ opcode = uint8_op; -+ } else if (type == MachineType::Int16()) { -+ opcode = int16_op; -+ } else if (type == MachineType::Uint16()) { -+ opcode = uint16_op; -+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { -+ opcode = word32_op; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ -+ VisitAtomicBinop(this, node, opcode); -+} -+ -+#define VISIT_ATOMIC_BINOP(op) \ -+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ -+ VisitWord32AtomicBinaryOperation( \ -+ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ -+ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ -+ kWord32Atomic##op##Word32); \ -+ } -+VISIT_ATOMIC_BINOP(Add) -+VISIT_ATOMIC_BINOP(Sub) -+VISIT_ATOMIC_BINOP(And) -+VISIT_ATOMIC_BINOP(Or) -+VISIT_ATOMIC_BINOP(Xor) -+#undef VISIT_ATOMIC_BINOP -+ -+void InstructionSelector::VisitWord64AtomicBinaryOperation( -+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, -+ ArchOpcode uint64_op) { -+ ArchOpcode opcode = kArchNop; -+ MachineType type = AtomicOpType(node->op()); -+ if (type == MachineType::Uint8()) { -+ opcode = uint8_op; -+ } else if (type == MachineType::Uint16()) { -+ opcode = uint16_op; -+ } else if (type == MachineType::Uint32()) { -+ opcode = uint32_op; -+ } else if (type == MachineType::Uint64()) { -+ opcode = uint64_op; -+ } else { -+ UNREACHABLE(); -+ return; -+ } -+ VisitAtomicBinop(this, node, opcode); -+} -+ -+#define VISIT_ATOMIC_BINOP(op) \ -+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ -+ VisitWord64AtomicBinaryOperation( \ -+ node, kLoong64Word64Atomic##op##Uint8, kLoong64Word64Atomic##op##Uint16, \ -+ kLoong64Word64Atomic##op##Uint32, kLoong64Word64Atomic##op##Uint64); \ -+ } -+VISIT_ATOMIC_BINOP(Add) -+VISIT_ATOMIC_BINOP(Sub) -+VISIT_ATOMIC_BINOP(And) -+VISIT_ATOMIC_BINOP(Or) -+VISIT_ATOMIC_BINOP(Xor) -+#undef VISIT_ATOMIC_BINOP -+ -+void 
InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { -+ UNREACHABLE(); -+} -+ -+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { -+ UNREACHABLE(); -+} -+ -+#define SIMD_TYPE_LIST(V) \ -+ V(F64x2) \ -+ V(F32x4) \ -+ V(I64x2) \ -+ V(I32x4) \ -+ V(I16x8) \ -+ V(I8x16) -+ -+#define SIMD_UNOP_LIST(V) \ -+ V(F64x2Abs, kLoong64F64x2Abs) \ -+ V(F64x2Neg, kLoong64F64x2Neg) \ -+ V(F64x2Sqrt, kLoong64F64x2Sqrt) \ -+ V(I64x2Neg, kLoong64I64x2Neg) \ -+ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \ -+ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \ -+ V(F32x4Abs, kLoong64F32x4Abs) \ -+ V(F32x4Neg, kLoong64F32x4Neg) \ -+ V(F32x4Sqrt, kLoong64F32x4Sqrt) \ -+ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \ -+ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \ -+ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \ -+ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \ -+ V(I32x4Neg, kLoong64I32x4Neg) \ -+ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \ -+ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \ -+ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \ -+ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \ -+ V(I32x4Abs, kLoong64I32x4Abs) \ -+ V(I16x8Neg, kLoong64I16x8Neg) \ -+ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \ -+ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \ -+ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \ -+ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \ -+ V(I16x8Abs, kLoong64I16x8Abs) \ -+ V(I8x16Neg, kLoong64I8x16Neg) \ -+ V(I8x16Abs, kLoong64I8x16Abs) \ -+ V(S128Not, kLoong64S128Not) \ -+ V(S1x4AnyTrue, kLoong64S1x4AnyTrue) \ -+ V(S1x4AllTrue, kLoong64S1x4AllTrue) \ -+ V(S1x8AnyTrue, kLoong64S1x8AnyTrue) \ -+ V(S1x8AllTrue, kLoong64S1x8AllTrue) \ -+ V(S1x16AnyTrue, kLoong64S1x16AnyTrue) \ -+ V(S1x16AllTrue, kLoong64S1x16AllTrue) -+ -+#define SIMD_SHIFT_OP_LIST(V) \ -+ V(I64x2Shl) \ -+ V(I64x2ShrS) \ -+ V(I64x2ShrU) \ -+ V(I32x4Shl) \ -+ V(I32x4ShrS) \ -+ V(I32x4ShrU) \ -+ V(I16x8Shl) \ -+ V(I16x8ShrS) \ -+ V(I16x8ShrU) \ -+ V(I8x16Shl) \ -+ V(I8x16ShrS) \ -+ V(I8x16ShrU) -+ -+#define SIMD_BINOP_LIST(V) \ -+ V(F64x2Add, kLoong64F64x2Add) \ -+ V(F64x2Sub, kLoong64F64x2Sub) \ -+ V(F64x2Mul, kLoong64F64x2Mul) \ -+ V(F64x2Div, kLoong64F64x2Div) \ -+ V(F64x2Min, kLoong64F64x2Min) \ -+ V(F64x2Max, kLoong64F64x2Max) \ -+ V(F64x2Eq, kLoong64F64x2Eq) \ -+ V(F64x2Ne, kLoong64F64x2Ne) \ -+ V(F64x2Lt, kLoong64F64x2Lt) \ -+ V(F64x2Le, kLoong64F64x2Le) \ -+ V(I64x2Add, kLoong64I64x2Add) \ -+ V(I64x2Sub, kLoong64I64x2Sub) \ -+ V(I64x2Mul, kLoong64I64x2Mul) \ -+ V(F32x4Add, kLoong64F32x4Add) \ -+ V(F32x4AddHoriz, kLoong64F32x4AddHoriz) \ -+ V(F32x4Sub, kLoong64F32x4Sub) \ -+ V(F32x4Mul, kLoong64F32x4Mul) \ -+ V(F32x4Div, kLoong64F32x4Div) \ -+ V(F32x4Max, kLoong64F32x4Max) \ -+ V(F32x4Min, kLoong64F32x4Min) \ -+ V(F32x4Eq, kLoong64F32x4Eq) \ -+ V(F32x4Ne, kLoong64F32x4Ne) \ -+ V(F32x4Lt, kLoong64F32x4Lt) \ -+ V(F32x4Le, kLoong64F32x4Le) \ -+ V(I32x4Add, kLoong64I32x4Add) \ -+ V(I32x4AddHoriz, kLoong64I32x4AddHoriz) \ -+ V(I32x4Sub, kLoong64I32x4Sub) \ -+ V(I32x4Mul, kLoong64I32x4Mul) \ -+ V(I32x4MaxS, kLoong64I32x4MaxS) \ -+ V(I32x4MinS, kLoong64I32x4MinS) \ -+ V(I32x4MaxU, kLoong64I32x4MaxU) \ -+ V(I32x4MinU, kLoong64I32x4MinU) \ -+ V(I32x4Eq, kLoong64I32x4Eq) \ -+ V(I32x4Ne, kLoong64I32x4Ne) \ -+ V(I32x4GtS, kLoong64I32x4GtS) \ -+ V(I32x4GeS, kLoong64I32x4GeS) \ -+ V(I32x4GtU, kLoong64I32x4GtU) \ -+ V(I32x4GeU, kLoong64I32x4GeU) \ -+ V(I16x8Add, kLoong64I16x8Add) \ -+ 
V(I16x8AddSaturateS, kLoong64I16x8AddSaturateS) \ -+ V(I16x8AddSaturateU, kLoong64I16x8AddSaturateU) \ -+ V(I16x8AddHoriz, kLoong64I16x8AddHoriz) \ -+ V(I16x8Sub, kLoong64I16x8Sub) \ -+ V(I16x8SubSaturateS, kLoong64I16x8SubSaturateS) \ -+ V(I16x8SubSaturateU, kLoong64I16x8SubSaturateU) \ -+ V(I16x8Mul, kLoong64I16x8Mul) \ -+ V(I16x8MaxS, kLoong64I16x8MaxS) \ -+ V(I16x8MinS, kLoong64I16x8MinS) \ -+ V(I16x8MaxU, kLoong64I16x8MaxU) \ -+ V(I16x8MinU, kLoong64I16x8MinU) \ -+ V(I16x8Eq, kLoong64I16x8Eq) \ -+ V(I16x8Ne, kLoong64I16x8Ne) \ -+ V(I16x8GtS, kLoong64I16x8GtS) \ -+ V(I16x8GeS, kLoong64I16x8GeS) \ -+ V(I16x8GtU, kLoong64I16x8GtU) \ -+ V(I16x8GeU, kLoong64I16x8GeU) \ -+ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \ -+ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \ -+ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \ -+ V(I8x16Add, kLoong64I8x16Add) \ -+ V(I8x16AddSaturateS, kLoong64I8x16AddSaturateS) \ -+ V(I8x16AddSaturateU, kLoong64I8x16AddSaturateU) \ -+ V(I8x16Sub, kLoong64I8x16Sub) \ -+ V(I8x16SubSaturateS, kLoong64I8x16SubSaturateS) \ -+ V(I8x16SubSaturateU, kLoong64I8x16SubSaturateU) \ -+ V(I8x16Mul, kLoong64I8x16Mul) \ -+ V(I8x16MaxS, kLoong64I8x16MaxS) \ -+ V(I8x16MinS, kLoong64I8x16MinS) \ -+ V(I8x16MaxU, kLoong64I8x16MaxU) \ -+ V(I8x16MinU, kLoong64I8x16MinU) \ -+ V(I8x16Eq, kLoong64I8x16Eq) \ -+ V(I8x16Ne, kLoong64I8x16Ne) \ -+ V(I8x16GtS, kLoong64I8x16GtS) \ -+ V(I8x16GeS, kLoong64I8x16GeS) \ -+ V(I8x16GtU, kLoong64I8x16GtU) \ -+ V(I8x16GeU, kLoong64I8x16GeU) \ -+ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \ -+ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \ -+ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \ -+ V(S128And, kLoong64S128And) \ -+ V(S128Or, kLoong64S128Or) \ -+ V(S128Xor, kLoong64S128Xor) \ -+ V(S128AndNot, kLoong64S128AndNot) -+ -+void InstructionSelector::VisitS128Zero(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64S128Zero, g.DefineAsRegister(node)); -+} -+ -+#define SIMD_VISIT_SPLAT(Type) \ -+ void InstructionSelector::Visit##Type##Splat(Node* node) { \ -+ VisitRR(this, kLoong64##Type##Splat, node); \ -+ } -+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) -+#undef SIMD_VISIT_SPLAT -+ -+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ -+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ -+ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \ -+ } -+SIMD_VISIT_EXTRACT_LANE(F64x2, ) -+SIMD_VISIT_EXTRACT_LANE(F32x4, ) -+SIMD_VISIT_EXTRACT_LANE(I64x2, ) -+SIMD_VISIT_EXTRACT_LANE(I32x4, ) -+SIMD_VISIT_EXTRACT_LANE(I16x8, U) -+SIMD_VISIT_EXTRACT_LANE(I16x8, S) -+SIMD_VISIT_EXTRACT_LANE(I8x16, U) -+SIMD_VISIT_EXTRACT_LANE(I8x16, S) -+#undef SIMD_VISIT_EXTRACT_LANE -+ -+#define SIMD_VISIT_REPLACE_LANE(Type) \ -+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ -+ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \ -+ } -+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) -+#undef SIMD_VISIT_REPLACE_LANE -+ -+#define SIMD_VISIT_UNOP(Name, instruction) \ -+ void InstructionSelector::Visit##Name(Node* node) { \ -+ VisitRR(this, instruction, node); \ -+ } -+SIMD_UNOP_LIST(SIMD_VISIT_UNOP) -+#undef SIMD_VISIT_UNOP -+ -+#define SIMD_VISIT_SHIFT_OP(Name) \ -+ void InstructionSelector::Visit##Name(Node* node) { \ -+ VisitSimdShift(this, kLoong64##Name, node); \ -+ } -+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) -+#undef SIMD_VISIT_SHIFT_OP -+ -+#define SIMD_VISIT_BINOP(Name, instruction) \ -+ void InstructionSelector::Visit##Name(Node* node) { \ -+ VisitRRR(this, instruction, node); \ -+ } 
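-+// Each SIMD_BINOP_LIST entry below is stamped out through SIMD_VISIT_BINOP;
-+// e.g. the (F64x2Add, kLoong64F64x2Add) entry expands to:
-+//
-+//   void InstructionSelector::VisitF64x2Add(Node* node) {
-+//     VisitRRR(this, kLoong64F64x2Add, node);
-+//   }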
-+SIMD_BINOP_LIST(SIMD_VISIT_BINOP) -+#undef SIMD_VISIT_BINOP -+ -+void InstructionSelector::VisitS128Select(Node* node) { -+ VisitRRRR(this, kLoong64S128Select, node); -+} -+ -+namespace { -+ -+struct ShuffleEntry { -+ uint8_t shuffle[kSimd128Size]; -+ ArchOpcode opcode; -+}; -+ -+static const ShuffleEntry arch_shuffles[] = { -+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, -+ kLoong64S32x4InterleaveRight}, -+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, -+ kLoong64S32x4InterleaveLeft}, -+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, -+ kLoong64S32x4PackEven}, -+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, -+ kLoong64S32x4PackOdd}, -+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, -+ kLoong64S32x4InterleaveEven}, -+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, -+ kLoong64S32x4InterleaveOdd}, -+ -+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, -+ kLoong64S16x8InterleaveRight}, -+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, -+ kLoong64S16x8InterleaveLeft}, -+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, -+ kLoong64S16x8PackEven}, -+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, -+ kLoong64S16x8PackOdd}, -+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, -+ kLoong64S16x8InterleaveEven}, -+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, -+ kLoong64S16x8InterleaveOdd}, -+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kLoong64S16x4Reverse}, -+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kLoong64S16x2Reverse}, -+ -+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, -+ kLoong64S8x16InterleaveRight}, -+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, -+ kLoong64S8x16InterleaveLeft}, -+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, -+ kLoong64S8x16PackEven}, -+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, -+ kLoong64S8x16PackOdd}, -+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, -+ kLoong64S8x16InterleaveEven}, -+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, -+ kLoong64S8x16InterleaveOdd}, -+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kLoong64S8x8Reverse}, -+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kLoong64S8x4Reverse}, -+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kLoong64S8x2Reverse}}; -+ -+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, -+ size_t num_entries, bool is_swizzle, -+ ArchOpcode* opcode) { -+ uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1;
-+  for (size_t i = 0; i < num_entries; ++i) {
-+    const ShuffleEntry& entry = table[i];
-+    int j = 0;
-+    for (; j < kSimd128Size; ++j) {
-+      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
-+        break;
-+      }
-+    }
-+    if (j == kSimd128Size) {
-+      *opcode = entry.opcode;
-+      return true;
-+    }
-+  }
-+  return false;
-+}
-+
-+}  // namespace
-+
-+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
-+  uint8_t shuffle[kSimd128Size];
-+  bool is_swizzle;
-+  CanonicalizeShuffle(node, shuffle, &is_swizzle);
-+  uint8_t shuffle32x4[4];
-+  ArchOpcode opcode;
-+  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
-+                          is_swizzle, &opcode)) {
-+    VisitRRR(this, opcode, node);
-+    return;
-+  }
-+  Node* input0 = node->InputAt(0);
-+  Node* input1 = node->InputAt(1);
-+  uint8_t offset;
-+  Loong64OperandGenerator g(this);
-+  if (TryMatchConcat(shuffle, &offset)) {
-+    Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
-+         g.UseRegister(input0), g.UseImmediate(offset));
-+    return;
-+  }
-+  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
-+    Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
-+         g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
-+    return;
-+  }
-+  Emit(kLoong64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
-+       g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
-+       g.UseImmediate(Pack4Lanes(shuffle + 4)),
-+       g.UseImmediate(Pack4Lanes(shuffle + 8)),
-+       g.UseImmediate(Pack4Lanes(shuffle + 12)));
-+}
-+
-+void InstructionSelector::VisitS8x16Swizzle(Node* node) {
-+  Loong64OperandGenerator g(this);
-+  InstructionOperand temps[] = {g.TempSimd128Register()};
-+  // We don't want input 0 or input 1 to be the same as output, since we will
-+  // modify output before doing the calculation.
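-+  // (For reference: S8x16Swizzle picks each output byte from input 0 using
-+  // the corresponding byte index in input 1, with out-of-range indices
-+  // yielding zero; UseUniqueRegister below keeps both inputs in registers
-+  // distinct from the temp/output.)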
-+ Emit(kLoong64S8x16Swizzle, g.DefineAsRegister(node), -+ g.UseUniqueRegister(node->InputAt(0)), -+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); -+} -+ -+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -+} -+ -+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -+} -+ -+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -+} -+ -+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -+} -+ -+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) { -+ Loong64OperandGenerator g(this); -+ Emit(kLoong64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), -+ g.TempImmediate(0)); -+} -+ -+// static -+MachineOperatorBuilder::Flags -+InstructionSelector::SupportedMachineOperatorFlags() { -+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; -+ return flags | MachineOperatorBuilder::kWord32Ctz | -+ MachineOperatorBuilder::kWord64Ctz | -+ MachineOperatorBuilder::kWord32Popcnt | -+ MachineOperatorBuilder::kWord64Popcnt | -+ MachineOperatorBuilder::kWord32ShiftIsSafe | -+ MachineOperatorBuilder::kInt32DivIsSafe | -+ MachineOperatorBuilder::kUint32DivIsSafe | -+ MachineOperatorBuilder::kFloat64RoundDown | -+ MachineOperatorBuilder::kFloat32RoundDown | -+ MachineOperatorBuilder::kFloat64RoundUp | -+ MachineOperatorBuilder::kFloat32RoundUp | -+ MachineOperatorBuilder::kFloat64RoundTruncate | -+ MachineOperatorBuilder::kFloat32RoundTruncate | -+ MachineOperatorBuilder::kFloat64RoundTiesEven | -+ MachineOperatorBuilder::kFloat32RoundTiesEven; -+} -+ -+// static -+MachineOperatorBuilder::AlignmentRequirements -+InstructionSelector::AlignmentRequirements() { -+ return MachineOperatorBuilder::AlignmentRequirements:: -+ FullUnalignedAccessSupport(); -+} -+ -+#undef SIMD_BINOP_LIST -+#undef SIMD_SHIFT_OP_LIST -+#undef SIMD_UNOP_LIST -+#undef SIMD_TYPE_LIST -+#undef TRACE_UNIMPL -+#undef TRACE -+ -+} // namespace compiler -+} // namespace internal -+} // namespace v8 -diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc -index 4967f2bb..a1110b73 100644 ---- a/deps/v8/src/compiler/c-linkage.cc -+++ b/deps/v8/src/compiler/c-linkage.cc -@@ -94,9 +94,22 @@ namespace { - #define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 - #define CALLEE_SAVE_REGISTERS \ - s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ -- s7.bit() --#define CALLEE_SAVE_FP_REGISTERS \ -- f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit() -+ s7.bit() | fp.bit() -+#define CALLEE_SAVE_FP_REGISTERS \ -+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ -+ f30.bit() | f31.bit() -+ -+#elif V8_TARGET_ARCH_LOONG64 -+// =========================================================================== -+// == loong64 ================================================================= -+// =========================================================================== -+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 -+#define CALLEE_SAVE_REGISTERS \ -+ s0.bit() | 
s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ -+ s7.bit() | fp.bit() -+#define CALLEE_SAVE_FP_REGISTERS \ -+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ -+ f30.bit() | f31.bit() - - #elif V8_TARGET_ARCH_PPC64 - // =========================================================================== -diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc -index ccf45202..abbc3493 100644 ---- a/deps/v8/src/debug/debug-evaluate.cc -+++ b/deps/v8/src/debug/debug-evaluate.cc -@@ -1062,7 +1062,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) { - } - CHECK(!failed); - #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ -- defined(V8_TARGET_ARCH_MIPS64) -+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) - // Isolate-independent builtin calls and jumps do not emit reloc infos - // on PPC. We try to avoid using PC relative code due to performance - // issue with especially older hardwares. -diff --git a/deps/v8/src/debug/loong64/debug-loong64.cc b/deps/v8/src/debug/loong64/debug-loong64.cc -new file mode 100644 -index 00000000..cf350101 ---- /dev/null -+++ b/deps/v8/src/debug/loong64/debug-loong64.cc -@@ -0,0 +1,56 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/debug/debug.h" -+ -+#include "src/codegen/macro-assembler.h" -+#include "src/debug/liveedit.h" -+#include "src/execution/frames-inl.h" -+ -+namespace v8 { -+namespace internal { -+ -+#define __ ACCESS_MASM(masm) -+ -+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) { -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0); -+ } -+ __ MaybeDropFrames(); -+ -+ // Return to caller. -+ __ Ret(); -+} -+ -+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) { -+ // Frame is being dropped: -+ // - Drop to the target frame specified by a1. -+ // - Look up current function on the frame. -+ // - Leave the frame. -+ // - Restart the frame by calling the function. -+ __ mov(fp, a1); -+ __ Ld_d(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); -+ -+ // Pop return address and frame. -+ __ LeaveFrame(StackFrame::INTERNAL); -+ -+ __ Ld_d(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); -+ __ Ld_hu( -+ a0, FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset)); -+ __ mov(a2, a0); -+ -+ __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION); -+} -+ -+const bool LiveEdit::kFrameDropperSupported = true; -+ -+#undef __ -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc -new file mode 100644 -index 00000000..23a0051d ---- /dev/null -+++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc -@@ -0,0 +1,241 @@ -+// Copyright 2011 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. 
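-+//
-+// Rough sketch of the register save area the stub below builds before
-+// calling Deoptimizer::New (8-byte slots; LA64 has 32 GPRs and 32 FPRs):
-+//
-+//   sp + 0   : 32 general-purpose register slots (gaps for unsaved ones)
-+//   sp + 256 : 32 double register slots
-+//
-+// kSavedRegistersAreaSize covers both areas, and the fp-to-sp delta handed
-+// to the deoptimizer is taken relative to the sp value above this area.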
-+
-+#include "src/codegen/macro-assembler.h"
-+#include "src/codegen/register-configuration.h"
-+#include "src/codegen/safepoint-table.h"
-+#include "src/deoptimizer/deoptimizer.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false;
-+const int Deoptimizer::kNonLazyDeoptExitSize = 0;
-+const int Deoptimizer::kLazyDeoptExitSize = 0;
-+
-+#define __ masm->
-+
-+// This code tries to be close to ia32 code so that any changes can be
-+// easily ported.
-+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
-+                                                Isolate* isolate,
-+                                                DeoptimizeKind deopt_kind) {
-+  NoRootArrayScope no_root_array(masm);
-+
-+  // Unlike on ARM we don't save all the registers, just the useful ones.
-+  // For the rest, there are gaps on the stack, so the offsets remain the same.
-+  const int kNumberOfRegisters = Register::kNumRegisters;
-+
-+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
-+  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-+
-+  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
-+
-+  // Save all double FPU registers before messing with them.
-+  __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
-+  const RegisterConfiguration* config = RegisterConfiguration::Default();
-+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
-+    int code = config->GetAllocatableDoubleCode(i);
-+    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
-+    int offset = code * kDoubleSize;
-+    __ Fst_d(fpu_reg, MemOperand(sp, offset));
-+  }
-+
-+  // Push saved_regs (needed to populate FrameDescription::registers_).
-+  // Leave gaps for other registers.
-+  __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
-+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
-+    if ((saved_regs & (1 << i)) != 0) {
-+      __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
-+    }
-+  }
-+
-+  __ li(a2, Operand(ExternalReference::Create(
-+                IsolateAddressId::kCEntryFPAddress, isolate)));
-+  __ St_d(fp, MemOperand(a2, 0));
-+
-+  const int kSavedRegistersAreaSize =
-+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-+
-+  // The bailout id is passed as kRootRegister by the caller.
-+  __ mov(a2, kRootRegister);
-+
-+  // Get the address of the location in the code object (a3) (return
-+  // address for lazy deoptimization) and compute the fp-to-sp delta in
-+  // register a4.
-+  __ mov(a3, ra);
-+  __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
-+
-+  __ Sub_d(a4, fp, a4);
-+
-+  // Allocate a new deoptimizer object.
-+  __ PrepareCallCFunction(6, a5);
-+  // Pass six arguments, according to n64 ABI.
-+  __ mov(a0, zero_reg);
-+  Label context_check;
-+  __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-+  __ JumpIfSmi(a1, &context_check);
-+  __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-+  __ bind(&context_check);
-+  __ li(a1, Operand(static_cast<int>(deopt_kind)));
-+  // a2: bailout id already loaded.
-+  // a3: code address or 0 already loaded.
-+  // a4: already has fp-to-sp delta.
-+  __ li(a5, Operand(ExternalReference::isolate_address(isolate)));
-+
-+  // Call Deoptimizer::New().
-+  {
-+    AllowExternalCallThatCantCauseGC scope(masm);
-+    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
-+  }
-+
-+  // Preserve "deoptimizer" object in register v0 and get the input
-+  // frame descriptor pointer to a1 (deoptimizer->input_);
-+  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
-+ // TODO save a0 -+ //__ mov(a0, v0); -+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset())); -+ -+ // Copy core registers into FrameDescription::registers_[kNumRegisters]. -+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); -+ for (int i = 0; i < kNumberOfRegisters; i++) { -+ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); -+ if ((saved_regs & (1 << i)) != 0) { -+ __ Ld_d(a2, MemOperand(sp, i * kPointerSize)); -+ __ St_d(a2, MemOperand(a1, offset)); -+ } else if (FLAG_debug_code) { -+ __ li(a2, Operand(kDebugZapValue)); -+ __ St_d(a2, MemOperand(a1, offset)); -+ } -+ } -+ -+ int double_regs_offset = FrameDescription::double_registers_offset(); -+ // Copy FPU registers to -+ // double_registers_[DoubleRegister::kNumAllocatableRegisters] -+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { -+ int code = config->GetAllocatableDoubleCode(i); -+ int dst_offset = code * kDoubleSize + double_regs_offset; -+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; -+ __ Fld_d(f0, MemOperand(sp, src_offset)); -+ __ Fst_d(f0, MemOperand(a1, dst_offset)); -+ } -+ -+ // Remove the saved registers from the stack. -+ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize)); -+ -+ // Compute a pointer to the unwinding limit in register a2; that is -+ // the first stack slot not part of the input frame. -+ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset())); -+ __ Add_d(a2, a2, sp); -+ -+ // Unwind the stack down to - but not including - the unwinding -+ // limit and copy the contents of the activation frame to the input -+ // frame description. -+ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset())); -+ Label pop_loop; -+ Label pop_loop_header; -+ __ Branch(&pop_loop_header); -+ __ bind(&pop_loop); -+ __ pop(a4); -+ __ St_d(a4, MemOperand(a3, 0)); -+ __ addi_d(a3, a3, sizeof(uint64_t)); -+ __ bind(&pop_loop_header); -+ __ BranchShort(&pop_loop, ne, a2, Operand(sp)); -+ // Compute the output frame in the deoptimizer. -+ __ push(a0); // Preserve deoptimizer object across call. -+ // a0: deoptimizer object; a1: scratch. -+ __ PrepareCallCFunction(1, a1); -+ // Call Deoptimizer::ComputeOutputFrames(). -+ { -+ AllowExternalCallThatCantCauseGC scope(masm); -+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); -+ } -+ __ pop(a0); // Restore deoptimizer object (class Deoptimizer). -+ -+ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); -+ -+ // Replace the current (input) frame with the output frames. -+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; -+ // Outer loop state: a4 = current "FrameDescription** output_", -+ // a1 = one past the last FrameDescription**. -+ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset())); -+ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. -+ __ Alsl_d(a1, a1, a4, kPointerSizeLog2, t7); -+ __ Branch(&outer_loop_header); -+ __ bind(&outer_push_loop); -+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
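-+  // (Each outer iteration copies one output frame's contents from its
-+  // FrameDescription onto the stack; the inner loop pushes from the highest
-+  // frame offset downwards, so the contents land in order.)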
-+  __ Ld_d(a2, MemOperand(a4, 0));  // output_[ix]
-+  __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
-+  __ Branch(&inner_loop_header);
-+  __ bind(&inner_push_loop);
-+  __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
-+  __ Add_d(a6, a2, Operand(a3));
-+  __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
-+  __ push(a7);
-+  __ bind(&inner_loop_header);
-+  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
-+
-+  __ Add_d(a4, a4, Operand(kPointerSize));
-+  __ bind(&outer_loop_header);
-+  __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
-+
-+  __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
-+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
-+    int code = config->GetAllocatableDoubleCode(i);
-+    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
-+    int src_offset = code * kDoubleSize + double_regs_offset;
-+    __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
-+  }
-+
-+  // Push pc and continuation from the last output frame.
-+  __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
-+  __ push(a6);
-+  __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
-+  __ push(a6);
-+
-+  // Technically restoring 't7' should work unless zero_reg is also restored
-+  // but it's safer to check for this.
-+  DCHECK(!(t7.bit() & restored_regs));
-+  // Restore the registers from the last output frame.
-+  __ mov(t7, a2);
-+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
-+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
-+    if ((restored_regs & (1 << i)) != 0) {
-+      __ Ld_d(ToRegister(i), MemOperand(t7, offset));
-+    }
-+  }
-+
-+  __ pop(t7);  // Get continuation, leave pc on stack.
-+  __ pop(ra);
-+  __ Jump(t7);
-+  __ stop();
-+}
-+
-+// Maximum size of a table entry generated below.
-+const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
-+
-+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
-+  return Float32::FromBits(
-+      static_cast<uint32_t>(double_registers_[n].get_bits()));
-+}
-+
-+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
-+  SetFrameSlot(offset, value);
-+}
-+
-+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
-+  SetFrameSlot(offset, value);
-+}
-+
-+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-+  // No embedded constant pool support.
-+  UNREACHABLE();
-+}
-+
-+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
-+
-+#undef __
-+
-+}  // namespace internal
-+}  // namespace v8
-diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
-index 5f364373..ec8d0340 100644
---- a/deps/v8/src/diagnostics/gdb-jit.cc
-+++ b/deps/v8/src/diagnostics/gdb-jit.cc
-@@ -1077,6 +1077,8 @@ class DebugInfoSection : public DebugSection {
-     UNIMPLEMENTED();
- #elif V8_TARGET_ARCH_MIPS64
-     UNIMPLEMENTED();
-+#elif V8_TARGET_ARCH_LOONG64
-+    UNIMPLEMENTED();
- #elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
-     w->Write(DW_OP_reg31);  // The frame pointer is here on PPC64.
- #elif V8_TARGET_ARCH_S390
-diff --git a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
-new file mode 100644
-index 00000000..6fe44186
---- /dev/null
-+++ b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
-@@ -0,0 +1,1695 @@
-+#include <assert.h>
-+#include <stdint.h>
-+#include <stdio.h>
-+#include <string.h>
-+
-+#if V8_TARGET_ARCH_LOONG64
-+
-+#include "src/base/platform/platform.h"
-+#include "src/codegen/loong64/constants-loong64.h"
-+#include "src/codegen/macro-assembler.h"
-+#include "src/diagnostics/disasm.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+//------------------------------------------------------------------------------
-+
-+// Decoder decodes and disassembles instructions into an output buffer.
-+// It uses the converter to convert register names and call destinations into
-+// a more informative description.
-+class Decoder {
-+ public:
-+  Decoder(const disasm::NameConverter& converter,
-+          v8::internal::Vector<char> out_buffer)
-+      : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
-+    out_buffer_[out_buffer_pos_] = '\0';
-+  }
-+
-+  ~Decoder() {}
-+
-+  // Writes one disassembled instruction into 'buffer' (0-terminated).
-+  // Returns the length of the disassembled machine instruction in bytes.
-+  int InstructionDecode(byte* instruction);
-+
-+ private:
-+  // Bottleneck functions to print into the out_buffer.
-+  void PrintChar(const char ch);
-+  void Print(const char* str);
-+
-+  // Printing of common values.
-+  void PrintRegister(int reg);
-+  void PrintFPURegister(int freg);
-+  void PrintFPUStatusRegister(int freg);
-+  void PrintRj(Instruction* instr);
-+  void PrintRk(Instruction* instr);
-+  void PrintRd(Instruction* instr);
-+  void PrintFj(Instruction* instr);
-+  void PrintFk(Instruction* instr);
-+  void PrintFd(Instruction* instr);
-+  void PrintFa(Instruction* instr);
-+  void PrintSa2(Instruction* instr);
-+  void PrintSa3(Instruction* instr);
-+  void PrintUi5(Instruction* instr);
-+  void PrintUi6(Instruction* instr);
-+  void PrintUi12(Instruction* instr);
-+  void PrintXi12(Instruction* instr);
-+  void PrintMsbw(Instruction* instr);
-+  void PrintLsbw(Instruction* instr);
-+  void PrintMsbd(Instruction* instr);
-+  void PrintLsbd(Instruction* instr);
-+  // void PrintCond(Instruction* instr);
-+  void PrintSi12(Instruction* instr);
-+  void PrintSi14(Instruction* instr);
-+  void PrintSi16(Instruction* instr);
-+  void PrintSi20(Instruction* instr);
-+  void PrintCj(Instruction* instr);
-+  void PrintCd(Instruction* instr);
-+  void PrintCa(Instruction* instr);
-+  void PrintCode(Instruction* instr);
-+  void PrintHint5(Instruction* instr);
-+  void PrintHint15(Instruction* instr);
-+  void PrintPCOffs16(Instruction* instr);
-+  void PrintPCOffs21(Instruction* instr);
-+  void PrintPCOffs26(Instruction* instr);
-+  void PrintOffs16(Instruction* instr);
-+  void PrintOffs21(Instruction* instr);
-+  void PrintOffs26(Instruction* instr);
-+
-+  // Handle formatting of instructions and their options.
-+  int FormatRegister(Instruction* instr, const char* option);
-+  int FormatFPURegister(Instruction* instr, const char* option);
-+  int FormatOption(Instruction* instr, const char* option);
-+  void Format(Instruction* instr, const char* format);
-+  void Unknown(Instruction* instr);
-+  int DecodeBreakInstr(Instruction* instr);
-+
-+  // Each of these functions decodes one particular instruction type.
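-+  // The N in DecodeTypekOpN is the bit width of the major opcode field that
-+  // selects the instruction format (e.g. DecodeTypekOp6 dispatches on
-+  // instruction bits 31..26, DecodeTypekOp10 on bits 31..22, and so on).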
-+  int InstructionDecode(Instruction* instr);
-+  void DecodeTypekOp6(Instruction* instr);
-+  void DecodeTypekOp7(Instruction* instr);
-+  void DecodeTypekOp8(Instruction* instr);
-+  void DecodeTypekOp10(Instruction* instr);
-+  void DecodeTypekOp12(Instruction* instr);
-+  void DecodeTypekOp14(Instruction* instr);
-+  int DecodeTypekOp17(Instruction* instr);
-+  void DecodeTypekOp22(Instruction* instr);
-+
-+  const disasm::NameConverter& converter_;
-+  v8::internal::Vector<char> out_buffer_;
-+  int out_buffer_pos_;
-+
-+  DISALLOW_COPY_AND_ASSIGN(Decoder);
-+};
-+
-+// Support for assertions in the Decoder formatting functions.
-+#define STRING_STARTS_WITH(string, compare_string) \
-+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
-+
-+// Append the ch to the output buffer.
-+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
-+
-+// Append the str to the output buffer.
-+void Decoder::Print(const char* str) {
-+  char cur = *str++;
-+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
-+    PrintChar(cur);
-+    cur = *str++;
-+  }
-+  out_buffer_[out_buffer_pos_] = 0;
-+}
-+
-+// Print the register name according to the active name converter.
-+void Decoder::PrintRegister(int reg) {
-+  Print(converter_.NameOfCPURegister(reg));
-+}
-+
-+void Decoder::PrintRj(Instruction* instr) {
-+  int reg = instr->RjValue();
-+  PrintRegister(reg);
-+}
-+
-+void Decoder::PrintRk(Instruction* instr) {
-+  int reg = instr->RkValue();
-+  PrintRegister(reg);
-+}
-+
-+void Decoder::PrintRd(Instruction* instr) {
-+  int reg = instr->RdValue();
-+  PrintRegister(reg);
-+}
-+
-+// Print the FPU register name according to the active name converter.
-+void Decoder::PrintFPURegister(int freg) {
-+  Print(converter_.NameOfXMMRegister(freg));
-+}
-+
-+void Decoder::PrintFj(Instruction* instr) {
-+  int freg = instr->FjValue();
-+  PrintFPURegister(freg);
-+}
-+
-+void Decoder::PrintFk(Instruction* instr) {
-+  int freg = instr->FkValue();
-+  PrintFPURegister(freg);
-+}
-+
-+void Decoder::PrintFd(Instruction* instr) {
-+  int freg = instr->FdValue();
-+  PrintFPURegister(freg);
-+}
-+
-+void Decoder::PrintFa(Instruction* instr) {
-+  int freg = instr->FaValue();
-+  PrintFPURegister(freg);
-+}
-+
-+// Print the integer value of the sa field.
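-+// For ALSL/ALSL_D the printed value is the encoded sa2 plus one, i.e. the
-+// shift amount (1..4) the instruction actually applies.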
-+void Decoder::PrintSa2(Instruction* instr) { -+ int sa = instr->Sa2Value(); -+ uint32_t opcode = (instr->InstructionBits() >> 18) << 18; -+ if (opcode == ALSL || opcode == ALSL_D) { -+ sa += 1; -+ } -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); -+} -+ -+void Decoder::PrintSa3(Instruction* instr) { -+ int sa = instr->Sa3Value(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); -+} -+ -+void Decoder::PrintUi5(Instruction* instr) { -+ int ui = instr->Ui5Value(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); -+} -+ -+void Decoder::PrintUi6(Instruction* instr) { -+ int ui = instr->Ui6Value(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); -+} -+ -+void Decoder::PrintUi12(Instruction* instr) { -+ int ui = instr->Ui12Value(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); -+} -+ -+void Decoder::PrintXi12(Instruction* instr) { -+ int xi = instr->Ui12Value(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi); -+} -+ -+void Decoder::PrintMsbd(Instruction* instr) { -+ int msbd = instr->MsbdValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd); -+} -+ -+void Decoder::PrintLsbd(Instruction* instr) { -+ int lsbd = instr->LsbdValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd); -+} -+ -+void Decoder::PrintMsbw(Instruction* instr) { -+ int msbw = instr->MsbwValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw); -+} -+ -+void Decoder::PrintLsbw(Instruction* instr) { -+ int lsbw = instr->LsbwValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw); -+} -+ -+void Decoder::PrintSi12(Instruction* instr) { -+ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); -+} -+ -+void Decoder::PrintSi14(Instruction* instr) { -+ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits); -+ si <<= 2; -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); -+} -+ -+void Decoder::PrintSi16(Instruction* instr) { -+ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); -+} -+ -+void Decoder::PrintSi20(Instruction* instr) { -+ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); -+} -+ -+void Decoder::PrintCj(Instruction* instr) { -+ int cj = instr->CjValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj); -+} -+ -+void Decoder::PrintCd(Instruction* instr) { -+ int cd = instr->CdValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd); -+} -+ -+void Decoder::PrintCa(Instruction* instr) { -+ int ca = instr->CaValue(); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca); -+} -+ -+void Decoder::PrintCode(Instruction* instr) { -+ int code = instr->CodeValue(); -+ out_buffer_pos_ += -+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code); -+} -+ -+void Decoder::PrintHint5(Instruction* instr) { -+ int hint = instr->Hint5Value(); -+ out_buffer_pos_ += -+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint); -+} -+ -+void Decoder::PrintHint15(Instruction* instr) { -+ int hint = instr->Hint15Value(); -+ out_buffer_pos_ += -+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", 
hint, hint);
-+}
-+
-+void Decoder::PrintPCOffs16(Instruction* instr) {
-+  int n_bits = 2;
-+  int offs = instr->Offs16Value();
-+  int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >>
-+               (32 - kOffsLowBits - n_bits);
-+  out_buffer_pos_ += SNPrintF(
-+      out_buffer_ + out_buffer_pos_, "%s",
-+      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
-+}
-+
-+void Decoder::PrintPCOffs21(Instruction* instr) {
-+  int n_bits = 2;
-+  int offs = instr->Offs21Value();
-+  int target =
-+      ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >>
-+      (32 - kOffsLowBits - kOffs21HighBits - n_bits);
-+  out_buffer_pos_ += SNPrintF(
-+      out_buffer_ + out_buffer_pos_, "%s",
-+      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
-+}
-+
-+void Decoder::PrintPCOffs26(Instruction* instr) {
-+  int n_bits = 2;
-+  int offs = instr->Offs26Value();
-+  int target =
-+      ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >>
-+      (32 - kOffsLowBits - kOffs26HighBits - n_bits);
-+  out_buffer_pos_ += SNPrintF(
-+      out_buffer_ + out_buffer_pos_, "%s",
-+      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
-+}
-+
-+void Decoder::PrintOffs16(Instruction* instr) {
-+  int offs = instr->Offs16Value();
-+  offs <<= (32 - kOffsLowBits);
-+  offs >>= (32 - kOffsLowBits);
-+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
-+}
-+
-+void Decoder::PrintOffs21(Instruction* instr) {
-+  int offs = instr->Offs21Value();
-+  offs <<= (32 - kOffsLowBits - kOffs21HighBits);
-+  offs >>= (32 - kOffsLowBits - kOffs21HighBits);
-+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
-+}
-+
-+void Decoder::PrintOffs26(Instruction* instr) {
-+  int offs = instr->Offs26Value();
-+  offs <<= (32 - kOffsLowBits - kOffs26HighBits);
-+  offs >>= (32 - kOffsLowBits - kOffs26HighBits);
-+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
-+}
-+
-+// Handle all register based formatting in this function to reduce the
-+// complexity of FormatOption.
-+int Decoder::FormatRegister(Instruction* instr, const char* format) {
-+  DCHECK_EQ(format[0], 'r');
-+  if (format[1] == 'j') {  // 'rj: Rj register.
-+    int reg = instr->RjValue();
-+    PrintRegister(reg);
-+    return 2;
-+  } else if (format[1] == 'k') {  // 'rk: rk register.
-+    int reg = instr->RkValue();
-+    PrintRegister(reg);
-+    return 2;
-+  } else if (format[1] == 'd') {  // 'rd: rd register.
-+    int reg = instr->RdValue();
-+    PrintRegister(reg);
-+    return 2;
-+  }
-+  UNREACHABLE();
-+  return 0;
-+}
-+
-+// Handle all FPU register based formatting in this function to reduce the
-+// complexity of FormatOption.
-+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
-+  DCHECK_EQ(format[0], 'f');
-+  if (format[1] == 'j') {  // 'fj: fj register.
-+    int reg = instr->FjValue();
-+    PrintFPURegister(reg);
-+    return 2;
-+  } else if (format[1] == 'k') {  // 'fk: fk register.
-+    int reg = instr->FkValue();
-+    PrintFPURegister(reg);
-+    return 2;
-+  } else if (format[1] == 'd') {  // 'fd: fd register.
-+    int reg = instr->FdValue();
-+    PrintFPURegister(reg);
-+    return 2;
-+  } else if (format[1] == 'a') {  // 'fa: fa register.
-+    int reg = instr->FaValue();
-+    PrintFPURegister(reg);
-+    return 2;
-+  }
-+  UNREACHABLE();
-+  return 0;
-+}
-+
-+// FormatOption takes a formatting string and interprets it based on
-+// the current instructions. The format string points to the first
-+// character of the option string (the option escape has already been
-+// consumed by the caller.)
FormatOption returns the number of -+// characters that were consumed from the formatting string. -+int Decoder::FormatOption(Instruction* instr, const char* format) { -+ switch (format[0]) { -+ case 'c': { -+ switch (format[1]) { -+ case 'a': -+ DCHECK(STRING_STARTS_WITH(format, "ca")); -+ PrintCa(instr); -+ return 2; -+ case 'd': -+ DCHECK(STRING_STARTS_WITH(format, "cd")); -+ PrintCd(instr); -+ return 2; -+ case 'j': -+ DCHECK(STRING_STARTS_WITH(format, "cj")); -+ PrintCj(instr); -+ return 2; -+ case 'o': -+ DCHECK(STRING_STARTS_WITH(format, "code")); -+ PrintCode(instr); -+ return 4; -+ } -+ } -+ case 'f': { -+ return FormatFPURegister(instr, format); -+ } -+ case 'h': { -+ if (format[4] == '5') { -+ DCHECK(STRING_STARTS_WITH(format, "hint5")); -+ PrintHint5(instr); -+ return 5; -+ } else if (format[4] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "hint15")); -+ PrintHint15(instr); -+ return 6; -+ } -+ break; -+ } -+ case 'l': { -+ switch (format[3]) { -+ case 'w': -+ DCHECK(STRING_STARTS_WITH(format, "lsbw")); -+ PrintLsbw(instr); -+ return 4; -+ case 'd': -+ DCHECK(STRING_STARTS_WITH(format, "lsbd")); -+ PrintLsbd(instr); -+ return 4; -+ default: -+ return 0; -+ } -+ } -+ case 'm': { -+ if (format[3] == 'w') { -+ DCHECK(STRING_STARTS_WITH(format, "msbw")); -+ PrintMsbw(instr); -+ } else if (format[3] == 'd') { -+ DCHECK(STRING_STARTS_WITH(format, "msbd")); -+ PrintMsbd(instr); -+ } -+ return 4; -+ } -+ case 'o': { -+ if (format[1] == 'f') { -+ if (format[4] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "offs16")); -+ PrintOffs16(instr); -+ return 6; -+ } else if (format[4] == '2') { -+ if (format[5] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "offs21")); -+ PrintOffs21(instr); -+ return 6; -+ } else if (format[5] == '6') { -+ DCHECK(STRING_STARTS_WITH(format, "offs26")); -+ PrintOffs26(instr); -+ return 6; -+ } -+ } -+ } -+ break; -+ } -+ case 'p': { -+ if (format[6] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "pcoffs16")); -+ PrintPCOffs16(instr); -+ return 8; -+ } else if (format[6] == '2') { -+ if (format[7] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "pcoffs21")); -+ PrintPCOffs21(instr); -+ return 8; -+ } else if (format[7] == '6') { -+ DCHECK(STRING_STARTS_WITH(format, "pcoffs26")); -+ PrintPCOffs26(instr); -+ return 8; -+ } -+ } -+ break; -+ } -+ case 'r': { -+ return FormatRegister(instr, format); -+ break; -+ } -+ case 's': { -+ switch (format[1]) { -+ case 'a': -+ if (format[2] == '2') { -+ DCHECK(STRING_STARTS_WITH(format, "sa2")); -+ PrintSa2(instr); -+ } else if (format[2] == '3') { -+ DCHECK(STRING_STARTS_WITH(format, "sa3")); -+ PrintSa3(instr); -+ } -+ return 3; -+ case 'i': -+ if (format[2] == '2') { -+ DCHECK(STRING_STARTS_WITH(format, "si20")); -+ PrintSi20(instr); -+ return 4; -+ } else if (format[2] == '1') { -+ switch (format[3]) { -+ case '2': -+ DCHECK(STRING_STARTS_WITH(format, "si12")); -+ PrintSi12(instr); -+ return 4; -+ case '4': -+ DCHECK(STRING_STARTS_WITH(format, "si14")); -+ PrintSi14(instr); -+ return 4; -+ case '6': -+ DCHECK(STRING_STARTS_WITH(format, "si16")); -+ PrintSi16(instr); -+ return 4; -+ default: -+ break; -+ } -+ } -+ break; -+ default: -+ break; -+ } -+ break; -+ } -+ case 'u': { -+ if (format[2] == '5') { -+ DCHECK(STRING_STARTS_WITH(format, "ui5")); -+ PrintUi5(instr); -+ return 3; -+ } else if (format[2] == '6') { -+ DCHECK(STRING_STARTS_WITH(format, "ui6")); -+ PrintUi6(instr); -+ return 3; -+ } else if (format[2] == '1') { -+ DCHECK(STRING_STARTS_WITH(format, "ui12")); -+ PrintUi12(instr); -+ return 4; -+ } -+ break; -+ 
}
-+    case 'x': {
-+      DCHECK(STRING_STARTS_WITH(format, "xi12"));
-+      PrintXi12(instr);
-+      return 4;
-+    }
-+    default:
-+      UNREACHABLE();
-+  }
-+  return 0;
-+}
-+
-+// Format takes a formatting string for a whole instruction and prints it into
-+// the output buffer. All escaped options are handed to FormatOption to be
-+// parsed further.
-+void Decoder::Format(Instruction* instr, const char* format) {
-+  char cur = *format++;
-+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
-+    if (cur == '\'') {  // Single quote is used as the formatting escape.
-+      format += FormatOption(instr, format);
-+    } else {
-+      out_buffer_[out_buffer_pos_++] = cur;
-+    }
-+    cur = *format++;
-+  }
-+  out_buffer_[out_buffer_pos_] = '\0';
-+}
-+
-+// For currently unimplemented decodings the disassembler calls Unknown(instr)
-+// which will just print "unknown" for the instruction bits.
-+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
-+
-+int Decoder::DecodeBreakInstr(Instruction* instr) {
-+  // This is already known to be BREAK instr, just extract the code.
-+  /*if (instr->Bits(14, 0) == static_cast<int>(kMaxStopCode)) {
-+    // This is stop(msg).
-+    Format(instr, "break, code: 'code");
-+    out_buffer_pos_ += SNPrintF(
-+        out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
-+        static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
-+        reinterpret_cast<uint64_t>(
-+            *reinterpret_cast<uint64_t*>(instr + kInstrSize)));
-+    // Size 3: the break_ instr, plus embedded 64-bit char pointer.
-+    return 3 * kInstrSize;
-+  } else {
-+    Format(instr, "break, code: 'code");
-+    return kInstrSize;
-+  }*/
-+  Format(instr, "break code: 'code");
-+  return kInstrSize;
-+}  //===================================================
-+
-+void Decoder::DecodeTypekOp6(Instruction* instr) {
-+  switch (instr->Bits(31, 26) << 26) {
-+    case ADDU16I_D:
-+      Format(instr, "addu16i.d 'rd, 'rj, 'si16");
-+      break;
-+    case BEQZ:
-+      Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
-+      break;
-+    case BNEZ:
-+      Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
-+      break;
-+    case BCZ:
-+      if (instr->Bit(8))
-+        Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
-+      else
-+        Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
-+      break;
-+    case JIRL:
-+      Format(instr, "jirl 'rd, 'rj, 'offs16");
-+      break;
-+    case B:
-+      Format(instr, "b 'offs26 -> 'pcoffs26");
-+      break;
-+    case BL:
-+      Format(instr, "bl 'offs26 -> 'pcoffs26");
-+      break;
-+    case BEQ:
-+      Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    case BNE:
-+      Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    case BLT:
-+      Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    case BGE:
-+      Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    case BLTU:
-+      Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    case BGEU:
-+      Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
-+      break;
-+    default:
-+      UNREACHABLE();
-+  }
-+}
-+
-+void Decoder::DecodeTypekOp7(Instruction* instr) {
-+  switch (instr->Bits(31, 25) << 25) {
-+    case LU12I_W:
-+      Format(instr, "lu12i.w 'rd, 'si20");
-+      break;
-+    case LU32I_D:
-+      Format(instr, "lu32i.d 'rd, 'si20");
-+      break;
-+    case PCADDI:
-+      Format(instr, "pcaddi 'rd, 'si20");
-+      break;
-+    case PCALAU12I:
-+      Format(instr, "pcalau12i 'rd, 'si20");
-+      break;
-+    case PCADDU12I:
-+      Format(instr, "pcaddu12i 'rd, 'si20");
-+      break;
-+    case PCADDU18I:
-+      Format(instr, "pcaddu18i 'rd, 'si20");
-+      break;
-+    default:
-+      UNREACHABLE();
-+  }
-+}
-+
-+void Decoder::DecodeTypekOp8(Instruction* instr) {
-+  switch (instr->Bits(31, 24) <<
24) { -+ case LDPTR_W: -+ Format(instr, "ldptr.w 'rd, 'rj, 'si14"); -+ break; -+ case STPTR_W: -+ Format(instr, "stptr.w 'rd, 'rj, 'si14"); -+ break; -+ case LDPTR_D: -+ Format(instr, "ldptr.d 'rd, 'rj, 'si14"); -+ break; -+ case STPTR_D: -+ Format(instr, "stptr.d 'rd, 'rj, 'si14"); -+ break; -+ case LL_W: -+ Format(instr, "ll.w 'rd, 'rj, 'si14"); -+ break; -+ case SC_W: -+ Format(instr, "sc.w 'rd, 'rj, 'si14"); -+ break; -+ case LL_D: -+ Format(instr, "ll.d 'rd, 'rj, 'si14"); -+ break; -+ case SC_D: -+ Format(instr, "sc.d 'rd, 'rj, 'si14"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Decoder::DecodeTypekOp10(Instruction* instr) { -+ switch (instr->Bits(31, 22) << 22) { -+ case BSTR_W: { -+ if (instr->Bit(21) != 0) { -+ if (instr->Bit(15) == 0) { -+ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw"); -+ } else { -+ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw"); -+ } -+ } -+ break; -+ } -+ case BSTRINS_D: -+ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd"); -+ break; -+ case BSTRPICK_D: -+ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd"); -+ break; -+ case SLTI: -+ Format(instr, "slti 'rd, 'rj, 'si12"); -+ break; -+ case SLTUI: -+ Format(instr, "sltui 'rd, 'rj, 'si12"); -+ break; -+ case ADDI_W: -+ Format(instr, "addi.w 'rd, 'rj, 'si12"); -+ break; -+ case ADDI_D: -+ Format(instr, "addi.d 'rd, 'rj, 'si12"); -+ break; -+ case LU52I_D: -+ Format(instr, "lu52i.d 'rd, 'rj, 'si12"); -+ break; -+ case ANDI: -+ Format(instr, "andi 'rd, 'rj, 'xi12"); -+ break; -+ case ORI: -+ Format(instr, "ori 'rd, 'rj, 'xi12"); -+ break; -+ case XORI: -+ Format(instr, "xori 'rd, 'rj, 'xi12"); -+ break; -+ case LD_B: -+ Format(instr, "ld.b 'rd, 'rj, 'si12"); -+ break; -+ case LD_H: -+ Format(instr, "ld.h 'rd, 'rj, 'si12"); -+ break; -+ case LD_W: -+ Format(instr, "ld.w 'rd, 'rj, 'si12"); -+ break; -+ case LD_D: -+ Format(instr, "ld.d 'rd, 'rj, 'si12"); -+ break; -+ case ST_B: -+ Format(instr, "st.b 'rd, 'rj, 'si12"); -+ break; -+ case ST_H: -+ Format(instr, "st.h 'rd, 'rj, 'si12"); -+ break; -+ case ST_W: -+ Format(instr, "st.w 'rd, 'rj, 'si12"); -+ break; -+ case ST_D: -+ Format(instr, "st.d 'rd, 'rj, 'si12"); -+ break; -+ case LD_BU: -+ Format(instr, "ld.bu 'rd, 'rj, 'si12"); -+ break; -+ case LD_HU: -+ Format(instr, "ld.hu 'rd, 'rj, 'si12"); -+ break; -+ case LD_WU: -+ Format(instr, "ld.wu 'rd, 'rj, 'si12"); -+ break; -+ break; -+ case FLD_S: -+ Format(instr, "fld.s 'fd, 'rj, 'si12"); -+ break; -+ case FST_S: -+ Format(instr, "fst.s 'fd, 'rj, 'si12"); -+ break; -+ case FLD_D: -+ Format(instr, "fld.d 'fd, 'rj, 'si12"); -+ break; -+ case FST_D: -+ Format(instr, "fst.d 'fd, 'rj, 'si12"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Decoder::DecodeTypekOp12(Instruction* instr) { -+ switch (instr->Bits(31, 20) << 20) { -+ case FMADD_S: -+ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FMADD_D: -+ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FMSUB_S: -+ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FMSUB_D: -+ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FNMADD_S: -+ Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FNMADD_D: -+ Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FNMSUB_S: -+ Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FNMSUB_D: -+ Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa"); -+ break; -+ case FCMP_COND_S: -+ switch (instr->Bits(19, 15)) { -+ case CAF: -+ Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SAF: -+ Format(instr, "fcmp.saf.s 
fcc'cd, 'fj, 'fk"); -+ break; -+ case CLT: -+ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CEQ: -+ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SEQ: -+ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CLE: -+ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SLE: -+ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CUN: -+ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SUN: -+ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CULT: -+ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SULT: -+ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CUEQ: -+ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SUEQ: -+ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CULE: -+ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SULE: -+ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CNE: -+ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SNE: -+ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk"); -+ break; -+ case COR: -+ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SOR: -+ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk"); -+ break; -+ case CUNE: -+ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk"); -+ break; -+ case SUNE: -+ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ break; -+ case FCMP_COND_D: -+ switch (instr->Bits(19, 15)) { -+ case CAF: -+ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SAF: -+ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CLT: -+ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CEQ: -+ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SEQ: -+ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CLE: -+ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SLE: -+ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CUN: -+ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SUN: -+ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CULT: -+ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SULT: -+ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CUEQ: -+ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SUEQ: -+ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CULE: -+ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SULE: -+ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CNE: -+ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SNE: -+ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk"); -+ break; -+ case COR: -+ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SOR: -+ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk"); -+ break; -+ case CUNE: -+ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk"); -+ break; -+ case SUNE: -+ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ break; -+ case FSEL: -+ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Decoder::DecodeTypekOp14(Instruction* instr) { -+ switch (instr->Bits(31, 18) << 18) { -+ case ALSL: -+ if (instr->Bit(17)) -+ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2"); -+ else -+ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2"); -+ break; -+ case BYTEPICK_W: -+ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2"); -+ break; -+ case BYTEPICK_D: -+ 
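-+      // bytepick.d selects a doubleword from the byte-wise concatenation of
-+      // 'rj and 'rk; it takes a 3-bit byte offset ('sa3) where bytepick.w
-+      // above takes a 2-bit one ('sa2).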
Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3"); -+ break; -+ case ALSL_D: -+ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2"); -+ break; -+ case SLLI: -+ if (instr->Bit(16)) -+ Format(instr, "slli.d 'rd, 'rj, 'ui6"); -+ else -+ Format(instr, "slli.w 'rd, 'rj, 'ui5"); -+ break; -+ case SRLI: -+ if (instr->Bit(16)) -+ Format(instr, "srli.d 'rd, 'rj, 'ui6"); -+ else -+ Format(instr, "srli.w 'rd, 'rj, 'ui5"); -+ break; -+ case SRAI: -+ if (instr->Bit(16)) -+ Format(instr, "srai.d 'rd, 'rj, 'ui6"); -+ else -+ Format(instr, "srai.w 'rd, 'rj, 'ui5"); -+ break; -+ case ROTRI: -+ if (instr->Bit(16)) -+ Format(instr, "rotri.d 'rd, 'rj, 'ui6"); -+ else -+ Format(instr, "rotri.w 'rd, 'rj, 'ui5"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+int Decoder::DecodeTypekOp17(Instruction* instr) { -+ switch (instr->Bits(31, 15) << 15) { -+ case ADD_W: -+ Format(instr, "add.w 'rd, 'rj, 'rk"); -+ break; -+ case ADD_D: -+ Format(instr, "add.d 'rd, 'rj, 'rk"); -+ break; -+ case SUB_W: -+ Format(instr, "sub.w 'rd, 'rj, 'rk"); -+ break; -+ case SUB_D: -+ Format(instr, "sub.d 'rd, 'rj, 'rk"); -+ break; -+ case SLT: -+ Format(instr, "slt 'rd, 'rj, 'rk"); -+ break; -+ case SLTU: -+ Format(instr, "sltu 'rd, 'rj, 'rk"); -+ break; -+ case MASKEQZ: -+ Format(instr, "maskeqz 'rd, 'rj, 'rk"); -+ break; -+ case MASKNEZ: -+ Format(instr, "masknez 'rd, 'rj, 'rk"); -+ break; -+ case NOR: -+ Format(instr, "nor 'rd, 'rj, 'rk"); -+ break; -+ case AND: -+ Format(instr, "and 'rd, 'rj, 'rk"); -+ break; -+ case OR: -+ Format(instr, "or 'rd, 'rj, 'rk"); -+ break; -+ case XOR: -+ Format(instr, "xor 'rd, 'rj, 'rk"); -+ break; -+ case ORN: -+ Format(instr, "orn 'rd, 'rj, 'rk"); -+ break; -+ case ANDN: -+ Format(instr, "andn 'rd, 'rj, 'rk"); -+ break; -+ case SLL_W: -+ Format(instr, "sll.w 'rd, 'rj, 'rk"); -+ break; -+ case SRL_W: -+ Format(instr, "srl.w 'rd, 'rj, 'rk"); -+ break; -+ case SRA_W: -+ Format(instr, "sra.w 'rd, 'rj, 'rk"); -+ break; -+ case SLL_D: -+ Format(instr, "sll.d 'rd, 'rj, 'rk"); -+ break; -+ case SRL_D: -+ Format(instr, "srl.d 'rd, 'rj, 'rk"); -+ break; -+ case SRA_D: -+ Format(instr, "sra.d 'rd, 'rj, 'rk"); -+ break; -+ case ROTR_D: -+ Format(instr, "rotr.d 'rd, 'rj, 'rk"); -+ break; -+ case ROTR_W: -+ Format(instr, "rotr.w 'rd, 'rj, 'rk"); -+ break; -+ case MUL_W: -+ Format(instr, "mul.w 'rd, 'rj, 'rk"); -+ break; -+ case MULH_W: -+ Format(instr, "mulh.w 'rd, 'rj, 'rk"); -+ break; -+ case MULH_WU: -+ Format(instr, "mulh.wu 'rd, 'rj, 'rk"); -+ break; -+ case MUL_D: -+ Format(instr, "mul.d 'rd, 'rj, 'rk"); -+ break; -+ case MULH_D: -+ Format(instr, "mulh.d 'rd, 'rj, 'rk"); -+ break; -+ case MULH_DU: -+ Format(instr, "mulh.du 'rd, 'rj, 'rk"); -+ break; -+ case MULW_D_W: -+ Format(instr, "mulw.d.w 'rd, 'rj, 'rk"); -+ break; -+ case MULW_D_WU: -+ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk"); -+ break; -+ case DIV_W: -+ Format(instr, "div.w 'rd, 'rj, 'rk"); -+ break; -+ case MOD_W: -+ Format(instr, "mod.w 'rd, 'rj, 'rk"); -+ break; -+ case DIV_WU: -+ Format(instr, "div.wu 'rd, 'rj, 'rk"); -+ break; -+ case MOD_WU: -+ Format(instr, "mod.wu 'rd, 'rj, 'rk"); -+ break; -+ case DIV_D: -+ Format(instr, "div.d 'rd, 'rj, 'rk"); -+ break; -+ case MOD_D: -+ Format(instr, "mod.d 'rd, 'rj, 'rk"); -+ break; -+ case DIV_DU: -+ Format(instr, "div.du 'rd, 'rj, 'rk"); -+ break; -+ case MOD_DU: -+ Format(instr, "mod.du 'rd, 'rj, 'rk"); -+ break; -+ case BREAK: -+ return DecodeBreakInstr(instr); -+ case FADD_S: -+ Format(instr, "fadd.s 'fd, 'fj, 'fk"); -+ break; -+ case FADD_D: -+ Format(instr, "fadd.d 'fd, 'fj, 'fk"); -+ break; -+ case 
FSUB_S: -+ Format(instr, "fsub.s 'fd, 'fj, 'fk"); -+ break; -+ case FSUB_D: -+ Format(instr, "fsub.d 'fd, 'fj, 'fk"); -+ break; -+ case FMUL_S: -+ Format(instr, "fmul.s 'fd, 'fj, 'fk"); -+ break; -+ case FMUL_D: -+ Format(instr, "fmul.d 'fd, 'fj, 'fk"); -+ break; -+ case FDIV_S: -+ Format(instr, "fdiv.s 'fd, 'fj, 'fk"); -+ break; -+ case FDIV_D: -+ Format(instr, "fdiv.d 'fd, 'fj, 'fk"); -+ break; -+ case FMAX_S: -+ Format(instr, "fmax.s 'fd, 'fj, 'fk"); -+ break; -+ case FMAX_D: -+ Format(instr, "fmax.d 'fd, 'fj, 'fk"); -+ break; -+ case FMIN_S: -+ Format(instr, "fmin.s 'fd, 'fj, 'fk"); -+ break; -+ case FMIN_D: -+ Format(instr, "fmin.d 'fd, 'fj, 'fk"); -+ break; -+ case FMAXA_S: -+ Format(instr, "fmaxa.s 'fd, 'fj, 'fk"); -+ break; -+ case FMAXA_D: -+ Format(instr, "fmaxa.d 'fd, 'fj, 'fk"); -+ break; -+ case FMINA_S: -+ Format(instr, "fmina.s 'fd, 'fj, 'fk"); -+ break; -+ case FMINA_D: -+ Format(instr, "fmina.d 'fd, 'fj, 'fk"); -+ break; -+ case LDX_B: -+ Format(instr, "ldx.b 'rd, 'rj, 'rk"); -+ break; -+ case LDX_H: -+ Format(instr, "ldx.h 'rd, 'rj, 'rk"); -+ break; -+ case LDX_W: -+ Format(instr, "ldx.w 'rd, 'rj, 'rk"); -+ break; -+ case LDX_D: -+ Format(instr, "ldx.d 'rd, 'rj, 'rk"); -+ break; -+ case STX_B: -+ Format(instr, "stx.b 'rd, 'rj, 'rk"); -+ break; -+ case STX_H: -+ Format(instr, "stx.h 'rd, 'rj, 'rk"); -+ break; -+ case STX_W: -+ Format(instr, "stx.w 'rd, 'rj, 'rk"); -+ break; -+ case STX_D: -+ Format(instr, "stx.d 'rd, 'rj, 'rk"); -+ break; -+ case LDX_BU: -+ Format(instr, "ldx.bu 'rd, 'rj, 'rk"); -+ break; -+ case LDX_HU: -+ Format(instr, "ldx.hu 'rd, 'rj, 'rk"); -+ break; -+ case LDX_WU: -+ Format(instr, "ldx.wu 'rd, 'rj, 'rk"); -+ break; -+ case FLDX_S: -+ Format(instr, "fldx.s 'fd, 'rj, 'rk"); -+ break; -+ case FLDX_D: -+ Format(instr, "fldx.d 'fd, 'rj, 'rk"); -+ break; -+ case FSTX_S: -+ Format(instr, "fstx.s 'fd, 'rj, 'rk"); -+ break; -+ case FSTX_D: -+ Format(instr, "fstx.d 'fd, 'rj, 'rk"); -+ break; -+ case AMSWAP_W: -+ Format(instr, "amswap.w 'rd, 'rk, 'rj"); -+ break; -+ case AMSWAP_D: -+ Format(instr, "amswap.d 'rd, 'rk, 'rj"); -+ break; -+ case AMADD_W: -+ Format(instr, "amadd.w 'rd, 'rk, 'rj"); -+ break; -+ case AMADD_D: -+ Format(instr, "amadd.d 'rd, 'rk, 'rj"); -+ break; -+ case AMAND_W: -+ Format(instr, "amand.w 'rd, 'rk, 'rj"); -+ break; -+ case AMAND_D: -+ Format(instr, "amand.d 'rd, 'rk, 'rj"); -+ break; -+ case AMOR_W: -+ Format(instr, "amor.w 'rd, 'rk, 'rj"); -+ break; -+ case AMOR_D: -+ Format(instr, "amor.d 'rd, 'rk, 'rj"); -+ break; -+ case AMXOR_W: -+ Format(instr, "amxor.w 'rd, 'rk, 'rj"); -+ break; -+ case AMXOR_D: -+ Format(instr, "amxor.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_W: -+ Format(instr, "ammax.w 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_D: -+ Format(instr, "ammax.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_W: -+ Format(instr, "ammin.w 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_D: -+ Format(instr, "ammin.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_WU: -+ Format(instr, "ammax.wu 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_DU: -+ Format(instr, "ammax.du 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_WU: -+ Format(instr, "ammin.wu 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_DU: -+ Format(instr, "ammin.du 'rd, 'rk, 'rj"); -+ break; -+ case AMSWAP_DB_W: -+ Format(instr, "amswap_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMSWAP_DB_D: -+ Format(instr, "amswap_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMADD_DB_W: -+ Format(instr, "amadd_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMADD_DB_D: -+ Format(instr, "amadd_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMAND_DB_W: -+ 
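-+    // The am*_db.{w,d} cases from here on are the barrier ("_db") variants
-+    // of the atomic memory operations decoded above; for disassembly only
-+    // the printed mnemonic differs.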
Format(instr, "amand_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMAND_DB_D: -+ Format(instr, "amand_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMOR_DB_W: -+ Format(instr, "amor_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMOR_DB_D: -+ Format(instr, "amor_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMXOR_DB_W: -+ Format(instr, "amxor_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMXOR_DB_D: -+ Format(instr, "amxor_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_DB_W: -+ Format(instr, "ammax_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_DB_D: -+ Format(instr, "ammax_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_DB_W: -+ Format(instr, "ammin_db.w 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_DB_D: -+ Format(instr, "ammin_db.d 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_DB_WU: -+ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj"); -+ break; -+ case AMMAX_DB_DU: -+ Format(instr, "ammax_db.du 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_DB_WU: -+ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj"); -+ break; -+ case AMMIN_DB_DU: -+ Format(instr, "ammin_db.du 'rd, 'rk, 'rj"); -+ break; -+ case DBAR: -+ Format(instr, "dbar 'hint15"); -+ break; -+ case IBAR: -+ Format(instr, "ibar 'hint15"); -+ break; -+ case FSCALEB_S: -+ Format(instr, "fscaleb.s 'fd, 'fj, 'fk"); -+ break; -+ case FSCALEB_D: -+ Format(instr, "fscaleb.d 'fd, 'fj, 'fk"); -+ break; -+ case FCOPYSIGN_S: -+ Format(instr, "fcopysign.s 'fd, 'fj, 'fk"); -+ break; -+ case FCOPYSIGN_D: -+ Format(instr, "fcopysign.d 'fd, 'fj, 'fk"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ return kInstrSize; -+} -+ -+void Decoder::DecodeTypekOp22(Instruction* instr) { -+ switch (instr->Bits(31, 10) << 10) { -+ case CLZ_W: -+ Format(instr, "clz.w 'rd, 'rj"); -+ break; -+ case CTZ_W: -+ Format(instr, "ctz.w 'rd, 'rj"); -+ break; -+ case CLZ_D: -+ Format(instr, "clz.d 'rd, 'rj"); -+ break; -+ case CTZ_D: -+ Format(instr, "ctz.d 'rd, 'rj"); -+ break; -+ case REVB_2H: -+ Format(instr, "revb.2h 'rd, 'rj"); -+ break; -+ case REVB_4H: -+ Format(instr, "revb.4h 'rd, 'rj"); -+ break; -+ case REVB_2W: -+ Format(instr, "revb.2w 'rd, 'rj"); -+ break; -+ case REVB_D: -+ Format(instr, "revb.d 'rd, 'rj"); -+ break; -+ case REVH_2W: -+ Format(instr, "revh.2w 'rd, 'rj"); -+ break; -+ case REVH_D: -+ Format(instr, "revh.d 'rd, 'rj"); -+ break; -+ case BITREV_4B: -+ Format(instr, "bitrev.4b 'rd, 'rj"); -+ break; -+ case BITREV_8B: -+ Format(instr, "bitrev.8b 'rd, 'rj"); -+ break; -+ case BITREV_W: -+ Format(instr, "bitrev.w 'rd, 'rj"); -+ break; -+ case BITREV_D: -+ Format(instr, "bitrev.d 'rd, 'rj"); -+ break; -+ case EXT_W_B: -+ Format(instr, "ext.w.b 'rd, 'rj"); -+ break; -+ case EXT_W_H: -+ Format(instr, "ext.w.h 'rd, 'rj"); -+ break; -+ case FABS_S: -+ Format(instr, "fabs.s 'fd, 'fj"); -+ break; -+ case FABS_D: -+ Format(instr, "fabs.d 'fd, 'fj"); -+ break; -+ case FNEG_S: -+ Format(instr, "fneg.s 'fd, 'fj"); -+ break; -+ case FNEG_D: -+ Format(instr, "fneg.d 'fd, 'fj"); -+ break; -+ case FSQRT_S: -+ Format(instr, "fsqrt.s 'fd, 'fj"); -+ break; -+ case FSQRT_D: -+ Format(instr, "fsqrt.d 'fd, 'fj"); -+ break; -+ case FMOV_S: -+ Format(instr, "fmov.s 'fd, 'fj"); -+ break; -+ case FMOV_D: -+ Format(instr, "fmov.d 'fd, 'fj"); -+ break; -+ case MOVGR2FR_W: -+ Format(instr, "movgr2fr.w 'fd, 'rj"); -+ break; -+ case MOVGR2FR_D: -+ Format(instr, "movgr2fr.d 'fd, 'rj"); -+ break; -+ case MOVGR2FRH_W: -+ Format(instr, "movgr2frh.w 'fd, 'rj"); -+ break; -+ case MOVFR2GR_S: -+ Format(instr, "movfr2gr.s 'rd, 'fj"); -+ break; -+ case MOVFR2GR_D: -+ Format(instr, "movfr2gr.d 'rd, 'fj"); -+ break; -+ case MOVFRH2GR_S: -+ 
Format(instr, "movfrh2gr.s 'rd, 'fj"); -+ break; -+ case MOVGR2FCSR: -+ Format(instr, "movgr2fcsr fcsr, 'rj"); -+ break; -+ case MOVFCSR2GR: -+ Format(instr, "movfcsr2gr 'rd, fcsr"); -+ break; -+ case FCVT_S_D: -+ Format(instr, "fcvt.s.d 'fd, 'fj"); -+ break; -+ case FCVT_D_S: -+ Format(instr, "fcvt.d.s 'fd, 'fj"); -+ break; -+ case FTINTRM_W_S: -+ Format(instr, "ftintrm.w.s 'fd, 'fj"); -+ break; -+ case FTINTRM_W_D: -+ Format(instr, "ftintrm.w.d 'fd, 'fj"); -+ break; -+ case FTINTRM_L_S: -+ Format(instr, "ftintrm.l.s 'fd, 'fj"); -+ break; -+ case FTINTRM_L_D: -+ Format(instr, "ftintrm.l.d 'fd, 'fj"); -+ break; -+ case FTINTRP_W_S: -+ Format(instr, "ftintrp.w.s 'fd, 'fj"); -+ break; -+ case FTINTRP_W_D: -+ Format(instr, "ftintrp.w.d 'fd, 'fj"); -+ break; -+ case FTINTRP_L_S: -+ Format(instr, "ftintrp.l.s 'fd, 'fj"); -+ break; -+ case FTINTRP_L_D: -+ Format(instr, "ftintrp.l.d 'fd, 'fj"); -+ break; -+ case FTINTRZ_W_S: -+ Format(instr, "ftintrz.w.s 'fd, 'fj"); -+ break; -+ case FTINTRZ_W_D: -+ Format(instr, "ftintrz.w.d 'fd, 'fj"); -+ break; -+ case FTINTRZ_L_S: -+ Format(instr, "ftintrz.l.s 'fd, 'fj"); -+ break; -+ case FTINTRZ_L_D: -+ Format(instr, "ftintrz.l.d 'fd, 'fj"); -+ break; -+ case FTINTRNE_W_S: -+ Format(instr, "ftintrne.w.s 'fd, 'fj"); -+ break; -+ case FTINTRNE_W_D: -+ Format(instr, "ftintrne.w.d 'fd, 'fj"); -+ break; -+ case FTINTRNE_L_S: -+ Format(instr, "ftintrne.l.s 'fd, 'fj"); -+ break; -+ case FTINTRNE_L_D: -+ Format(instr, "ftintrne.l.d 'fd, 'fj"); -+ break; -+ case FTINT_W_S: -+ Format(instr, "ftint.w.s 'fd, 'fj"); -+ break; -+ case FTINT_W_D: -+ Format(instr, "ftint.w.d 'fd, 'fj"); -+ break; -+ case FTINT_L_S: -+ Format(instr, "ftint.l.s 'fd, 'fj"); -+ break; -+ case FTINT_L_D: -+ Format(instr, "ftint.l.d 'fd, 'fj"); -+ break; -+ case FFINT_S_W: -+ Format(instr, "ffint.s.w 'fd, 'fj"); -+ break; -+ case FFINT_S_L: -+ Format(instr, "ffint.s.l 'fd, 'fj"); -+ break; -+ case FFINT_D_W: -+ Format(instr, "ffint.d.w 'fd, 'fj"); -+ break; -+ case FFINT_D_L: -+ Format(instr, "ffint.d.l 'fd, 'fj"); -+ break; -+ case FRINT_S: -+ Format(instr, "frint.s 'fd, 'fj"); -+ break; -+ case FRINT_D: -+ Format(instr, "frint.d 'fd, 'fj"); -+ break; -+ case MOVFR2CF: -+ Format(instr, "movfr2cf fcc'cd, 'fj"); -+ break; -+ case MOVCF2FR: -+ Format(instr, "movcf2fr 'fd, fcc'cj"); -+ break; -+ case MOVGR2CF: -+ Format(instr, "movgr2cf fcc'cd, 'rj"); -+ break; -+ case MOVCF2GR: -+ Format(instr, "movcf2gr 'rd, fcc'cj"); -+ break; -+ case FRECIP_S: -+ Format(instr, "frecip.s 'fd, 'fj"); -+ break; -+ case FRECIP_D: -+ Format(instr, "frecip.d 'fd, 'fj"); -+ break; -+ case FRSQRT_S: -+ Format(instr, "frsqrt.s 'fd, 'fj"); -+ break; -+ case FRSQRT_D: -+ Format(instr, "frsqrt.d 'fd, 'fj"); -+ break; -+ case FCLASS_S: -+ Format(instr, "fclass.s 'fd, 'fj"); -+ break; -+ case FCLASS_D: -+ Format(instr, "fclass.d 'fd, 'fj"); -+ break; -+ case FLOGB_S: -+ Format(instr, "flogb.s 'fd, 'fj"); -+ break; -+ case FLOGB_D: -+ Format(instr, "flogb.d 'fd, 'fj"); -+ break; -+ case CLO_W: -+ Format(instr, "clo.w 'rd, 'rj"); -+ break; -+ case CTO_W: -+ Format(instr, "cto.w 'rd, 'rj"); -+ break; -+ case CLO_D: -+ Format(instr, "clo.d 'rd, 'rj"); -+ break; -+ case CTO_D: -+ Format(instr, "cto.d 'rd, 'rj"); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+int Decoder::InstructionDecode(byte* instr_ptr) { -+ Instruction* instr = Instruction::At(instr_ptr); -+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ", -+ instr->InstructionBits()); -+ switch (instr->InstructionType()) { -+ case 
Instruction::kOp6Type: { -+ DecodeTypekOp6(instr); -+ break; -+ } -+ case Instruction::kOp7Type: { -+ DecodeTypekOp7(instr); -+ break; -+ } -+ case Instruction::kOp8Type: { -+ DecodeTypekOp8(instr); -+ break; -+ } -+ case Instruction::kOp10Type: { -+ DecodeTypekOp10(instr); -+ break; -+ } -+ case Instruction::kOp12Type: { -+ DecodeTypekOp12(instr); -+ break; -+ } -+ case Instruction::kOp14Type: { -+ DecodeTypekOp14(instr); -+ break; -+ } -+ case Instruction::kOp17Type: { -+ return DecodeTypekOp17(instr); -+ } -+ case Instruction::kOp22Type: { -+ DecodeTypekOp22(instr); -+ break; -+ } -+ case Instruction::kUnsupported: { -+ Format(instr, "UNSUPPORTED"); -+ break; -+ } -+ default: { -+ Format(instr, "UNSUPPORTED"); -+ break; -+ } -+ } -+ return kInstrSize; -+} -+ -+} // namespace internal -+} // namespace v8 -+ -+//------------------------------------------------------------------------------ -+ -+namespace disasm { -+ -+const char* NameConverter::NameOfAddress(byte* addr) const { -+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast(addr)); -+ return tmp_buffer_.begin(); -+} -+ -+const char* NameConverter::NameOfConstant(byte* addr) const { -+ return NameOfAddress(addr); -+} -+ -+const char* NameConverter::NameOfCPURegister(int reg) const { -+ return v8::internal::Registers::Name(reg); -+} -+ -+const char* NameConverter::NameOfXMMRegister(int reg) const { -+ return v8::internal::FPURegisters::Name(reg); -+} -+ -+const char* NameConverter::NameOfByteCPURegister(int reg) const { -+ UNREACHABLE(); -+ return "nobytereg"; -+} -+ -+const char* NameConverter::NameInCode(byte* addr) const { -+ // The default name converter is called for unknown code. So we will not try -+ // to access any memory. -+ return ""; -+} -+ -+//------------------------------------------------------------------------------ -+ -+int Disassembler::InstructionDecode(v8::internal::Vector buffer, -+ byte* instruction) { -+ v8::internal::Decoder d(converter_, buffer); -+ return d.InstructionDecode(instruction); -+} -+ -+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; } -+ -+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end, -+ UnimplementedOpcodeAction unimplemented_action) { -+ NameConverter converter; -+ Disassembler d(converter, unimplemented_action); -+ for (byte* pc = begin; pc < end;) { -+ v8::internal::EmbeddedVector buffer; -+ buffer[0] = '\0'; -+ byte* prev_pc = pc; -+ pc += d.InstructionDecode(buffer, pc); -+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast(prev_pc), -+ *reinterpret_cast(prev_pc), buffer.begin()); -+ } -+} -+ -+#undef STRING_STARTS_WITH -+ -+} // namespace disasm -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h -index dbe78ddf..71f12991 100644 ---- a/deps/v8/src/diagnostics/perf-jit.h -+++ b/deps/v8/src/diagnostics/perf-jit.h -@@ -83,6 +83,7 @@ class PerfJitLogger : public CodeEventLogger { - static const uint32_t kElfMachARM = 40; - static const uint32_t kElfMachMIPS = 8; - static const uint32_t kElfMachMIPS64 = 8; -+ static const uint32_t kElfMachLOONG64 = 258; - static const uint32_t kElfMachARM64 = 183; - static const uint32_t kElfMachS390x = 22; - static const uint32_t kElfMachPPC64 = 21; -@@ -98,6 +99,8 @@ class PerfJitLogger : public CodeEventLogger { - return kElfMachMIPS; - #elif V8_TARGET_ARCH_MIPS64 - return kElfMachMIPS64; -+#elif V8_TARGET_ARCH_LOONG64 -+ return kElfMachLOONG64; - #elif V8_TARGET_ARCH_ARM64 - return kElfMachARM64; - #elif V8_TARGET_ARCH_S390X -diff --git 
a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h -index 4809eeca..39fc6343 100644 ---- a/deps/v8/src/execution/frame-constants.h -+++ b/deps/v8/src/execution/frame-constants.h -@@ -389,6 +389,8 @@ inline static int FrameSlotToFPOffset(int slot) { - #include "src/execution/mips/frame-constants-mips.h" // NOLINT - #elif V8_TARGET_ARCH_MIPS64 - #include "src/execution/mips64/frame-constants-mips64.h" // NOLINT -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/execution/loong64/frame-constants-loong64.h" // NOLINT - #elif V8_TARGET_ARCH_S390 - #include "src/execution/s390/frame-constants-s390.h" // NOLINT - #else -diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.cc b/deps/v8/src/execution/loong64/frame-constants-loong64.cc -new file mode 100644 -index 00000000..21925d03 ---- /dev/null -+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.cc -@@ -0,0 +1,32 @@ -+// Copyright 2020 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/codegen/loong64/assembler-loong64-inl.h" -+#include "src/execution/frame-constants.h" -+#include "src/execution/frames.h" -+ -+#include "src/execution/loong64/frame-constants-loong64.h" -+ -+namespace v8 { -+namespace internal { -+ -+Register JavaScriptFrame::fp_register() { return v8::internal::fp; } -+Register JavaScriptFrame::context_register() { return cp; } -+Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); } -+ -+int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) { -+ return register_count; -+} -+ -+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { -+ USE(register_count); -+ return 0; -+} -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_TARGET_ARCH_LOONG64 -diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h -new file mode 100644 -index 00000000..a11fedfb ---- /dev/null -+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h -@@ -0,0 +1,75 @@ -+// Copyright 2020 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_ -+#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_ -+ -+#include "src/base/bits.h" -+#include "src/base/macros.h" -+#include "src/execution/frame-constants.h" -+ -+namespace v8 { -+namespace internal { -+ -+class EntryFrameConstants : public AllStatic { -+ public: -+ // This is the offset to where JSEntry pushes the current value of -+ // Isolate::c_entry_fp onto the stack. -+ static constexpr int kCallerFPOffset = -+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); -+}; -+ -+class WasmCompileLazyFrameConstants : public TypedFrameConstants { -+ public: -+ static constexpr int kNumberOfSavedGpParamRegs = 7; -+ static constexpr int kNumberOfSavedFpParamRegs = 7; -+ -+ // FP-relative. -+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(7); -+ static constexpr int kFixedFrameSizeFromFp = -+ TypedFrameConstants::kFixedFrameSizeFromFp + -+ kNumberOfSavedGpParamRegs * kPointerSize + -+ kNumberOfSavedFpParamRegs * kDoubleSize; -+}; -+ -+// Frame constructed by the {WasmDebugBreak} builtin. 
-+// After pushing the frame type marker, the builtin pushes all Liftoff cache -+// registers (see liftoff-assembler-defs.h). -+class WasmDebugBreakFrameConstants : public TypedFrameConstants { -+ public: -+ // {a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, t6, t7, t8} -+ static constexpr uint32_t kPushedGpRegs = 0b111111111111111110000; -+ // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26} -+ static constexpr uint32_t kPushedFpRegs = 0b101010101010101010101010101; -+ -+ static constexpr int kNumPushedGpRegisters = -+ base::bits::CountPopulation(kPushedGpRegs); -+ static constexpr int kNumPushedFpRegisters = -+ base::bits::CountPopulation(kPushedFpRegs); -+ -+ static constexpr int kLastPushedGpRegisterOffset = -+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize; -+ static constexpr int kLastPushedFpRegisterOffset = -+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize; -+ -+ // Offsets are fp-relative. -+ static int GetPushedGpRegisterOffset(int reg_code) { -+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code)); -+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1); -+ return kLastPushedGpRegisterOffset + -+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize; -+ } -+ -+ static int GetPushedFpRegisterOffset(int reg_code) { -+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code)); -+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1); -+ return kLastPushedFpRegisterOffset + -+ base::bits::CountPopulation(lower_regs) * kDoubleSize; -+ } -+}; -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_ -diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc -new file mode 100644 -index 00000000..030d57f1 ---- /dev/null -+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc -@@ -0,0 +1,5563 @@ -+// Copyright 2020 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#include "src/execution/loong64/simulator-loong64.h" -+ -+// Only build the simulator if not compiling for real LOONG64 hardware. -+#if defined(USE_SIMULATOR) -+ -+#include -+#include -+#include -+#include -+ -+#include "src/base/bits.h" -+#include "src/codegen/assembler-inl.h" -+#include "src/codegen/loong64/constants-loong64.h" -+#include "src/codegen/macro-assembler.h" -+#include "src/diagnostics/disasm.h" -+#include "src/heap/combined-heap.h" -+#include "src/runtime/runtime-utils.h" -+#include "src/utils/ostreams.h" -+#include "src/utils/vector.h" -+ -+namespace v8 { -+namespace internal { -+ -+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor, -+ Simulator::GlobalMonitor::Get) -+ -+// #define PRINT_SIM_LOG -+ -+// Util functions. 
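-+// MultiplyHighSigned/MultiplyHighUnsigned below return the high 64 bits of a
-+// 128-bit product: each operand is split into 32-bit halves (u = u1 * 2^32 +
-+// u0) and the partial products u0*v0, u1*v0, u0*v1 and u1*v1 are summed with
-+// their carries, keeping only the upper half.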
-+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } -+ -+uint32_t get_fcsr_condition_bit(uint32_t cc) { -+ if (cc == 0) { -+ return 23; -+ } else { -+ return 24 + cc; -+ } -+} -+ -+static int64_t MultiplyHighSigned(int64_t u, int64_t v) { -+ uint64_t u0, v0, w0; -+ int64_t u1, v1, w1, w2, t; -+ -+ u0 = u & 0xFFFFFFFFL; -+ u1 = u >> 32; -+ v0 = v & 0xFFFFFFFFL; -+ v1 = v >> 32; -+ -+ w0 = u0 * v0; -+ t = u1 * v0 + (w0 >> 32); -+ w1 = t & 0xFFFFFFFFL; -+ w2 = t >> 32; -+ w1 = u0 * v1 + w1; -+ -+ return u1 * v1 + w2 + (w1 >> 32); -+} -+ -+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) { -+ uint64_t u0, v0, w0; -+ uint64_t u1, v1, w1, w2, t; -+ -+ u0 = u & 0xFFFFFFFFL; -+ u1 = u >> 32; -+ v0 = v & 0xFFFFFFFFL; -+ v1 = v >> 32; -+ -+ w0 = u0 * v0; -+ t = u1 * v0 + (w0 >> 32); -+ w1 = t & 0xFFFFFFFFL; -+ w2 = t >> 32; -+ w1 = u0 * v1 + w1; -+ -+ return u1 * v1 + w2 + (w1 >> 32); -+} -+ -+#ifdef PRINT_SIM_LOG -+inline void printf_instr(const char* _Format, ...) { -+ va_list varList; -+ va_start(varList, _Format); -+ vprintf(_Format, varList); -+ va_end(varList); -+} -+#else -+#define printf_instr(...) -+#endif -+ -+// This macro provides a platform independent use of sscanf. The reason for -+// SScanF not being implemented in a platform independent was through -+// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time -+// Library does not provide vsscanf. -+#define SScanF sscanf // NOLINT -+ -+// The Loong64Debugger class is used by the simulator while debugging simulated -+// code. -+class Loong64Debugger { -+ public: -+ explicit Loong64Debugger(Simulator* sim) : sim_(sim) {} -+ -+ void Stop(Instruction* instr); -+ void Debug(); -+ // Print all registers with a nice formatting. -+ void PrintAllRegs(); -+ void PrintAllRegsIncludingFPU(); -+ -+ private: -+ // We set the breakpoint code to 0xFFFF to easily recognize it. -+ static const Instr kBreakpointInstr = BREAK | 0xFFFF; -+ static const Instr kNopInstr = 0x0; -+ -+ Simulator* sim_; -+ -+ int64_t GetRegisterValue(int regnum); -+ int64_t GetFPURegisterValue(int regnum); -+ float GetFPURegisterValueFloat(int regnum); -+ double GetFPURegisterValueDouble(int regnum); -+ bool GetValue(const char* desc, int64_t* value); -+ -+ // Set or delete a breakpoint. Returns true if successful. -+ bool SetBreakpoint(Instruction* breakpc); -+ bool DeleteBreakpoint(Instruction* breakpc); -+ -+ // Undo and redo all breakpoints. This is needed to bracket disassembly and -+ // execution to skip past breakpoints when run from the debugger. -+ void UndoBreakpoints(); -+ void RedoBreakpoints(); -+}; -+ -+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); } -+ -+void Loong64Debugger::Stop(Instruction* instr) { -+ // Get the stop code. 
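-+  // The code is embedded in bits 25..6 of the break instruction.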
-+ uint32_t code = instr->Bits(25, 6); -+ PrintF("Simulator hit (%u)\n", code); -+ Debug(); -+} -+ -+int64_t Loong64Debugger::GetRegisterValue(int regnum) { -+ if (regnum == kNumSimuRegisters) { -+ return sim_->get_pc(); -+ } else { -+ return sim_->get_register(regnum); -+ } -+} -+ -+int64_t Loong64Debugger::GetFPURegisterValue(int regnum) { -+ if (regnum == kNumFPURegisters) { -+ return sim_->get_pc(); -+ } else { -+ return sim_->get_fpu_register(regnum); -+ } -+} -+ -+float Loong64Debugger::GetFPURegisterValueFloat(int regnum) { -+ if (regnum == kNumFPURegisters) { -+ return sim_->get_pc(); -+ } else { -+ return sim_->get_fpu_register_float(regnum); -+ } -+} -+ -+double Loong64Debugger::GetFPURegisterValueDouble(int regnum) { -+ if (regnum == kNumFPURegisters) { -+ return sim_->get_pc(); -+ } else { -+ return sim_->get_fpu_register_double(regnum); -+ } -+} -+ -+bool Loong64Debugger::GetValue(const char* desc, int64_t* value) { -+ int regnum = Registers::Number(desc); -+ int fpuregnum = FPURegisters::Number(desc); -+ -+ if (regnum != kInvalidRegister) { -+ *value = GetRegisterValue(regnum); -+ return true; -+ } else if (fpuregnum != kInvalidFPURegister) { -+ *value = GetFPURegisterValue(fpuregnum); -+ return true; -+ } else if (strncmp(desc, "0x", 2) == 0) { -+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast(value)) == -+ 1; -+ } else { -+ return SScanF(desc, "%" SCNu64, reinterpret_cast(value)) == 1; -+ } -+ return false; -+} -+ -+bool Loong64Debugger::SetBreakpoint(Instruction* breakpc) { -+ // Check if a breakpoint can be set. If not return without any side-effects. -+ if (sim_->break_pc_ != nullptr) { -+ return false; -+ } -+ -+ // Set the breakpoint. -+ sim_->break_pc_ = breakpc; -+ sim_->break_instr_ = breakpc->InstructionBits(); -+ // Not setting the breakpoint instruction in the code itself. It will be set -+ // when the debugger shell continues. -+ return true; -+} -+ -+bool Loong64Debugger::DeleteBreakpoint(Instruction* breakpc) { -+ if (sim_->break_pc_ != nullptr) { -+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); -+ } -+ -+ sim_->break_pc_ = nullptr; -+ sim_->break_instr_ = 0; -+ return true; -+} -+ -+void Loong64Debugger::UndoBreakpoints() { -+ if (sim_->break_pc_ != nullptr) { -+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); -+ } -+} -+ -+void Loong64Debugger::RedoBreakpoints() { -+ if (sim_->break_pc_ != nullptr) { -+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr); -+ } -+} -+ -+void Loong64Debugger::PrintAllRegs() { -+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n) -+ -+ PrintF("\n"); -+ // at, v0, a0. -+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64 -+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n", -+ REG_INFO(1), REG_INFO(2), REG_INFO(4)); -+ // v1, a1. -+ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \n", -+ "", REG_INFO(3), REG_INFO(5)); -+ // a2. -+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", -+ REG_INFO(6)); -+ // a3. -+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", -+ REG_INFO(7)); -+ PrintF("\n"); -+ // a4-t3, s0-s7 -+ for (int i = 0; i < 8; i++) { -+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \n", -+ REG_INFO(8 + i), REG_INFO(16 + i)); -+ } -+ PrintF("\n"); -+ // t8, k0, LO. 
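-+  // (These row comments follow the simulator's register numbering via
-+  // Registers::Name(); the labels appear inherited from the MIPS port.)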
-+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", -+ REG_INFO(24), REG_INFO(26), REG_INFO(32)); -+ // t9, k1, HI. -+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", -+ REG_INFO(25), REG_INFO(27), REG_INFO(33)); -+ // sp, fp, gp. -+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", -+ REG_INFO(29), REG_INFO(30), REG_INFO(28)); -+ // pc. -+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 -+ " %14" PRId64 " \n", -+ REG_INFO(31), REG_INFO(34)); -+ -+#undef REG_INFO -+} -+ -+void Loong64Debugger::PrintAllRegsIncludingFPU() { -+#define FPU_REG_INFO(n) \ -+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n) -+ -+ PrintAllRegs(); -+ -+ PrintF("\n\n"); -+ // f0, f1, f2, ... f31. -+ // TODO(plind): consider printing 2 columns for space efficiency. -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30)); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31)); -+ -+#undef FPU_REG_INFO -+} -+ -+void Loong64Debugger::Debug() { -+ intptr_t last_pc = -1; -+ bool done = false; -+ -+#define COMMAND_SIZE 63 -+#define ARG_SIZE 255 -+ -+#define STR(a) #a -+#define XSTR(a) STR(a) -+ -+ char cmd[COMMAND_SIZE + 1]; -+ char arg1[ARG_SIZE + 1]; -+ char arg2[ARG_SIZE + 1]; -+ char* argv[3] = {cmd, arg1, arg2}; -+ -+ // Make sure to have a proper terminating character if reaching the limit. 
-+ cmd[COMMAND_SIZE] = 0; -+ arg1[ARG_SIZE] = 0; -+ arg2[ARG_SIZE] = 0; -+ -+ // Undo all set breakpoints while running in the debugger shell. This will -+ // make them invisible to all commands. -+ UndoBreakpoints(); -+ -+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { -+ if (last_pc != sim_->get_pc()) { -+ disasm::NameConverter converter; -+ disasm::Disassembler dasm(converter); -+ // Use a reasonably large buffer. -+ v8::internal::EmbeddedVector buffer; -+ dasm.InstructionDecode(buffer, reinterpret_cast(sim_->get_pc())); -+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin()); -+ last_pc = sim_->get_pc(); -+ } -+ char* line = ReadLine("sim> "); -+ if (line == nullptr) { -+ break; -+ } else { -+ char* last_input = sim_->last_debugger_input(); -+ if (strcmp(line, "\n") == 0 && last_input != nullptr) { -+ line = last_input; -+ } else { -+ // Ownership is transferred to sim_; -+ sim_->set_last_debugger_input(line); -+ } -+ // Use sscanf to parse the individual parts of the command line. At the -+ // moment no command expects more than two parameters. -+ int argc = SScanF(line, -+ "%" XSTR(COMMAND_SIZE) "s " -+ "%" XSTR(ARG_SIZE) "s " -+ "%" XSTR(ARG_SIZE) "s", -+ cmd, arg1, arg2); -+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { -+ Instruction* instr = reinterpret_cast(sim_->get_pc()); -+ if (!(instr->IsTrap()) || -+ instr->InstructionBits() == rtCallRedirInstr) { -+ sim_->InstructionDecode( -+ reinterpret_cast(sim_->get_pc())); -+ } else { -+ // Allow si to jump over generated breakpoints. -+ PrintF("/!\\ Jumping over generated breakpoint.\n"); -+ sim_->set_pc(sim_->get_pc() + kInstrSize); -+ } -+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { -+ // Execute the one instruction we broke at with breakpoints disabled. -+ sim_->InstructionDecode(reinterpret_cast(sim_->get_pc())); -+ // Leave the debugger shell. 
-+ done = true; -+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { -+ if (argc == 2) { -+ int64_t value; -+ double dvalue; -+ if (strcmp(arg1, "all") == 0) { -+ PrintAllRegs(); -+ } else if (strcmp(arg1, "allf") == 0) { -+ PrintAllRegsIncludingFPU(); -+ } else { -+ int regnum = Registers::Number(arg1); -+ int fpuregnum = FPURegisters::Number(arg1); -+ -+ if (regnum != kInvalidRegister) { -+ value = GetRegisterValue(regnum); -+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value, -+ value); -+ } else if (fpuregnum != kInvalidFPURegister) { -+ value = GetFPURegisterValue(fpuregnum); -+ dvalue = GetFPURegisterValueDouble(fpuregnum); -+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", -+ FPURegisters::Name(fpuregnum), value, dvalue); -+ } else { -+ PrintF("%s unrecognized\n", arg1); -+ } -+ } -+ } else { -+ if (argc == 3) { -+ if (strcmp(arg2, "single") == 0) { -+ int64_t value; -+ float fvalue; -+ int fpuregnum = FPURegisters::Number(arg1); -+ -+ if (fpuregnum != kInvalidFPURegister) { -+ value = GetFPURegisterValue(fpuregnum); -+ value &= 0xFFFFFFFFUL; -+ fvalue = GetFPURegisterValueFloat(fpuregnum); -+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue); -+ } else { -+ PrintF("%s unrecognized\n", arg1); -+ } -+ } else { -+ PrintF("print single\n"); -+ } -+ } else { -+ PrintF("print or print single\n"); -+ } -+ } -+ } else if ((strcmp(cmd, "po") == 0) || -+ (strcmp(cmd, "printobject") == 0)) { -+ if (argc == 2) { -+ int64_t value; -+ StdoutStream os; -+ if (GetValue(arg1, &value)) { -+ Object obj(value); -+ os << arg1 << ": \n"; -+#ifdef DEBUG -+ obj.Print(os); -+ os << "\n"; -+#else -+ os << Brief(obj) << "\n"; -+#endif -+ } else { -+ os << arg1 << " unrecognized\n"; -+ } -+ } else { -+ PrintF("printobject \n"); -+ } -+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 || -+ strcmp(cmd, "dump") == 0) { -+ int64_t* cur = nullptr; -+ int64_t* end = nullptr; -+ int next_arg = 1; -+ -+ if (strcmp(cmd, "stack") == 0) { -+ cur = reinterpret_cast(sim_->get_register(Simulator::sp)); -+ } else { // Command "mem". -+ int64_t value; -+ if (!GetValue(arg1, &value)) { -+ PrintF("%s unrecognized\n", arg1); -+ continue; -+ } -+ cur = reinterpret_cast(value); -+ next_arg++; -+ } -+ -+ int64_t words; -+ if (argc == next_arg) { -+ words = 10; -+ } else { -+ if (!GetValue(argv[next_arg], &words)) { -+ words = 10; -+ } -+ } -+ end = cur + words; -+ -+ bool skip_obj_print = (strcmp(cmd, "dump") == 0); -+ while (cur < end) { -+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ", -+ reinterpret_cast(cur), *cur, *cur); -+ Object obj(*cur); -+ Heap* current_heap = sim_->isolate_->heap(); -+ if (!skip_obj_print) { -+ if (obj.IsSmi() || -+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) { -+ PrintF(" ("); -+ if (obj.IsSmi()) { -+ PrintF("smi %d", Smi::ToInt(obj)); -+ } else { -+ obj.ShortPrint(); -+ } -+ PrintF(")"); -+ } -+ } -+ PrintF("\n"); -+ cur++; -+ } -+ -+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) || -+ (strcmp(cmd, "di") == 0)) { -+ disasm::NameConverter converter; -+ disasm::Disassembler dasm(converter); -+ // Use a reasonably large buffer. -+ v8::internal::EmbeddedVector buffer; -+ -+ byte* cur = nullptr; -+ byte* end = nullptr; -+ -+ if (argc == 1) { -+ cur = reinterpret_cast(sim_->get_pc()); -+ end = cur + (10 * kInstrSize); -+ } else if (argc == 2) { -+ int regnum = Registers::Number(arg1); -+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) { -+ // The argument is an address or a register name. 
-+ int64_t value; -+ if (GetValue(arg1, &value)) { -+ cur = reinterpret_cast(value); -+ // Disassemble 10 instructions at . -+ end = cur + (10 * kInstrSize); -+ } -+ } else { -+ // The argument is the number of instructions. -+ int64_t value; -+ if (GetValue(arg1, &value)) { -+ cur = reinterpret_cast(sim_->get_pc()); -+ // Disassemble instructions. -+ end = cur + (value * kInstrSize); -+ } -+ } -+ } else { -+ int64_t value1; -+ int64_t value2; -+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { -+ cur = reinterpret_cast(value1); -+ end = cur + (value2 * kInstrSize); -+ } -+ } -+ -+ while (cur < end) { -+ dasm.InstructionDecode(buffer, cur); -+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), -+ buffer.begin()); -+ cur += kInstrSize; -+ } -+ } else if (strcmp(cmd, "gdb") == 0) { -+ PrintF("relinquishing control to gdb\n"); -+ v8::base::OS::DebugBreak(); -+ PrintF("regaining control from gdb\n"); -+ } else if (strcmp(cmd, "break") == 0) { -+ if (argc == 2) { -+ int64_t value; -+ if (GetValue(arg1, &value)) { -+ if (!SetBreakpoint(reinterpret_cast(value))) { -+ PrintF("setting breakpoint failed\n"); -+ } -+ } else { -+ PrintF("%s unrecognized\n", arg1); -+ } -+ } else { -+ PrintF("break
\n"); -+ } -+ } else if (strcmp(cmd, "del") == 0) { -+ if (!DeleteBreakpoint(nullptr)) { -+ PrintF("deleting breakpoint failed\n"); -+ } -+ } else if (strcmp(cmd, "flags") == 0) { -+ PrintF("No flags on LOONG64 !\n"); -+ } else if (strcmp(cmd, "stop") == 0) { -+ int64_t value; -+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize; -+ Instruction* stop_instr = reinterpret_cast(stop_pc); -+ Instruction* msg_address = -+ reinterpret_cast(stop_pc + kInstrSize); -+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { -+ // Remove the current stop. -+ if (sim_->IsStopInstruction(stop_instr)) { -+ stop_instr->SetInstructionBits(kNopInstr); -+ msg_address->SetInstructionBits(kNopInstr); -+ } else { -+ PrintF("Not at debugger stop.\n"); -+ } -+ } else if (argc == 3) { -+ // Print information about all/the specified breakpoint(s). -+ if (strcmp(arg1, "info") == 0) { -+ if (strcmp(arg2, "all") == 0) { -+ PrintF("Stop information:\n"); -+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; -+ i++) { -+ sim_->PrintStopInfo(i); -+ } -+ } else if (GetValue(arg2, &value)) { -+ sim_->PrintStopInfo(value); -+ } else { -+ PrintF("Unrecognized argument.\n"); -+ } -+ } else if (strcmp(arg1, "enable") == 0) { -+ // Enable all/the specified breakpoint(s). -+ if (strcmp(arg2, "all") == 0) { -+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; -+ i++) { -+ sim_->EnableStop(i); -+ } -+ } else if (GetValue(arg2, &value)) { -+ sim_->EnableStop(value); -+ } else { -+ PrintF("Unrecognized argument.\n"); -+ } -+ } else if (strcmp(arg1, "disable") == 0) { -+ // Disable all/the specified breakpoint(s). -+ if (strcmp(arg2, "all") == 0) { -+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; -+ i++) { -+ sim_->DisableStop(i); -+ } -+ } else if (GetValue(arg2, &value)) { -+ sim_->DisableStop(value); -+ } else { -+ PrintF("Unrecognized argument.\n"); -+ } -+ } -+ } else { -+ PrintF("Wrong usage. Use help command for more information.\n"); -+ } -+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) { -+ // Print registers and disassemble. -+ PrintAllRegs(); -+ PrintF("\n"); -+ -+ disasm::NameConverter converter; -+ disasm::Disassembler dasm(converter); -+ // Use a reasonably large buffer. -+ v8::internal::EmbeddedVector buffer; -+ -+ byte* cur = nullptr; -+ byte* end = nullptr; -+ -+ if (argc == 1) { -+ cur = reinterpret_cast(sim_->get_pc()); -+ end = cur + (10 * kInstrSize); -+ } else if (argc == 2) { -+ int64_t value; -+ if (GetValue(arg1, &value)) { -+ cur = reinterpret_cast(value); -+ // no length parameter passed, assume 10 instructions -+ end = cur + (10 * kInstrSize); -+ } -+ } else { -+ int64_t value1; -+ int64_t value2; -+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { -+ cur = reinterpret_cast(value1); -+ end = cur + (value2 * kInstrSize); -+ } -+ } -+ -+ while (cur < end) { -+ dasm.InstructionDecode(buffer, cur); -+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), -+ buffer.begin()); -+ cur += kInstrSize; -+ } -+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { -+ PrintF("cont\n"); -+ PrintF(" continue execution (alias 'c')\n"); -+ PrintF("stepi\n"); -+ PrintF(" step one instruction (alias 'si')\n"); -+ PrintF("print \n"); -+ PrintF(" print register content (alias 'p')\n"); -+ PrintF(" use register name 'all' to print all registers\n"); -+ PrintF("printobject \n"); -+ PrintF(" print an object from a register (alias 'po')\n"); -+ PrintF("stack []\n"); -+ PrintF(" dump stack content, default dump 10 words)\n"); -+ PrintF("mem
[]\n"); -+ PrintF(" dump memory content, default dump 10 words)\n"); -+ PrintF("dump []\n"); -+ PrintF( -+ " dump memory content without pretty printing JS objects, default " -+ "dump 10 words)\n"); -+ PrintF("flags\n"); -+ PrintF(" print flags\n"); -+ PrintF("disasm []\n"); -+ PrintF("disasm [
]\n"); -+ PrintF("disasm [[
] ]\n"); -+ PrintF(" disassemble code, default is 10 instructions\n"); -+ PrintF(" from pc (alias 'di')\n"); -+ PrintF("gdb\n"); -+ PrintF(" enter gdb\n"); -+ PrintF("break
\n"); -+ PrintF(" set a break point on the address\n"); -+ PrintF("del\n"); -+ PrintF(" delete the breakpoint\n"); -+ PrintF("stop feature:\n"); -+ PrintF(" Description:\n"); -+ PrintF(" Stops are debug instructions inserted by\n"); -+ PrintF(" the Assembler::stop() function.\n"); -+ PrintF(" When hitting a stop, the Simulator will\n"); -+ PrintF(" stop and give control to the Debugger.\n"); -+ PrintF(" All stop codes are watched:\n"); -+ PrintF(" - They can be enabled / disabled: the Simulator\n"); -+ PrintF(" will / won't stop when hitting them.\n"); -+ PrintF(" - The Simulator keeps track of how many times they \n"); -+ PrintF(" are met. (See the info command.) Going over a\n"); -+ PrintF(" disabled stop still increases its counter. \n"); -+ PrintF(" Commands:\n"); -+ PrintF(" stop info all/ : print infos about number \n"); -+ PrintF(" or all stop(s).\n"); -+ PrintF(" stop enable/disable all/ : enables / disables\n"); -+ PrintF(" all or number stop(s)\n"); -+ PrintF(" stop unstop\n"); -+ PrintF(" ignore the stop instruction at the current location\n"); -+ PrintF(" from now on\n"); -+ } else { -+ PrintF("Unknown command: %s\n", cmd); -+ } -+ } -+ } -+ -+ // Add all the breakpoints back to stop execution and enter the debugger -+ // shell when hit. -+ RedoBreakpoints(); -+ -+#undef COMMAND_SIZE -+#undef ARG_SIZE -+ -+#undef STR -+#undef XSTR -+} -+ -+bool Simulator::ICacheMatch(void* one, void* two) { -+ DCHECK_EQ(reinterpret_cast(one) & CachePage::kPageMask, 0); -+ DCHECK_EQ(reinterpret_cast(two) & CachePage::kPageMask, 0); -+ return one == two; -+} -+ -+static uint32_t ICacheHash(void* key) { -+ return static_cast(reinterpret_cast(key)) >> 2; -+} -+ -+static bool AllOnOnePage(uintptr_t start, size_t size) { -+ intptr_t start_page = (start & ~CachePage::kPageMask); -+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask); -+ return start_page == end_page; -+} -+ -+void Simulator::set_last_debugger_input(char* input) { -+ DeleteArray(last_debugger_input_); -+ last_debugger_input_ = input; -+} -+ -+void Simulator::SetRedirectInstruction(Instruction* instruction) { -+ instruction->SetInstructionBits(rtCallRedirInstr); -+} -+ -+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache, -+ void* start_addr, size_t size) { -+ int64_t start = reinterpret_cast(start_addr); -+ int64_t intra_line = (start & CachePage::kLineMask); -+ start -= intra_line; -+ size += intra_line; -+ size = ((size - 1) | CachePage::kLineMask) + 1; -+ int offset = (start & CachePage::kPageMask); -+ while (!AllOnOnePage(start, size - 1)) { -+ int bytes_to_flush = CachePage::kPageSize - offset; -+ FlushOnePage(i_cache, start, bytes_to_flush); -+ start += bytes_to_flush; -+ size -= bytes_to_flush; -+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask); -+ offset = 0; -+ } -+ if (size != 0) { -+ FlushOnePage(i_cache, start, size); -+ } -+} -+ -+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache, -+ void* page) { -+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page)); -+ if (entry->value == nullptr) { -+ CachePage* new_page = new CachePage(); -+ entry->value = new_page; -+ } -+ return reinterpret_cast(entry->value); -+} -+ -+// Flush from start up to and not including start + size. 
-+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache, -+ intptr_t start, size_t size) { -+ DCHECK_LE(size, CachePage::kPageSize); -+ DCHECK(AllOnOnePage(start, size - 1)); -+ DCHECK_EQ(start & CachePage::kLineMask, 0); -+ DCHECK_EQ(size & CachePage::kLineMask, 0); -+ void* page = reinterpret_cast(start & (~CachePage::kPageMask)); -+ int offset = (start & CachePage::kPageMask); -+ CachePage* cache_page = GetCachePage(i_cache, page); -+ char* valid_bytemap = cache_page->ValidityByte(offset); -+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); -+} -+ -+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache, -+ Instruction* instr) { -+ int64_t address = reinterpret_cast(instr); -+ void* page = reinterpret_cast(address & (~CachePage::kPageMask)); -+ void* line = reinterpret_cast(address & (~CachePage::kLineMask)); -+ int offset = (address & CachePage::kPageMask); -+ CachePage* cache_page = GetCachePage(i_cache, page); -+ char* cache_valid_byte = cache_page->ValidityByte(offset); -+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); -+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask); -+ if (cache_hit) { -+ // Check that the data in memory matches the contents of the I-cache. -+ CHECK_EQ(0, memcmp(reinterpret_cast(instr), -+ cache_page->CachedData(offset), kInstrSize)); -+ } else { -+ // Cache miss. Load memory into the cache. -+ memcpy(cached_line, line, CachePage::kLineLength); -+ *cache_valid_byte = CachePage::LINE_VALID; -+ } -+} -+ -+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { -+ // Set up simulator support first. Some of this information is needed to -+ // setup the architecture state. -+ stack_size_ = FLAG_sim_stack_size * KB; -+ stack_ = reinterpret_cast(malloc(stack_size_)); -+ pc_modified_ = false; -+ icount_ = 0; -+ break_count_ = 0; -+ break_pc_ = nullptr; -+ break_instr_ = 0; -+ -+ // Set up architecture state. -+ // All registers are initialized to zero to start with. -+ for (int i = 0; i < kNumSimuRegisters; i++) { -+ registers_[i] = 0; -+ } -+ for (int i = 0; i < kNumFPURegisters; i++) { -+ FPUregisters_[i] = 0; -+ } -+ for (int i = 0; i < kNumCFRegisters; i++) { -+ CFregisters_[i] = 0; -+ } -+ -+ FCSR_ = 0; -+ -+ // The sp is initialized to point to the bottom (high address) of the -+ // allocated stack area. To be safe in potential stack underflows we leave -+ // some buffer below. -+ registers_[sp] = reinterpret_cast(stack_) + stack_size_ - 64; -+ // The ra and pc are initialized to a known bad value that will cause an -+ // access violation if the simulator ever tries to execute it. -+ registers_[pc] = bad_ra; -+ registers_[ra] = bad_ra; -+ -+ last_debugger_input_ = nullptr; -+} -+ -+Simulator::~Simulator() { -+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_); -+ free(stack_); -+} -+ -+// Get the active Simulator for the current thread. -+Simulator* Simulator::current(Isolate* isolate) { -+ v8::internal::Isolate::PerIsolateThreadData* isolate_data = -+ isolate->FindOrAllocatePerThreadDataForThisThread(); -+ DCHECK_NOT_NULL(isolate_data); -+ -+ Simulator* sim = isolate_data->simulator(); -+ if (sim == nullptr) { -+ // TODO(146): delete the simulator object when a thread/isolate goes away. -+ sim = new Simulator(isolate); -+ isolate_data->set_simulator(sim); -+ } -+ return sim; -+} -+ -+// Sets the register in the architecture state. It will also deal with updating -+// Simulator internal state for special registers such as PC. 
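-+// Writes to register 0 are dropped, matching the architectural zero
-+// register.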
-+void Simulator::set_register(int reg, int64_t value) { -+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); -+ if (reg == pc) { -+ pc_modified_ = true; -+ } -+ -+ // Zero register always holds 0. -+ registers_[reg] = (reg == 0) ? 0 : value; -+} -+ -+void Simulator::set_dw_register(int reg, const int* dbl) { -+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); -+ registers_[reg] = dbl[1]; -+ registers_[reg] = registers_[reg] << 32; -+ registers_[reg] += dbl[0]; -+} -+ -+void Simulator::set_fpu_register(int fpureg, int64_t value) { -+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); -+ FPUregisters_[fpureg] = value; -+} -+ -+void Simulator::set_fpu_register_word(int fpureg, int32_t value) { -+ // Set ONLY lower 32-bits, leaving upper bits untouched. -+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); -+ int32_t* pword; -+ pword = reinterpret_cast(&FPUregisters_[fpureg]); -+ -+ *pword = value; -+} -+ -+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) { -+ // Set ONLY upper 32-bits, leaving lower bits untouched. -+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); -+ int32_t* phiword; -+ phiword = (reinterpret_cast(&FPUregisters_[fpureg])) + 1; -+ -+ *phiword = value; -+} -+ -+void Simulator::set_fpu_register_float(int fpureg, float value) { -+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); -+ *bit_cast(&FPUregisters_[fpureg]) = value; -+} -+ -+void Simulator::set_fpu_register_double(int fpureg, double value) { -+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); -+ *bit_cast(&FPUregisters_[fpureg]) = value; -+} -+ -+void Simulator::set_cf_register(int cfreg, bool value) { -+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters)); -+ CFregisters_[cfreg] = value; -+} -+ -+// Get the register from the architecture state. This function does handle -+// the special case of accessing the PC register. -+int64_t Simulator::get_register(int reg) const { -+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); -+ if (reg == 0) -+ return 0; -+ else -+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0); -+} -+ -+double Simulator::get_double_from_register_pair(int reg) { -+ // TODO(plind): bad ABI stuff, refactor or remove. -+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); -+ -+ double dm_val = 0.0; -+ // Read the bits from the unsigned integer register_[] array -+ // into the double precision floating point value and return it. 
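-+  // (The memcpy round-trip reinterprets the bits without violating
-+  // strict-aliasing rules.)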
-+  char buffer[sizeof(registers_[0])];
-+  memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
-+  memcpy(&dm_val, buffer, sizeof(registers_[0]));
-+  return (dm_val);
-+}
-+
-+int64_t Simulator::get_fpu_register(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return FPUregisters_[fpureg];
-+}
-+
-+int32_t Simulator::get_fpu_register_word(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
-+}
-+
-+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
-+}
-+
-+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
-+}
-+
-+float Simulator::get_fpu_register_float(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
-+}
-+
-+double Simulator::get_fpu_register_double(int fpureg) const {
-+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-+  return *bit_cast<double*>(&FPUregisters_[fpureg]);
-+}
-+
-+bool Simulator::get_cf_register(int cfreg) const {
-+  DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
-+  return CFregisters_[cfreg];
-+}
-+
-+// Runtime FP routines take up to two double arguments and zero
-+// or one integer arguments. All are constructed here,
-+// from a0-a3 or fa0 and fa1 (n64).
-+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
-+  const int fparg2 = f1;
-+  *x = get_fpu_register_double(f0);
-+  *y = get_fpu_register_double(fparg2);
-+  *z = static_cast<int32_t>(get_register(a2));
-+}
-+
-+// The return value is either in v0/v1 or f0.
-+void Simulator::SetFpResult(const double& result) {
-+  set_fpu_register_double(0, result);
-+}
-+
-+// Helper functions for setting and testing the FCSR register's bits.
-+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
-+  if (value) {
-+    FCSR_ |= (1 << cc);
-+  } else {
-+    FCSR_ &= ~(1 << cc);
-+  }
-+}
-+
-+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
-+
-+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
-+  FCSR_ |= mode & kFPURoundingModeMask;
-+}
-+
-+unsigned int Simulator::get_fcsr_rounding_mode() {
-+  return FCSR_ & kFPURoundingModeMask;
-+}
-+
-+// Sets the rounding error codes in FCSR based on the result of the rounding.
-+// Returns true if the operation was invalid.
-+bool Simulator::set_fcsr_round_error(double original, double rounded) {
-+  bool ret = false;
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+
-+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (original != rounded) {
-+    set_fcsr_bit(kFCSRInexactFlagBit, true);
-+  }
-+
-+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
-+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (rounded > max_int32 || rounded < min_int32) {
-+    set_fcsr_bit(kFCSROverflowFlagBit, true);
-+    // The reference is not really clear but it seems this is required:
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  return ret;
-+}
-+
-+// Sets the rounding error codes in FCSR based on the result of the rounding.
-+// Returns true if the operation was invalid.
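-+// (Example: converting a NaN marks kFCSRInvalidOpFlagBit here, and the
-+// caller then substitutes the architectural result through the
-+// set_fpu_register_*_invalid_result helpers defined below.)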
-+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
-+  bool ret = false;
-+  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-+  // loading the most accurate representation into max_int64, which is 2^63.
-+  double max_int64 = std::numeric_limits<int64_t>::max();
-+  double min_int64 = std::numeric_limits<int64_t>::min();
-+
-+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (original != rounded) {
-+    set_fcsr_bit(kFCSRInexactFlagBit, true);
-+  }
-+
-+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
-+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (rounded >= max_int64 || rounded < min_int64) {
-+    set_fcsr_bit(kFCSROverflowFlagBit, true);
-+    // The reference is not really clear but it seems this is required:
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  return ret;
-+}
-+
-+// Sets the rounding error codes in FCSR based on the result of the rounding.
-+// Returns true if the operation was invalid.
-+bool Simulator::set_fcsr_round_error(float original, float rounded) {
-+  bool ret = false;
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+
-+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (original != rounded) {
-+    set_fcsr_bit(kFCSRInexactFlagBit, true);
-+  }
-+
-+  if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
-+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (rounded > max_int32 || rounded < min_int32) {
-+    set_fcsr_bit(kFCSROverflowFlagBit, true);
-+    // The reference is not really clear but it seems this is required:
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  return ret;
-+}
-+
-+void Simulator::set_fpu_register_word_invalid_result(float original,
-+                                                     float rounded) {
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register_word(fd_reg(), 0);
-+  } else if (rounded > max_int32) {
-+    set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-+  } else if (rounded < min_int32) {
-+    set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register(fd_reg(), 0);
-+  } else if (rounded > max_int32) {
-+    set_fpu_register(fd_reg(), kFPUInvalidResult);
-+  } else if (rounded < min_int32) {
-+    set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void Simulator::set_fpu_register_invalid_result64(float original,
-+                                                  float rounded) {
-+  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-+  // loading the most accurate representation into max_int64, which is 2^63.
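-+  // (Concretely: doubles in [2^62, 2^63) are spaced 1024 apart, so
-+  // converting INT64_MAX rounds up to 2^63; that is why the 64-bit range
-+  // checks use rounded >= max_int64 instead of a strict >.)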
-+  double max_int64 = std::numeric_limits<int64_t>::max();
-+  double min_int64 = std::numeric_limits<int64_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register(fd_reg(), 0);
-+  } else if (rounded >= max_int64) {
-+    set_fpu_register(fd_reg(), kFPU64InvalidResult);
-+  } else if (rounded < min_int64) {
-+    set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void Simulator::set_fpu_register_word_invalid_result(double original,
-+                                                     double rounded) {
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register_word(fd_reg(), 0);
-+  } else if (rounded > max_int32) {
-+    set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-+  } else if (rounded < min_int32) {
-+    set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void Simulator::set_fpu_register_invalid_result(double original,
-+                                                double rounded) {
-+  double max_int32 = std::numeric_limits<int32_t>::max();
-+  double min_int32 = std::numeric_limits<int32_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register(fd_reg(), 0);
-+  } else if (rounded > max_int32) {
-+    set_fpu_register(fd_reg(), kFPUInvalidResult);
-+  } else if (rounded < min_int32) {
-+    set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+void Simulator::set_fpu_register_invalid_result64(double original,
-+                                                  double rounded) {
-+  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-+  // loading the most accurate representation into max_int64, which is 2^63.
-+  double max_int64 = std::numeric_limits<int64_t>::max();
-+  double min_int64 = std::numeric_limits<int64_t>::min();
-+  if (std::isnan(original)) {
-+    set_fpu_register(fd_reg(), 0);
-+  } else if (rounded >= max_int64) {
-+    set_fpu_register(fd_reg(), kFPU64InvalidResult);
-+  } else if (rounded < min_int64) {
-+    set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
-+  } else {
-+    UNREACHABLE();
-+  }
-+}
-+
-+// Sets the rounding error codes in FCSR based on the result of the rounding.
-+// Returns true if the operation was invalid.
-+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
-+  bool ret = false;
-+  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-+  // loading the most accurate representation into max_int64, which is 2^63.
-+  double max_int64 = std::numeric_limits<int64_t>::max();
-+  double min_int64 = std::numeric_limits<int64_t>::min();
-+
-+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (original != rounded) {
-+    set_fcsr_bit(kFCSRInexactFlagBit, true);
-+  }
-+
-+  if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
-+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-+    ret = true;
-+  }
-+
-+  if (rounded >= max_int64 || rounded < min_int64) {
-+    set_fcsr_bit(kFCSROverflowFlagBit, true);
-+    // The reference is not really clear but it seems this is required:
-+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-+    ret = true;
-+  }
-+
-+  return ret;
-+}
-+
-+// For ftint instructions only
-+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
-+                                        int32_t* rounded_int) {
-+  // 0 RN (round to nearest): Round a result to the nearest
-+  // representable value; if the result is exactly halfway between
-+  // two representable values, round to the even one.
-+
-+  // 1 RZ (round toward zero): Round a result to the closest
-+  // representable value whose absolute value is less than or
-+  // equal to the infinitely accurate result.
-+
-+  // 2 RP (round up, or toward +infinity): Round a result to the
-+  // next representable value up.
-+
-+  // 3 RM (round down, or toward −infinity): Round a result to
-+  // the next representable value down.
-+  switch (FCSR_ & kFPURoundingModeMask) {
-+    case kRoundToNearest:
-+      *rounded = std::floor(toRound + 0.5);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
-+        // If the number is halfway between two integers,
-+        // round to the even one.
-+        *rounded_int -= 1;
-+        *rounded -= 1.;
-+      }
-+      break;
-+    case kRoundToZero:
-+      *rounded = std::trunc(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+    case kRoundToPlusInf:
-+      *rounded = std::ceil(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+    case kRoundToMinusInf:
-+      *rounded = std::floor(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+  }
-+}
-+
-+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
-+                                          int64_t* rounded_int) {
-+  // 0 RN (round to nearest): Round a result to the nearest
-+  // representable value; if the result is exactly halfway between
-+  // two representable values, round to the even one.
-+
-+  // 1 RZ (round toward zero): Round a result to the closest
-+  // representable value whose absolute value is less than or
-+  // equal to the infinitely accurate result.
-+
-+  // 2 RP (round up, or toward +infinity): Round a result to the
-+  // next representable value up.
-+
-+  // 3 RM (round down, or toward −infinity): Round a result to
-+  // the next representable value down.
-+  switch (FCSR_ & kFPURoundingModeMask) {
-+    case kRoundToNearest:
-+      *rounded = std::floor(toRound + 0.5);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
-+        // If the number is halfway between two integers,
-+        // round to the even one.
-+        *rounded_int -= 1;
-+        *rounded -= 1.;
-+      }
-+      break;
-+    case kRoundToZero:
-+      *rounded = std::trunc(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+    case kRoundToPlusInf:
-+      *rounded = std::ceil(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+    case kRoundToMinusInf:
-+      *rounded = std::floor(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+  }
-+}
-+
-+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
-+                                        int32_t* rounded_int) {
-+  // 0 RN (round to nearest): Round a result to the nearest
-+  // representable value; if the result is exactly halfway between
-+  // two representable values, round to the even one.
-+
-+  // 1 RZ (round toward zero): Round a result to the closest
-+  // representable value whose absolute value is less than or
-+  // equal to the infinitely accurate result.
-+
-+  // 2 RP (round up, or toward +infinity): Round a result to the
-+  // next representable value up.
-+
-+  // 3 RM (round down, or toward −infinity): Round a result to
-+  // the next representable value down.
-+  switch (FCSR_ & kFPURoundingModeMask) {
-+    case kRoundToNearest:
-+      *rounded = std::floor(toRound + 0.5);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
-+        // If the number is halfway between two integers,
-+        // round to the even one.
-+        *rounded_int -= 1;
-+        *rounded -= 1.f;
-+      }
-+      break;
-+    case kRoundToZero:
-+      *rounded = std::trunc(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+    case kRoundToPlusInf:
-+      *rounded = std::ceil(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+    case kRoundToMinusInf:
-+      *rounded = std::floor(toRound);
-+      *rounded_int = static_cast<int32_t>(*rounded);
-+      break;
-+  }
-+}
-+
-+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
-+                                          int64_t* rounded_int) {
-+  // 0 RN (round to nearest): Round a result to the nearest
-+  // representable value; if the result is exactly halfway between
-+  // two representable values, round to the even one.
-+
-+  // 1 RZ (round toward zero): Round a result to the closest
-+  // representable value whose absolute value is less than or
-+  // equal to the infinitely accurate result.
-+
-+  // 2 RP (round up, or toward +infinity): Round a result to the
-+  // next representable value up.
-+
-+  // 3 RM (round down, or toward −infinity): Round a result to
-+  // the next representable value down.
-+  switch (FCSR_ & kFPURoundingModeMask) {
-+    case kRoundToNearest:
-+      *rounded = std::floor(toRound + 0.5);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
-+        // If the number is halfway between two integers,
-+        // round to the even one.
-+        *rounded_int -= 1;
-+        *rounded -= 1.f;
-+      }
-+      break;
-+    case kRoundToZero:
-+      *rounded = std::trunc(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+    case kRoundToPlusInf:
-+      *rounded = std::ceil(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+    case kRoundToMinusInf:
-+      *rounded = std::floor(toRound);
-+      *rounded_int = static_cast<int64_t>(*rounded);
-+      break;
-+  }
-+}
-+
-+// Raw access to the PC register.
-+void Simulator::set_pc(int64_t value) {
-+  pc_modified_ = true;
-+  registers_[pc] = value;
-+}
-+
-+bool Simulator::has_bad_pc() const {
-+  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
-+}
-+
-+// Raw access to the PC register without the special adjustment when reading.
-+int64_t Simulator::get_pc() const { return registers_[pc]; }
-+
-+// TODO(plind): refactor this messy debug code when we do unaligned access.
-+void Simulator::DieOrDebug() {
-+  if ((1)) {  // Flag for this was removed.
-+    Loong64Debugger dbg(this);
-+    dbg.Debug();
-+  } else {
-+    base::OS::Abort();
-+  }
-+}
-+
-+void Simulator::TraceRegWr(int64_t value, TraceType t) {
-+  if (::v8::internal::FLAG_trace_sim) {
-+    union {
-+      int64_t fmt_int64;
-+      int32_t fmt_int32[2];
-+      float fmt_float[2];
-+      double fmt_double;
-+    } v;
-+    v.fmt_int64 = value;
-+
-+    switch (t) {
-+      case WORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
-+                 " uint32:%" PRIu32,
-+                 v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
-+        break;
-+      case DWORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
-+                 " uint64:%" PRIu64,
-+                 value, icount_, value, value);
-+        break;
-+      case FLOAT:
-+        SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
-+                 v.fmt_int64, icount_, v.fmt_float[0]);
-+        break;
-+      case DOUBLE:
-+        SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
-+                 v.fmt_int64, icount_, v.fmt_double);
-+        break;
-+      case FLOAT_DOUBLE:
-+        SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e",
-+                 v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
-+        break;
-+      case WORD_DWORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
-+                 " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
-+                 v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
-+                 v.fmt_int64, v.fmt_int64);
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+}
-+
-+// TODO(plind): consider making icount_ printing a flag option.
-+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
-+  if (::v8::internal::FLAG_trace_sim) {
-+    union {
-+      int64_t fmt_int64;
-+      int32_t fmt_int32[2];
-+      float fmt_float[2];
-+      double fmt_double;
-+    } v;
-+    v.fmt_int64 = value;
-+
-+    switch (t) {
-+      case WORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
-+                 ") int32:%" PRId32 " uint32:%" PRIu32,
-+                 v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]);
-+        break;
-+      case DWORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
-+                 ") int64:%" PRId64 " uint64:%" PRIu64,
-+                 value, addr, icount_, value, value);
-+        break;
-+      case FLOAT:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
-+                 ") flt:%e",
-+                 v.fmt_int64, addr, icount_, v.fmt_float[0]);
-+        break;
-+      case DOUBLE:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
-+                 ") dbl:%e",
-+                 v.fmt_int64, addr, icount_, v.fmt_double);
-+        break;
-+      case FLOAT_DOUBLE:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
-+                 ") flt:%e dbl:%e",
-+                 v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double);
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+}
-+
-+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
-+  if (::v8::internal::FLAG_trace_sim) {
-+    switch (t) {
-+      case BYTE:
-+        SNPrintF(trace_buf_,
-+                 " %02" PRIx8 " --> [%016" PRIx64 "] (%" PRId64
-+                 ")",
-+                 static_cast<uint8_t>(value), addr, icount_);
-+        break;
-+      case HALF:
-+        SNPrintF(trace_buf_,
-+                 " %04" PRIx16 " --> [%016" PRIx64 "] (%" PRId64
-+                 ")",
-+                 static_cast<uint16_t>(value), addr, icount_);
-+        break;
-+      case WORD:
-+        SNPrintF(trace_buf_,
-+                 " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64 ")",
-+                 static_cast<uint32_t>(value), addr, icount_);
-+        break;
-+      case DWORD:
-+        SNPrintF(trace_buf_,
-+                 "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )",
-+                 value, addr, icount_);
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+}
-+
-+template <typename T>
-+void Simulator::TraceMemRd(int64_t addr, T value) {
-+  if (::v8::internal::FLAG_trace_sim) {
-+    switch (sizeof(T)) {
-+      case 1:
-+        SNPrintF(trace_buf_,
-+                 "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64
-+                 ") int8:%" PRId8 " uint8:%" PRIu8,
-+                 static_cast<uint8_t>(value), addr, icount_,
-+                 static_cast<int8_t>(value), static_cast<uint8_t>(value));
-+        break;
-+      case 2:
-+        SNPrintF(trace_buf_,
-+                 "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64
-+                 ") int16:%" PRId16 " uint16:%" PRIu16,
-+                 static_cast<uint16_t>(value), addr, icount_,
-+                 static_cast<int16_t>(value), static_cast<uint16_t>(value));
-+        break;
-+      case 4:
-+        SNPrintF(trace_buf_,
-+                 "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64
-+                 ") int32:%" PRId32 " uint32:%" PRIu32,
-+                 static_cast<uint32_t>(value), addr, icount_,
-+                 static_cast<int32_t>(value), static_cast<uint32_t>(value));
-+        break;
-+      case 8:
-+        SNPrintF(trace_buf_,
-+                 "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64
-+                 ") int64:%" PRId64 " uint64:%" PRIu64,
-+                 static_cast<uint64_t>(value), addr, icount_,
-+                 static_cast<int64_t>(value), static_cast<uint64_t>(value));
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+}
-+
-+template <typename T>
-+void Simulator::TraceMemWr(int64_t addr, T value) {
-+  if (::v8::internal::FLAG_trace_sim) {
-+    switch (sizeof(T)) {
-+      case 1:
-+        SNPrintF(trace_buf_,
-+                 " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64 ")",
-+                 static_cast<uint8_t>(value), addr, icount_);
-+        break;
-+      case 2:
-+        SNPrintF(trace_buf_,
-+                 " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")",
-+                 static_cast<uint16_t>(value), addr, icount_);
-+        break;
-+      case 4:
-+        SNPrintF(trace_buf_,
-+                 "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")",
-+                 static_cast<uint32_t>(value), addr, icount_);
-+        break;
-+      case 8:
-+        SNPrintF(trace_buf_,
-+                 "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")",
-+                 static_cast<uint64_t>(value), addr, icount_);
-+        break;
-+      default:
-+        UNREACHABLE();
-+    }
-+  }
-+}
-+
-+// TODO(plind): sign-extend and zero-extend not implemented properly
-+// on all the ReadXX functions, I don't think re-interpret cast does it.
-+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           " \n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  /* if ((addr & 0x3) == 0)*/ {
-+    local_monitor_.NotifyLoad();
-+    int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-+    TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
-+    return *ptr;
-+  }
-+  // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr,
-+  //        reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+  // return 0;
-+}
-+
-+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           " \n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  // if ((addr & 0x3) == 0) {
-+  local_monitor_.NotifyLoad();
-+  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
-+  TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
-+  return *ptr;
-+  // }
-+  // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
-+  //        reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+  // return 0;
-+}
-+
-+void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           " \n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  /*if ((addr & 0x3) == 0)*/ {
-+    local_monitor_.NotifyStore();
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+    TraceMemWr(addr, value, WORD);
-+    int* ptr = reinterpret_cast<int*>(addr);
-+    *ptr = value;
-+    return;
-+  }
-+  // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr,
-+  //        reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+}
-+
-+void Simulator::WriteConditionalW(int64_t addr, int32_t value,
-+                                  Instruction* instr, int32_t rk_reg) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           " \n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  if ((addr & 0x3) == 0) {
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
-+        GlobalMonitor::Get()->NotifyStoreConditional_Locked(
-+            addr, &global_monitor_thread_)) {
-+      local_monitor_.NotifyStore();
-+      GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+      TraceMemWr(addr, value, WORD);
-+      int* ptr = reinterpret_cast<int*>(addr);
-+      *ptr = value;
-+      set_register(rk_reg, 1);
-+    } else {
-+      set_register(rk_reg, 0);
-+    }
-+    return;
-+  }
-+  PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
-+         reinterpret_cast<intptr_t>(instr));
-+  DieOrDebug();
-+}
-+
-+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           " \n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  /* if ((addr & kPointerAlignmentMask) == 0)*/ {
-+    local_monitor_.NotifyLoad();
-+    int64_t* ptr = reinterpret_cast<int64_t*>(addr);
-+    TraceMemRd(addr, *ptr);
-+    return *ptr;
-+  }
-+  // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr,
-+  //        reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+  // return 0;
-+}
-+
-+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
-+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           "\n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  /*if ((addr & kPointerAlignmentMask) == 0)*/ {
-+    local_monitor_.NotifyStore();
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+    TraceMemWr(addr, value, DWORD);
-+    int64_t* ptr = reinterpret_cast<int64_t*>(addr);
-+    *ptr = value;
-+    return;
-+  }
-+  // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr,
-+  //        reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+}
-+
-+void Simulator::WriteConditional2W(int64_t addr, int64_t value,
-+                                   Instruction* instr, int32_t rk_reg) {
-+  if (addr >= 0 && addr < 0x400) {
-+    // This has to be a nullptr-dereference, drop into debugger.
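-+    // (Addresses below 0x400 can realistically only come from a field
-+    // access through a null object pointer, so the simulator treats them
-+    // as a nullptr dereference and drops into the debugger for inspection.)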
-+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
-+           "\n",
-+           addr, reinterpret_cast<intptr_t>(instr));
-+    DieOrDebug();
-+  }
-+  if ((addr & kPointerAlignmentMask) == 0) {
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    if (local_monitor_.NotifyStoreConditional(addr,
-+                                              TransactionSize::DoubleWord) &&
-+        GlobalMonitor::Get()->NotifyStoreConditional_Locked(
-+            addr, &global_monitor_thread_)) {
-+      local_monitor_.NotifyStore();
-+      GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+      TraceMemWr(addr, value, DWORD);
-+      int64_t* ptr = reinterpret_cast<int64_t*>(addr);
-+      *ptr = value;
-+      set_register(rk_reg, 1);
-+    } else {
-+      set_register(rk_reg, 0);
-+    }
-+    return;
-+  }
-+  PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
-+         reinterpret_cast<intptr_t>(instr));
-+  DieOrDebug();
-+}
-+
-+double Simulator::ReadD(int64_t addr, Instruction* instr) {
-+  /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
-+    local_monitor_.NotifyLoad();
-+    double* ptr = reinterpret_cast<double*>(addr);
-+    return *ptr;
-+  }
-+  // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
-+  //        "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // base::OS::Abort();
-+  // return 0;
-+}
-+
-+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
-+  /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
-+    local_monitor_.NotifyStore();
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+    double* ptr = reinterpret_cast<double*>(addr);
-+    *ptr = value;
-+    return;
-+  }
-+  // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
-+  //        "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+}
-+
-+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
-+  // if ((addr & 1) == 0) {
-+  local_monitor_.NotifyLoad();
-+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
-+  return *ptr;
-+  // }
-+  // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
-+  //        " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+  // return 0;
-+}
-+
-+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
-+  // if ((addr & 1) == 0) {
-+  local_monitor_.NotifyLoad();
-+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
-+  return *ptr;
-+  // }
-+  // PrintF("Unaligned signed halfword read at 0x%08" PRIx64
-+  //        " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+  // return 0;
-+}
-+
-+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
-+  // if ((addr & 1) == 0) {
-+  local_monitor_.NotifyStore();
-+  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+  TraceMemWr(addr, value, HALF);
-+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-+  *ptr = value;
-+  return;
-+  // }
-+  // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
-+  //        " , pc=0x%08" V8PRIxPTR "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+}
-+
-+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
-+  // if ((addr & 1) == 0) {
-+  local_monitor_.NotifyStore();
-+  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+  TraceMemWr(addr, value, HALF);
-+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-+  *ptr = value;
-+  return;
-+  // }
-+  // PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
-+  //        "\n",
-+  //        addr, reinterpret_cast<intptr_t>(instr));
-+  // DieOrDebug();
-+}
-+
-+uint32_t Simulator::ReadBU(int64_t addr) {
-+  local_monitor_.NotifyLoad();
-+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
-+  return *ptr & 0xFF;
-+}
-+
-+int32_t Simulator::ReadB(int64_t addr) {
-+  local_monitor_.NotifyLoad();
-+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
-+  return *ptr;
-+}
-+
-+void Simulator::WriteB(int64_t addr, uint8_t value) {
-+  local_monitor_.NotifyStore();
-+  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+  TraceMemWr(addr, value, BYTE);
-+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-+  *ptr = value;
-+}
-+
-+void Simulator::WriteB(int64_t addr, int8_t value) {
-+  local_monitor_.NotifyStore();
-+  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+  TraceMemWr(addr, value, BYTE);
-+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-+  *ptr = value;
-+}
-+
-+template <typename T>
-+T Simulator::ReadMem(int64_t addr, Instruction* instr) {
-+  int alignment_mask = (1 << sizeof(T)) - 1;
-+  if ((addr & alignment_mask) == 0) {
-+    local_monitor_.NotifyLoad();
-+    T* ptr = reinterpret_cast<T*>(addr);
-+    TraceMemRd(addr, *ptr);
-+    return *ptr;
-+  }
-+  PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
-+         "\n",
-+         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
-+  base::OS::Abort();
-+  return 0;
-+}
-+
-+template <typename T>
-+void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
-+  int alignment_mask = (1 << sizeof(T)) - 1;
-+  if ((addr & alignment_mask) == 0) {
-+    local_monitor_.NotifyStore();
-+    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-+    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-+    T* ptr = reinterpret_cast<T*>(addr);
-+    *ptr = value;
-+    TraceMemWr(addr, value);
-+    return;
-+  }
-+  PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
-+         "\n",
-+         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
-+  base::OS::Abort();
-+}
-+
-+// Returns the limit of the stack area to enable checking for stack overflows.
-+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
-+  // The simulator uses a separate JS stack. If we have exhausted the C stack,
-+  // we also drop down the JS limit to reflect the exhaustion on the JS stack.
-+  if (GetCurrentStackPosition() < c_limit) {
-+    return reinterpret_cast<uintptr_t>(get_sp());
-+  }
-+
-+  // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
-+  // to prevent overrunning the stack when pushing values.
-+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
-+}
-+
-+// Unsupported instructions use Format to print an error and stop execution.
-+void Simulator::Format(Instruction* instr, const char* format) {
-+  PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
-+         reinterpret_cast<intptr_t>(instr), format);
-+  UNIMPLEMENTED();
-+}
-+
-+// Calls into the V8 runtime are based on this very simple interface.
-+// Note: To be able to return two values from some calls the code in runtime.cc
-+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-+// 64-bit value. With the code below we assume that all runtime calls return
-+// 64 bits of result. If they don't, the v1 result register contains a bogus
-+// value, which is fine because it is caller-saved.
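-+//
-+// Illustrative sketch (AddPair is a hypothetical example, not a V8
-+// function) of what a redirected target looks like under this interface:
-+//
-+//   ObjectPair AddPair(int64_t a, int64_t b, int64_t, int64_t, int64_t,
-+//                      int64_t, int64_t, int64_t, int64_t, int64_t) {
-+//     ObjectPair result = {a + b, a - b};
-+//     return result;  // result.x lands in v0, result.y in v1 (see below).
-+//   }
-+//
-+// SoftwareInterrupt() collects the ten integer arguments from a0..a7 plus
-+// two stack slots before making such a call.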
-+ -+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1, -+ int64_t arg2, int64_t arg3, -+ int64_t arg4, int64_t arg5, -+ int64_t arg6, int64_t arg7, -+ int64_t arg8, int64_t arg9); -+ -+// These prototypes handle the four types of FP calls. -+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1); -+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1); -+using SimulatorRuntimeFPCall = double (*)(double darg0); -+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0); -+ -+// This signature supports direct call in to API function native callback -+// (refer to InvocationCallback in v8.h). -+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0); -+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1); -+ -+// This signature supports direct call to accessor getter callback. -+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1); -+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1, -+ void* arg2); -+ -+// Software interrupt instructions are used by the simulator to call into the -+// C-based V8 runtime. They are also used for debugging with simulator. -+void Simulator::SoftwareInterrupt() { -+ int32_t opcode_hi15 = instr_.Bits(31, 17); -+ CHECK_EQ(opcode_hi15, 0x15); -+ uint32_t code = instr_.Bits(14, 0); -+ // We first check if we met a call_rt_redirected. -+ if (instr_.InstructionBits() == rtCallRedirInstr) { -+ Redirection* redirection = Redirection::FromInstruction(instr_.instr()); -+ -+ int64_t* stack_pointer = reinterpret_cast(get_register(sp)); -+ -+ int64_t arg0 = get_register(a0); -+ int64_t arg1 = get_register(a1); -+ int64_t arg2 = get_register(a2); -+ int64_t arg3 = get_register(a3); -+ int64_t arg4 = get_register(a4); -+ int64_t arg5 = get_register(a5); -+ int64_t arg6 = get_register(a6); -+ int64_t arg7 = get_register(a7); -+ int64_t arg8 = stack_pointer[0]; -+ int64_t arg9 = stack_pointer[1]; -+ STATIC_ASSERT(kMaxCParameters == 10); -+ -+ bool fp_call = -+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) || -+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) || -+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) || -+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL); -+ -+ { -+ // With the hard floating point calling convention, double -+ // arguments are passed in FPU registers. Fetch the arguments -+ // from there and call the builtin using soft floating point -+ // convention. -+ switch (redirection->type()) { -+ case ExternalReference::BUILTIN_FP_FP_CALL: -+ case ExternalReference::BUILTIN_COMPARE_CALL: -+ arg0 = get_fpu_register(f0); -+ arg1 = get_fpu_register(f1); -+ arg2 = get_fpu_register(f2); -+ arg3 = get_fpu_register(f3); -+ break; -+ case ExternalReference::BUILTIN_FP_CALL: -+ arg0 = get_fpu_register(f0); -+ arg1 = get_fpu_register(f1); -+ break; -+ case ExternalReference::BUILTIN_FP_INT_CALL: -+ arg0 = get_fpu_register(f0); -+ arg1 = get_fpu_register(f1); -+ arg2 = get_register(a2); -+ break; -+ default: -+ break; -+ } -+ } -+ -+ // This is dodgy but it works because the C entry stubs are never moved. -+ // See comment in codegen-arm.cc and bug 1242173. -+ int64_t saved_ra = get_register(ra); -+ -+ intptr_t external = -+ reinterpret_cast(redirection->external_function()); -+ -+ // Based on CpuFeatures::IsSupported(FPU), Loong64 will use either hardware -+ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this -+ // simulator. 
Soft-float has additional abstraction of ExternalReference, -+ // to support serialization. -+ if (fp_call) { -+ double dval0, dval1; // one or two double parameters -+ int32_t ival; // zero or one integer parameters -+ int64_t iresult = 0; // integer return value -+ double dresult = 0; // double return value -+ GetFpArgs(&dval0, &dval1, &ival); -+ SimulatorRuntimeCall generic_target = -+ reinterpret_cast(external); -+ if (::v8::internal::FLAG_trace_sim) { -+ switch (redirection->type()) { -+ case ExternalReference::BUILTIN_FP_FP_CALL: -+ case ExternalReference::BUILTIN_COMPARE_CALL: -+ PrintF("Call to host function at %p with args %f, %f", -+ reinterpret_cast(FUNCTION_ADDR(generic_target)), -+ dval0, dval1); -+ break; -+ case ExternalReference::BUILTIN_FP_CALL: -+ PrintF("Call to host function at %p with arg %f", -+ reinterpret_cast(FUNCTION_ADDR(generic_target)), -+ dval0); -+ break; -+ case ExternalReference::BUILTIN_FP_INT_CALL: -+ PrintF("Call to host function at %p with args %f, %d", -+ reinterpret_cast(FUNCTION_ADDR(generic_target)), -+ dval0, ival); -+ break; -+ default: -+ UNREACHABLE(); -+ break; -+ } -+ } -+ switch (redirection->type()) { -+ case ExternalReference::BUILTIN_COMPARE_CALL: { -+ SimulatorRuntimeCompareCall target = -+ reinterpret_cast(external); -+ iresult = target(dval0, dval1); -+ set_register(v0, static_cast(iresult)); -+ // set_register(v1, static_cast(iresult >> 32)); -+ break; -+ } -+ case ExternalReference::BUILTIN_FP_FP_CALL: { -+ SimulatorRuntimeFPFPCall target = -+ reinterpret_cast(external); -+ dresult = target(dval0, dval1); -+ SetFpResult(dresult); -+ break; -+ } -+ case ExternalReference::BUILTIN_FP_CALL: { -+ SimulatorRuntimeFPCall target = -+ reinterpret_cast(external); -+ dresult = target(dval0); -+ SetFpResult(dresult); -+ break; -+ } -+ case ExternalReference::BUILTIN_FP_INT_CALL: { -+ SimulatorRuntimeFPIntCall target = -+ reinterpret_cast(external); -+ dresult = target(dval0, ival); -+ SetFpResult(dresult); -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ break; -+ } -+ if (::v8::internal::FLAG_trace_sim) { -+ switch (redirection->type()) { -+ case ExternalReference::BUILTIN_COMPARE_CALL: -+ PrintF("Returned %08x\n", static_cast(iresult)); -+ break; -+ case ExternalReference::BUILTIN_FP_FP_CALL: -+ case ExternalReference::BUILTIN_FP_CALL: -+ case ExternalReference::BUILTIN_FP_INT_CALL: -+ PrintF("Returned %f\n", dresult); -+ break; -+ default: -+ UNREACHABLE(); -+ break; -+ } -+ } -+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) { -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF("Call to host function at %p args %08" PRIx64 " \n", -+ reinterpret_cast(external), arg0); -+ } -+ SimulatorRuntimeDirectApiCall target = -+ reinterpret_cast(external); -+ target(arg0); -+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) { -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 -+ " \n", -+ reinterpret_cast(external), arg0, arg1); -+ } -+ SimulatorRuntimeProfilingApiCall target = -+ reinterpret_cast(external); -+ target(arg0, Redirection::ReverseRedirection(arg1)); -+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 -+ " \n", -+ reinterpret_cast(external), arg0, arg1); -+ } -+ SimulatorRuntimeDirectGetterCall target = -+ reinterpret_cast(external); -+ target(arg0, arg1); -+ } else if (redirection->type() == -+ 
ExternalReference::PROFILING_GETTER_CALL) { -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64 -+ " %08" PRIx64 " \n", -+ reinterpret_cast(external), arg0, arg1, arg2); -+ } -+ SimulatorRuntimeProfilingGetterCall target = -+ reinterpret_cast(external); -+ target(arg0, arg1, Redirection::ReverseRedirection(arg2)); -+ } else { -+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL || -+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR); -+ SimulatorRuntimeCall target = -+ reinterpret_cast(external); -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF( -+ "Call to host function at %p " -+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 -+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 -+ " , %08" PRIx64 " , %08" PRIx64 " \n", -+ reinterpret_cast(FUNCTION_ADDR(target)), arg0, arg1, arg2, -+ arg3, arg4, arg5, arg6, arg7, arg8, arg9); -+ } -+ ObjectPair result = -+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); -+ set_register(v0, (int64_t)(result.x)); -+ set_register(v1, (int64_t)(result.y)); -+ } -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1), -+ get_register(v0)); -+ } -+ set_register(ra, saved_ra); -+ set_pc(get_register(ra)); -+ -+ } else if (code <= kMaxStopCode) { -+ if (IsWatchpoint(code)) { -+ PrintWatchpoint(code); -+ } else { -+ IncreaseStopCounter(code); -+ HandleStop(code, instr_.instr()); -+ } -+ } else { -+ // All remaining break_ codes, and all traps are handled here. -+ Loong64Debugger dbg(this); -+ dbg.Debug(); -+ } -+} -+ -+// Stop helper functions. -+bool Simulator::IsWatchpoint(uint64_t code) { -+ return (code <= kMaxWatchpointCode); -+} -+ -+void Simulator::PrintWatchpoint(uint64_t code) { -+ Loong64Debugger dbg(this); -+ ++break_count_; -+ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64 -+ " ) ----------" -+ "----------------------------------", -+ code, break_count_, icount_); -+ dbg.PrintAllRegs(); // Print registers and continue running. -+} -+ -+void Simulator::HandleStop(uint64_t code, Instruction* instr) { -+ // Stop if it is enabled, otherwise go on jumping over the stop -+ // and the message address. -+ if (IsEnabledStop(code)) { -+ Loong64Debugger dbg(this); -+ dbg.Stop(instr); -+ } -+} -+ -+bool Simulator::IsStopInstruction(Instruction* instr) { -+ int32_t opcode_hi15 = instr->Bits(31, 17); -+ uint32_t code = static_cast(instr->Bits(14, 0)); -+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode && -+ code <= kMaxStopCode; -+} -+ -+bool Simulator::IsEnabledStop(uint64_t code) { -+ DCHECK_LE(code, kMaxStopCode); -+ DCHECK_GT(code, kMaxWatchpointCode); -+ return !(watched_stops_[code].count & kStopDisabledBit); -+} -+ -+void Simulator::EnableStop(uint64_t code) { -+ if (!IsEnabledStop(code)) { -+ watched_stops_[code].count &= ~kStopDisabledBit; -+ } -+} -+ -+void Simulator::DisableStop(uint64_t code) { -+ if (IsEnabledStop(code)) { -+ watched_stops_[code].count |= kStopDisabledBit; -+ } -+} -+ -+void Simulator::IncreaseStopCounter(uint64_t code) { -+ DCHECK_LE(code, kMaxStopCode); -+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) { -+ PrintF("Stop counter for code %" PRId64 -+ " has overflowed.\n" -+ "Enabling this code and reseting the counter to 0.\n", -+ code); -+ watched_stops_[code].count = 0; -+ EnableStop(code); -+ } else { -+ watched_stops_[code].count++; -+ } -+} -+ -+// Print a stop status. 
-+void Simulator::PrintStopInfo(uint64_t code) { -+ if (code <= kMaxWatchpointCode) { -+ PrintF("That is a watchpoint, not a stop.\n"); -+ return; -+ } else if (code > kMaxStopCode) { -+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1); -+ return; -+ } -+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled"; -+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit; -+ // Don't print the state of unused breakpoints. -+ if (count != 0) { -+ if (watched_stops_[code].desc) { -+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n", -+ code, code, state, count, watched_stops_[code].desc); -+ } else { -+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code, -+ code, state, count); -+ } -+ } -+} -+ -+void Simulator::SignalException(Exception e) { -+ FATAL("Error: Exception %i raised.", static_cast(e)); -+} -+ -+template -+static T FPAbs(T a); -+ -+template <> -+double FPAbs(double a) { -+ return fabs(a); -+} -+ -+template <> -+float FPAbs(float a) { -+ return fabsf(a); -+} -+ -+template -+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) { -+ if (std::isnan(a) && std::isnan(b)) { -+ *result = a; -+ } else if (std::isnan(a)) { -+ *result = b; -+ } else if (std::isnan(b)) { -+ *result = a; -+ } else if (b == a) { -+ // Handle -0.0 == 0.0 case. -+ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax -+ // negates the result. -+ *result = std::signbit(b) - static_cast(kind) ? b : a; -+ } else { -+ return false; -+ } -+ return true; -+} -+ -+template -+static T FPUMin(T a, T b) { -+ T result; -+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { -+ return result; -+ } else { -+ return b < a ? b : a; -+ } -+} -+ -+template -+static T FPUMax(T a, T b) { -+ T result; -+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) { -+ return result; -+ } else { -+ return b > a ? b : a; -+ } -+} -+ -+template -+static T FPUMinA(T a, T b) { -+ T result; -+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { -+ if (FPAbs(a) < FPAbs(b)) { -+ result = a; -+ } else if (FPAbs(b) < FPAbs(a)) { -+ result = b; -+ } else { -+ result = a < b ? a : b; -+ } -+ } -+ return result; -+} -+ -+template -+static T FPUMaxA(T a, T b) { -+ T result; -+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) { -+ if (FPAbs(a) > FPAbs(b)) { -+ result = a; -+ } else if (FPAbs(b) > FPAbs(a)) { -+ result = b; -+ } else { -+ result = a > b ? a : b; -+ } -+ } -+ return result; -+} -+ -+enum class KeepSign : bool { no = false, yes }; -+ -+template ::value, -+ int>::type = 0> -+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) { -+ DCHECK(std::isnan(arg)); -+ T qNaN = std::numeric_limits::quiet_NaN(); -+ if (keepSign == KeepSign::yes) { -+ return std::copysign(qNaN, result); -+ } -+ return qNaN; -+} -+ -+template -+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) { -+ if (std::isnan(first)) { -+ return FPUCanonalizeNaNArg(result, first, keepSign); -+ } -+ return result; -+} -+ -+template -+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) { -+ if (std::isnan(first)) { -+ return FPUCanonalizeNaNArg(result, first, keepSign); -+ } -+ return FPUCanonalizeNaNArgs(result, keepSign, args...); -+} -+ -+template -+T FPUCanonalizeOperation(Func f, T first, Args... args) { -+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...); -+} -+ -+template -+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... 
args) { -+ T result = f(first, args...); -+ if (std::isnan(result)) { -+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...); -+ } -+ return result; -+} -+ -+// Handle execution based on instruction types. -+void Simulator::DecodeTypeOp6() { -+ int64_t alu_out; -+ // Next pc. -+ int64_t next_pc = bad_ra; -+ -+ // Branch instructions common part. -+ auto BranchAndLinkHelper = [this, &next_pc]() { -+ int64_t current_pc = get_pc(); -+ set_register(ra, current_pc + kInstrSize); -+ int32_t offs26_low16 = -+ static_cast(instr_.Bits(25, 10) << 16) >> 16; -+ int32_t offs26_high10 = static_cast(instr_.Bits(9, 0) << 22) >> 6; -+ int32_t offs26 = offs26_low16 | offs26_high10; -+ next_pc = current_pc + (offs26 << 2); -+ printf_instr("Offs26: %08x\n", offs26); -+ set_pc(next_pc); -+ }; -+ -+ auto BranchOff16Helper = [this, &next_pc](bool do_branch) { -+ int64_t current_pc = get_pc(); -+ int32_t offs16 = static_cast(instr_.Bits(25, 10) << 16) >> 16; -+ printf_instr("Offs16: %08x\n", offs16); -+ int32_t offs = do_branch ? (offs16 << 2) : kInstrSize; -+ next_pc = current_pc + offs; -+ set_pc(next_pc); -+ }; -+ -+ auto BranchOff21Helper = [this, &next_pc](bool do_branch) { -+ int64_t current_pc = get_pc(); -+ int32_t offs21_low16 = -+ static_cast(instr_.Bits(25, 10) << 16) >> 16; -+ int32_t offs21_high5 = static_cast(instr_.Bits(4, 0) << 27) >> 11; -+ int32_t offs = offs21_low16 | offs21_high5; -+ printf_instr("Offs21: %08x\n", offs); -+ offs = do_branch ? (offs << 2) : kInstrSize; -+ next_pc = current_pc + offs; -+ set_pc(next_pc); -+ }; -+ -+ auto BranchOff26Helper = [this, &next_pc]() { -+ int64_t current_pc = get_pc(); -+ int32_t offs26_low16 = -+ static_cast(instr_.Bits(25, 10) << 16) >> 16; -+ int32_t offs26_high10 = static_cast(instr_.Bits(9, 0) << 22) >> 6; -+ int32_t offs26 = offs26_low16 | offs26_high10; -+ next_pc = current_pc + (offs26 << 2); -+ printf_instr("Offs26: %08x\n", offs26); -+ set_pc(next_pc); -+ }; -+ -+ auto JumpOff16Helper = [this, &next_pc]() { -+ int32_t offs16 = static_cast(instr_.Bits(25, 10) << 16) >> 16; -+ printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), offs16); -+ set_register(rd_reg(), get_pc() + kInstrSize); -+ next_pc = rj() + (offs16 << 2); -+ set_pc(next_pc); -+ }; -+ -+ switch (instr_.Bits(31, 26) << 26) { -+ case ADDU16I_D: { -+ printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si16()); -+ int32_t si16_upper = static_cast(si16()) << 16; -+ alu_out = static_cast(si16_upper) + rj(); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BEQZ: -+ printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj()); -+ BranchOff21Helper(rj() == 0); -+ break; -+ case BNEZ: -+ printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj()); -+ BranchOff21Helper(rj() != 0); -+ break; -+ case BCZ: { -+ if (instr_.Bits(9, 8) == 0b00) { -+ // BCEQZ -+ printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False"); -+ BranchOff21Helper(cj() == false); -+ } else if (instr_.Bits(9, 8) == 0b01) { -+ // BCNEZ -+ printf_instr("BCNEZ\t fcc%d: %s, ", cj_reg(), cj() ? 
"True" : "False"); -+ BranchOff21Helper(cj() == true); -+ } else { -+ UNREACHABLE(); -+ } -+ break; -+ } -+ case JIRL: -+ JumpOff16Helper(); -+ break; -+ case B: -+ printf_instr("B\t "); -+ BranchOff26Helper(); -+ break; -+ case BL: -+ printf_instr("BL\t "); -+ BranchAndLinkHelper(); -+ break; -+ case BEQ: -+ printf_instr("BEQ\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj() == rd()); -+ break; -+ case BNE: -+ printf_instr("BNE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj() != rd()); -+ break; -+ case BLT: -+ printf_instr("BLT\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj() < rd()); -+ break; -+ case BGE: -+ printf_instr("BGE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj() >= rd()); -+ break; -+ case BLTU: -+ printf_instr("BLTU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj_u() < rd_u()); -+ break; -+ case BGEU: -+ printf_instr("BGEU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), -+ rj(), Registers::Name(rd_reg()), rd()); -+ BranchOff16Helper(rj_u() >= rd_u()); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp7() { -+ int64_t alu_out; -+ -+ switch (instr_.Bits(31, 25) << 25) { -+ case LU12I_W: { -+ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n", -+ Registers::Name(rd_reg()), rd(), si20()); -+ int32_t si20_upper = static_cast(si20() << 12); -+ SetResult(rd_reg(), static_cast(si20_upper)); -+ break; -+ } -+ case LU32I_D: { -+ printf_instr("LU32I_D\t %s: %016lx, si20: %d\n", -+ Registers::Name(rd_reg()), rd(), si20()); -+ int32_t si20_signExtend = static_cast(si20() << 12) >> 12; -+ int64_t lower_32bit_mask = 0xFFFFFFFF; -+ alu_out = (static_cast(si20_signExtend) << 32) | -+ (rd() & lower_32bit_mask); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case PCADDI: { -+ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()), -+ rd(), si20()); -+ int32_t si20_signExtend = static_cast(si20() << 12) >> 10; -+ int64_t current_pc = get_pc(); -+ alu_out = static_cast(si20_signExtend) + current_pc; -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case PCALAU12I: { -+ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n", -+ Registers::Name(rd_reg()), rd(), si20()); -+ int32_t si20_signExtend = static_cast(si20() << 12); -+ int64_t current_pc = get_pc(); -+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000; -+ alu_out = static_cast(si20_signExtend) + current_pc; -+ SetResult(rd_reg(), alu_out & clear_lower12bit_mask); -+ break; -+ } -+ case PCADDU12I: { -+ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n", -+ Registers::Name(rd_reg()), rd(), si20()); -+ int32_t si20_signExtend = static_cast(si20() << 12); -+ int64_t current_pc = get_pc(); -+ alu_out = static_cast(si20_signExtend) + current_pc; -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case PCADDU18I: { -+ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n", -+ Registers::Name(rd_reg()), rd(), si20()); -+ int64_t si20_signExtend = (static_cast(si20()) << 44) >> 26; -+ int64_t current_pc = get_pc(); -+ alu_out = si20_signExtend + current_pc; -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp8() { -+ int64_t addr = 0x0; -+ int64_t si14_se = 
(static_cast(si14()) << 50) >> 48; -+ -+ switch (instr_.Bits(31, 24) << 24) { -+ case LDPTR_W: -+ printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr())); -+ break; -+ case STPTR_W: -+ printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ WriteW(rj() + si14_se, static_cast(rd()), instr_.instr()); -+ break; -+ case LDPTR_D: -+ printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr())); -+ break; -+ case STPTR_D: -+ printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ Write2W(rj() + si14_se, rd(), instr_.instr()); -+ break; -+ case LL_W: { -+ printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ addr = si14_se + rj(); -+ set_register(rd_reg(), ReadW(addr, instr_.instr())); -+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr, -+ &global_monitor_thread_); -+ break; -+ } -+ case SC_W: { -+ printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ addr = si14_se + rj(); -+ WriteConditionalW(addr, static_cast(rd()), instr_.instr(), -+ rd_reg()); -+ break; -+ } -+ case LL_D: { -+ printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ addr = si14_se + rj(); -+ set_register(rd_reg(), Read2W(addr, instr_.instr())); -+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr, -+ &global_monitor_thread_); -+ break; -+ } -+ case SC_D: { -+ printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si14_se); -+ addr = si14_se + rj(); -+ WriteConditional2W(addr, rd(), instr_.instr(), rd_reg()); -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp10() { -+ int64_t alu_out = 0x0; -+ int64_t si12_se = (static_cast(si12()) << 52) >> 52; -+ uint64_t si12_ze = (static_cast(ui12()) << 52) >> 52; -+ -+ switch (instr_.Bits(31, 22) << 22) { -+ case BSTR_W: { -+ CHECK_EQ(instr_.Bit(21), 1); -+ uint8_t lsbw_ = lsbw(); -+ uint8_t msbw_ = msbw(); -+ CHECK_LE(lsbw_, msbw_); -+ uint8_t size = msbw_ - lsbw_ + 1; -+ uint64_t mask = (1ULL << size) - 1; -+ if (instr_.Bit(15) == 0) { -+ // BSTRINS_W -+ printf_instr( -+ "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), -+ msbw_, lsbw_); -+ alu_out = static_cast((rd_u() & ~(mask << lsbw_)) | -+ ((rj_u() & mask) << lsbw_)); -+ } else { -+ // BSTRPICK_W -+ printf_instr( -+ "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), -+ msbw_, lsbw_); -+ alu_out = static_cast((rj_u() & (mask << lsbw_)) >> 
lsbw_); -+ } -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BSTRINS_D: { -+ uint8_t lsbd_ = lsbd(); -+ uint8_t msbd_ = msbd(); -+ CHECK_LE(lsbd_, msbd_); -+ printf_instr( -+ "BSTRINS_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), -+ msbd_, lsbd_); -+ uint8_t size = msbd_ - lsbd_ + 1; -+ if (size < 64) { -+ uint64_t mask = (1ULL << size) - 1; -+ alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_); -+ SetResult(rd_reg(), alu_out); -+ } else if (size == 64) { -+ SetResult(rd_reg(), rj()); -+ } -+ break; -+ } -+ case BSTRPICK_D: { -+ uint8_t lsbd_ = lsbd(); -+ uint8_t msbd_ = msbd(); -+ CHECK_LE(lsbd_, msbd_); -+ printf_instr( -+ "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(), -+ msbd_, lsbd_); -+ uint8_t size = msbd_ - lsbd_ + 1; -+ if (size < 64) { -+ uint64_t mask = (1ULL << size) - 1; -+ alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_; -+ SetResult(rd_reg(), alu_out); -+ } else if (size == 64) { -+ SetResult(rd_reg(), rj()); -+ } -+ break; -+ } -+ case SLTI: -+ printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_se); -+ SetResult(rd_reg(), rj() < si12_se ? 1 : 0); -+ break; -+ case SLTUI: -+ printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_se); -+ SetResult(rd_reg(), rj_u() < static_cast(si12_se) ? 1 : 0); -+ break; -+ case ADDI_W: { -+ printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_se); -+ int32_t alu32_out = -+ static_cast(rj()) + static_cast(si12_se); -+ SetResult(rd_reg(), alu32_out); -+ break; -+ } -+ case ADDI_D: -+ printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_se); -+ SetResult(rd_reg(), rj() + si12_se); -+ break; -+ case LU52I_D: { -+ printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_se); -+ int64_t si12_se = static_cast(si12()) << 52; -+ uint64_t mask = (1ULL << 52) - 1; -+ alu_out = si12_se + (rj() & mask); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case ANDI: -+ printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ SetResult(rd_reg(), rj() & si12_ze); -+ break; -+ case ORI: -+ printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ SetResult(rd_reg(), rj_u() | si12_ze); -+ break; -+ case XORI: -+ printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ SetResult(rd_reg(), rj_u() ^ si12_ze); -+ break; -+ case LD_B: -+ printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadB(rj() + si12_se)); -+ break; -+ case LD_H: -+ printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr())); -+ break; -+ case LD_W: -+ 
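-+      // LD_W loads a 32-bit word and sign-extends it into the 64-bit rd:
-+      // ReadW() returns a signed 32-bit value, which widens implicitly in
-+      // set_register(). LD_WU further down is the zero-extending counterpart.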
printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr())); -+ break; -+ case LD_D: -+ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr())); -+ break; -+ case ST_B: -+ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ WriteB(rj() + si12_se, static_cast(rd())); -+ break; -+ case ST_H: -+ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ WriteH(rj() + si12_se, static_cast(rd()), instr_.instr()); -+ break; -+ case ST_W: -+ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ WriteW(rj() + si12_se, static_cast(rd()), instr_.instr()); -+ break; -+ case ST_D: -+ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ Write2W(rj() + si12_se, rd(), instr_.instr()); -+ break; -+ case LD_BU: -+ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadBU(rj() + si12_se)); -+ break; -+ case LD_HU: -+ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr())); -+ break; -+ case LD_WU: -+ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), si12_ze); -+ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr())); -+ break; -+ case FLD_S: { -+ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ Registers::Name(rj_reg()), rj(), si12_ze); -+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits. 
-+ set_fpu_register_word( -+ fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE)); -+ break; -+ } -+ case FST_S: { -+ printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ Registers::Name(rj_reg()), rj(), si12_ze); -+ int32_t alu_out_32 = static_cast(get_fpu_register(fd_reg())); -+ WriteW(rj() + si12_se, alu_out_32, instr_.instr()); -+ break; -+ } -+ case FLD_D: { -+ printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj(), si12_ze); -+ set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr())); -+ TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE); -+ break; -+ } -+ case FST_D: { -+ printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj(), si12_ze); -+ WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr()); -+ TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD); -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp12() { -+ switch (instr_.Bits(31, 20) << 20) { -+ case FMADD_S: -+ printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fk_reg()), fk_float(), -+ FPURegisters::Name(fa_reg()), fa_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float())); -+ break; -+ case FMADD_D: -+ printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fk_reg()), fk_double(), -+ FPURegisters::Name(fa_reg()), fa_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), -+ std::fma(fj_double(), fk_double(), fa_double())); -+ break; -+ case FMSUB_S: -+ printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fk_reg()), fk_float(), -+ FPURegisters::Name(fa_reg()), fa_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), -+ std::fma(fj_float(), fk_float(), -fa_float())); -+ break; -+ case FMSUB_D: -+ printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fk_reg()), fk_double(), -+ FPURegisters::Name(fa_reg()), fa_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), -+ std::fma(fj_double(), fk_double(), -fa_double())); -+ break; -+ case FNMADD_S: -+ printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fk_reg()), fk_float(), -+ FPURegisters::Name(fa_reg()), fa_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), -+ std::fma(-fj_float(), fk_float(), -fa_float())); -+ break; -+ case FNMADD_D: -+ printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fk_reg()), fk_double(), -+ FPURegisters::Name(fa_reg()), fa_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), -+ std::fma(-fj_double(), fk_double(), -fa_double())); -+ break; -+ case FNMSUB_S: -+ printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ 
FPURegisters::Name(fk_reg()), fk_float(), -+ FPURegisters::Name(fa_reg()), fa_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), -+ std::fma(-fj_float(), fk_float(), fa_float())); -+ break; -+ case FNMSUB_D: -+ printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fk_reg()), fk_double(), -+ FPURegisters::Name(fa_reg()), fa_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), -+ std::fma(-fj_double(), fk_double(), fa_double())); -+ break; -+ case FCMP_COND_S: { -+ CHECK_EQ(instr_.Bits(4, 3), 0); -+ float fj = fj_float(); -+ float fk = fk_float(); -+ switch (cond()) { -+ case CAF: { -+ printf_instr("FCMP_CAF_S fcc%d\n", cd_reg()); -+ set_cf_register(cd_reg(), false); -+ break; -+ } -+ case CUN: { -+ printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CEQ: { -+ printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj == fk); -+ break; -+ } -+ case CUEQ: { -+ printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj == fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CLT: { -+ printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj < fk); -+ break; -+ } -+ case CULT: { -+ printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj < fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CLE: { -+ printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj <= fk); -+ break; -+ } -+ case CULE: { -+ printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj <= fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CNE: { -+ printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk)); -+ break; -+ } -+ case COR: { -+ printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk)); -+ break; -+ } -+ case CUNE: { -+ printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj != fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case SAF: -+ case SUN: -+ case SEQ: -+ case SUEQ: -+ case SLT: -+ case SULT: -+ case SLE: -+ case SULE: -+ case SNE: -+ case SOR: -+ case SUNE: -+ UNIMPLEMENTED(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ break; -+ } -+ case FCMP_COND_D: { -+ CHECK_EQ(instr_.Bits(4, 3), 0); -+ double fj = fj_double(); -+ double fk 
= fk_double(); -+ switch (cond()) { -+ case CAF: { -+ printf_instr("FCMP_CAF_D fcc%d\n", cd_reg()); -+ set_cf_register(cd_reg(), false); -+ break; -+ } -+ case CUN: { -+ printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CEQ: { -+ printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj == fk); -+ break; -+ } -+ case CUEQ: { -+ printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj == fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CLT: { -+ printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj < fk); -+ break; -+ } -+ case CULT: { -+ printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj < fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CLE: { -+ printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), fj <= fk); -+ break; -+ } -+ case CULE: { -+ printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj <= fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case CNE: { -+ printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk)); -+ break; -+ } -+ case COR: { -+ printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk)); -+ break; -+ } -+ case CUNE: { -+ printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(), -+ FPURegisters::Name(fj_reg()), fj, -+ FPURegisters::Name(fk_reg()), fk); -+ set_cf_register(cd_reg(), -+ (fj != fk) || std::isnan(fj) || std::isnan(fk)); -+ break; -+ } -+ case SAF: -+ case SUN: -+ case SEQ: -+ case SUEQ: -+ case SLT: -+ case SULT: -+ case SLE: -+ case SULE: -+ case SNE: -+ case SOR: -+ case SUNE: -+ UNIMPLEMENTED(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ break; -+ } -+ case FSEL: { -+ CHECK_EQ(instr_.Bits(19, 18), 0); -+ printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(), -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ if (ca() == 0) { -+ SetFPUDoubleResult(fd_reg(), fj_double()); -+ } else { -+ SetFPUDoubleResult(fd_reg(), fk_double()); -+ } -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp14() { -+ int64_t alu_out = 0x0; -+ int32_t alu32_out = 0x0; -+ -+ switch (instr_.Bits(31, 18) << 18) { -+ case ALSL: { -+ uint8_t sa = sa2() + 1; -+ alu32_out = -+ (static_cast(rj()) << sa) + static_cast(rk()); -+ if (instr_.Bit(17) == 0) { -+ // ALSL_W -+ printf_instr("ALSL_W\t %s: %016lx, %s: %016lx, %s: 
%016lx, sa2: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk(), sa2()); -+ SetResult(rd_reg(), alu32_out); -+ } else { -+ // ALSL_WU -+ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk(), sa2()); -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ } -+ break; -+ } -+ case BYTEPICK_W: { -+ CHECK_EQ(instr_.Bit(17), 0); -+ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk(), sa2()); -+ uint8_t sa = sa2() * 8; -+ if (sa == 0) { -+ alu32_out = static_cast(rk()); -+ } else { -+ int32_t mask = (1 << 31) >> (sa - 1); -+ int32_t rk_hi = (static_cast(rk()) & (~mask)) << sa; -+ int32_t rj_lo = (static_cast(rj()) & mask) >> (32 - sa); -+ alu32_out = rk_hi | rj_lo; -+ } -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ break; -+ } -+ case BYTEPICK_D: { -+ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk(), sa3()); -+ uint8_t sa = sa3() * 8; -+ if (sa == 0) { -+ alu_out = rk(); -+ } else { -+ int64_t mask = (1ULL << 63) >> (sa - 1); -+ int64_t rk_hi = (rk() & (~mask)) << sa; -+ int64_t rj_lo = (rj() & mask) >> (64 - sa); -+ alu_out = rk_hi | rj_lo; -+ } -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case ALSL_D: { -+ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk(), sa2()); -+ CHECK_EQ(instr_.Bit(17), 0); -+ uint8_t sa = sa2() + 1; -+ alu_out = (rj() << sa) + rk(); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case SLLI: { -+ DCHECK_EQ(instr_.Bit(17), 0); -+ if (instr_.Bits(17, 15) == 0b001) { -+ // SLLI_W -+ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui5()); -+ alu32_out = static_cast(rj()) << ui5(); -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ } else if ((instr_.Bits(17, 16) == 0b01)) { -+ // SLLI_D -+ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui6()); -+ SetResult(rd_reg(), rj() << ui6()); -+ } -+ break; -+ } -+ case SRLI: { -+ DCHECK_EQ(instr_.Bit(17), 0); -+ if (instr_.Bits(17, 15) == 0b001) { -+ // SRLI_W -+ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui5()); -+ alu32_out = static_cast(rj()) >> ui5(); -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ } else if (instr_.Bits(17, 16) == 0b01) { -+ // SRLI_D -+ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui6()); -+ SetResult(rd_reg(), rj_u() >> ui6()); -+ } -+ break; -+ } -+ case SRAI: { -+ DCHECK_EQ(instr_.Bit(17), 0); -+ if (instr_.Bits(17, 15) == 0b001) { -+ // SRAI_W -+ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui5()); -+ alu32_out = static_cast(rj()) >> ui5(); -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ } else if (instr_.Bits(17, 16) == 0b01) { -+ // SRAI_D -+ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", -+ 
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui6()); -+ SetResult(rd_reg(), rj() >> ui6()); -+ } -+ break; -+ } -+ case ROTRI: { -+ DCHECK_EQ(instr_.Bit(17), 0); -+ if (instr_.Bits(17, 15) == 0b001) { -+ // ROTRI_W -+ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui5()); -+ alu32_out = static_cast( -+ base::bits::RotateRight32(static_cast(rj_u()), -+ static_cast(ui5()))); -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ } else if (instr_.Bits(17, 16) == 0b01) { -+ // ROTRI_D -+ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), ui6()); -+ alu_out = -+ static_cast(base::bits::RotateRight64(rj_u(), ui6())); -+ SetResult(rd_reg(), alu_out); -+ printf_instr("ROTRI, %s, %s, %d\n", Registers::Name(rd_reg()), -+ Registers::Name(rj_reg()), ui6()); -+ } -+ break; -+ } -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp17() { -+ int64_t alu_out; -+ -+ switch (instr_.Bits(31, 15) << 15) { -+ case ADD_W: { -+ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int32_t alu32_out = static_cast(rj() + rk()); -+ // Sign-extend result of 32bit operation into 64bit register. -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ break; -+ } -+ case ADD_D: -+ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() + rk()); -+ break; -+ case SUB_W: { -+ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int32_t alu32_out = static_cast(rj() - rk()); -+ // Sign-extend result of 32bit operation into 64bit register. -+ SetResult(rd_reg(), static_cast(alu32_out)); -+ break; -+ } -+ case SUB_D: -+ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() - rk()); -+ break; -+ case SLT: -+ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() < rk() ? 1 : 0); -+ break; -+ case SLTU: -+ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0); -+ break; -+ case MASKEQZ: -+ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rk() == 0 ? rj() : 0); -+ break; -+ case MASKNEZ: -+ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rk() != 0 ? 
rj() : 0); -+ break; -+ case NOR: -+ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), ~(rj() | rk())); -+ break; -+ case AND: -+ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() & rk()); -+ break; -+ case OR: -+ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() | rk()); -+ break; -+ case XOR: -+ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() ^ rk()); -+ break; -+ case ORN: -+ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() | (~rk())); -+ break; -+ case ANDN: -+ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() & (~rk())); -+ break; -+ case SLL_W: -+ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), (int32_t)rj() << (rk_u() % 32)); -+ break; -+ case SRL_W: { -+ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ alu_out = static_cast((uint32_t)rj_u() >> (rk_u() % 32)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case SRA_W: -+ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), (int32_t)rj() >> (rk_u() % 32)); -+ break; -+ case SLL_D: -+ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() << (rk_u() % 64)); -+ break; -+ case SRL_D: { -+ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ alu_out = static_cast(rj_u() >> (rk_u() % 64)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case SRA_D: -+ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() >> (rk_u() % 64)); -+ break; -+ case ROTR_W: { -+ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ alu_out = static_cast( -+ base::bits::RotateRight32(static_cast(rj_u()), -+ static_cast(rk_u() % 32))); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case ROTR_D: { -+ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ alu_out = static_cast( -+ 
base::bits::RotateRight64((rj_u()), (rk_u() % 64))); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case MUL_W: { -+ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ alu_out = static_cast(rj()) * static_cast(rk()); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case MULH_W: { -+ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int32_t rj_lo = static_cast(rj()); -+ int32_t rk_lo = static_cast(rk()); -+ alu_out = static_cast(rj_lo) * static_cast(rk_lo); -+ SetResult(rd_reg(), alu_out >> 32); -+ break; -+ } -+ case MULH_WU: { -+ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ uint32_t rj_lo = static_cast(rj_u()); -+ uint32_t rk_lo = static_cast(rk_u()); -+ alu_out = static_cast(rj_lo) * static_cast(rk_lo); -+ SetResult(rd_reg(), alu_out >> 32); -+ break; -+ } -+ case MUL_D: -+ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), rj() * rk()); -+ break; -+ case MULH_D: -+ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk())); -+ break; -+ case MULH_DU: -+ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u())); -+ break; -+ case MULW_D_W: { -+ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int64_t rj_i32 = static_cast(rj()); -+ int64_t rk_i32 = static_cast(rk()); -+ SetResult(rd_reg(), rj_i32 * rk_i32); -+ break; -+ } -+ case MULW_D_WU: { -+ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ uint64_t rj_u32 = static_cast(rj_u()); -+ uint64_t rk_u32 = static_cast(rk_u()); -+ SetResult(rd_reg(), rj_u32 * rk_u32); -+ break; -+ } -+ case DIV_W: { -+ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int32_t rj_i32 = static_cast(rj()); -+ int32_t rk_i32 = static_cast(rk()); -+ if (rj_i32 == INT_MIN && rk_i32 == -1) { -+ SetResult(rd_reg(), INT_MIN); -+ } else if (rk_i32 != 0) { -+ SetResult(rd_reg(), rj_i32 / rk_i32); -+ } -+ break; -+ } -+ case MOD_W: { -+ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ int32_t rj_i32 = static_cast(rj()); -+ int32_t rk_i32 = static_cast(rk()); -+ if (rj_i32 == INT_MIN && rk_i32 == -1) { -+ SetResult(rd_reg(), 0); -+ } else if (rk_i32 != 0) { -+ SetResult(rd_reg(), rj_i32 % rk_i32); -+ } -+ break; -+ } -+ case DIV_WU: { -+ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), 
-+ rj(), Registers::Name(rk_reg()), rk()); -+ uint32_t rj_u32 = static_cast(rj()); -+ uint32_t rk_u32 = static_cast(rk()); -+ if (rk_u32 != 0) { -+ SetResult(rd_reg(), static_cast(rj_u32 / rk_u32)); -+ } -+ break; -+ } -+ case MOD_WU: { -+ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ uint32_t rj_u32 = static_cast(rj()); -+ uint32_t rk_u32 = static_cast(rk()); -+ if (rk_u32 != 0) { -+ SetResult(rd_reg(), static_cast(rj_u32 % rk_u32)); -+ } -+ break; -+ } -+ case DIV_D: { -+ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ if (rj() == LONG_MIN && rk() == -1) { -+ SetResult(rd_reg(), LONG_MIN); -+ } else if (rk() != 0) { -+ SetResult(rd_reg(), rj() / rk()); -+ } -+ break; -+ } -+ case MOD_D: { -+ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ if (rj() == LONG_MIN && rk() == -1) { -+ SetResult(rd_reg(), 0); -+ } else if (rk() != 0) { -+ SetResult(rd_reg(), rj() % rk()); -+ } -+ break; -+ } -+ case DIV_DU: { -+ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ if (rk_u() != 0) { -+ SetResult(rd_reg(), static_cast(rj_u() / rk_u())); -+ } -+ break; -+ } -+ case MOD_DU: { -+ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ if (rk_u() != 0) { -+ SetResult(rd_reg(), static_cast(rj_u() % rk_u())); -+ } -+ break; -+ } -+ case BREAK: -+ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0)); -+ SoftwareInterrupt(); -+ break; -+ case FADD_S: { -+ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult( -+ fd_reg(), -+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; }, -+ fj_float(), fk_float())); -+ break; -+ } -+ case FADD_D: { -+ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), -+ FPUCanonalizeOperation( -+ [](double lhs, double rhs) { return lhs + rhs; }, -+ fj_double(), fk_double())); -+ break; -+ } -+ case FSUB_S: { -+ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult( -+ fd_reg(), -+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; }, -+ fj_float(), fk_float())); -+ break; -+ } -+ case FSUB_D: { -+ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), -+ FPUCanonalizeOperation( -+ [](double lhs, double rhs) { return lhs - rhs; }, -+ fj_double(), fk_double())); -+ break; -+ } -+ case FMUL_S: { -+ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), 
fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult( -+ fd_reg(), -+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; }, -+ fj_float(), fk_float())); -+ break; -+ } -+ case FMUL_D: { -+ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), -+ FPUCanonalizeOperation( -+ [](double lhs, double rhs) { return lhs * rhs; }, -+ fj_double(), fk_double())); -+ break; -+ } -+ case FDIV_S: { -+ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult( -+ fd_reg(), -+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; }, -+ fj_float(), fk_float())); -+ break; -+ } -+ case FDIV_D: { -+ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), -+ FPUCanonalizeOperation( -+ [](double lhs, double rhs) { return lhs / rhs; }, -+ fj_double(), fk_double())); -+ break; -+ } -+ case FMAX_S: -+ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float())); -+ break; -+ case FMAX_D: -+ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double())); -+ break; -+ case FMIN_S: -+ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float())); -+ break; -+ case FMIN_D: -+ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double())); -+ break; -+ case FMAXA_S: -+ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float())); -+ break; -+ case FMAXA_D: -+ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double())); -+ break; -+ case FMINA_S: -+ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float(), -+ FPURegisters::Name(fk_reg()), fk_float()); -+ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float())); -+ break; -+ case FMINA_D: -+ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ 
FPURegisters::Name(fj_reg()), fj_double(), -+ FPURegisters::Name(fk_reg()), fk_double()); -+ SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double())); -+ break; -+ case LDX_B: -+ printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadB(rj() + rk())); -+ break; -+ case LDX_H: -+ printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr())); -+ break; -+ case LDX_W: -+ printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr())); -+ break; -+ case LDX_D: -+ printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr())); -+ break; -+ case STX_B: -+ printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ WriteB(rj() + rk(), static_cast(rd())); -+ break; -+ case STX_H: -+ printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ WriteH(rj() + rk(), static_cast(rd()), instr_.instr()); -+ break; -+ case STX_W: -+ printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ WriteW(rj() + rk(), static_cast(rd()), instr_.instr()); -+ break; -+ case STX_D: -+ printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ Write2W(rj() + rk(), rd(), instr_.instr()); -+ break; -+ case LDX_BU: -+ printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadBU(rj() + rk())); -+ break; -+ case LDX_HU: -+ printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr())); -+ break; -+ case LDX_WU: -+ printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj(), Registers::Name(rk_reg()), rk()); -+ set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr())); -+ break; -+ case FLDX_S: -+ printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), -+ rk()); -+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits. 
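-+      // FLDX_S is the register-indexed form of FLD_S: the effective
-+      // address is rj + rk rather than rj + si12. The same upper-word
-+      // poisoning convention applies before the 32-bit payload is
-+      // written below.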
-+ set_fpu_register_word(fd_reg(), -+ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE)); -+ break; -+ case FLDX_D: -+ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), -+ rk()); -+ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr())); -+ break; -+ case FSTX_S: -+ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), -+ rk()); -+ WriteW(rj() + rk(), static_cast(get_fpu_register(fd_reg())), -+ instr_.instr()); -+ break; -+ case FSTX_D: -+ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), -+ rk()); -+ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr()); -+ break; -+ case AMSWAP_W: -+ printf("Sim UNIMPLEMENTED: AMSWAP_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMSWAP_D: -+ printf("Sim UNIMPLEMENTED: AMSWAP_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMADD_W: -+ printf("Sim UNIMPLEMENTED: AMADD_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMADD_D: -+ printf("Sim UNIMPLEMENTED: AMADD_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMAND_W: -+ printf("Sim UNIMPLEMENTED: AMAND_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMAND_D: -+ printf("Sim UNIMPLEMENTED: AMAND_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMOR_W: -+ printf("Sim UNIMPLEMENTED: AMOR_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMOR_D: -+ printf("Sim UNIMPLEMENTED: AMOR_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMXOR_W: -+ printf("Sim UNIMPLEMENTED: AMXOR_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMXOR_D: -+ printf("Sim UNIMPLEMENTED: AMXOR_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_W: -+ printf("Sim UNIMPLEMENTED: AMMAX_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_D: -+ printf("Sim UNIMPLEMENTED: AMMAX_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_W: -+ printf("Sim UNIMPLEMENTED: AMMIN_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_D: -+ printf("Sim UNIMPLEMENTED: AMMIN_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_WU: -+ printf("Sim UNIMPLEMENTED: AMMAX_WU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_DU: -+ printf("Sim UNIMPLEMENTED: AMMAX_DU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_WU: -+ printf("Sim UNIMPLEMENTED: AMMIN_WU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_DU: -+ printf("Sim UNIMPLEMENTED: AMMIN_DU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMSWAP_DB_W: { -+ printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int32_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), ReadW(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditionalW(rj(), static_cast(rk()), instr_.instr(), -+ rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMSWAP_DB_D: { -+ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int64_t rdvalue; -+ do { -+ { -+ base::MutexGuard 
lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), Read2W(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMADD_DB_W: { -+ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int32_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), ReadW(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditionalW(rj(), -+ static_cast(static_cast(rk()) + -+ static_cast(rd())), -+ instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMADD_DB_D: { -+ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int64_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), Read2W(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMAND_DB_W: { -+ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int32_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), ReadW(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditionalW(rj(), -+ static_cast(static_cast(rk()) & -+ static_cast(rd())), -+ instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMAND_DB_D: { -+ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int64_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), Read2W(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMOR_DB_W: { -+ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int32_t rdvalue; -+ do 
{ -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), ReadW(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditionalW(rj(), -+ static_cast(static_cast(rk()) | -+ static_cast(rd())), -+ instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMOR_DB_D: { -+ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int64_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), Read2W(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMXOR_DB_W: { -+ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int32_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), ReadW(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditionalW(rj(), -+ static_cast(static_cast(rk()) ^ -+ static_cast(rd())), -+ instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMXOR_DB_D: { -+ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), -+ rk(), Registers::Name(rj_reg()), rj()); -+ int64_t rdvalue; -+ do { -+ { -+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); -+ set_register(rd_reg(), Read2W(rj(), instr_.instr())); -+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); -+ GlobalMonitor::Get()->NotifyLoadLinked_Locked( -+ rj(), &global_monitor_thread_); -+ } -+ rdvalue = get_register(rd_reg()); -+ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg()); -+ } while (!get_register(rd_reg())); -+ set_register(rd_reg(), rdvalue); -+ } break; -+ case AMMAX_DB_W: -+ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_DB_D: -+ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_DB_W: -+ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_DB_D: -+ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_DB_WU: -+ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMAX_DB_DU: -+ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_DB_WU: -+ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case AMMIN_DB_DU: -+ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n"); -+ UNIMPLEMENTED(); -+ break; -+ case DBAR: -+ printf_instr("DBAR\n"); -+ break; -+ case IBAR: -+ printf("Sim UNIMPLEMENTED: IBAR\n"); -+ 
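-+      // IBAR is an instruction-fetch barrier. DBAR above appears safe to
-+      // treat as a no-op, since the simulated core performs its memory
-+      // accesses in program order, but instruction-fetch coherence is
-+      // presumably handled through the simulator's icache-flush path, so
-+      // reaching IBAR here just aborts via UNIMPLEMENTED() below.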
UNIMPLEMENTED(); -+ break; -+ case FSCALEB_S: -+ printf("Sim UNIMPLEMENTED: FSCALEB_S\n"); -+ UNIMPLEMENTED(); -+ break; -+ case FSCALEB_D: -+ printf("Sim UNIMPLEMENTED: FSCALEB_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ case FCOPYSIGN_S: -+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n"); -+ UNIMPLEMENTED(); -+ break; -+ case FCOPYSIGN_D: -+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n"); -+ UNIMPLEMENTED(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void Simulator::DecodeTypeOp22() { -+ int64_t alu_out; -+ -+ switch (instr_.Bits(31, 10) << 10) { -+ case CLZ_W: { -+ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ alu_out = base::bits::CountLeadingZeros32(static_cast(rj_u())); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case CTZ_W: { -+ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ alu_out = base::bits::CountTrailingZeros32(static_cast(rj_u())); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case CLZ_D: { -+ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ alu_out = base::bits::CountLeadingZeros64(static_cast(rj_u())); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case CTZ_D: { -+ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ alu_out = base::bits::CountTrailingZeros64(static_cast(rj_u())); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVB_2H: { -+ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint32_t input = static_cast(rj()); -+ uint64_t output = 0; -+ -+ uint32_t mask = 0xFF000000; -+ for (int i = 0; i < 4; i++) { -+ uint32_t tmp = mask & input; -+ if (i % 2 == 0) { -+ tmp = tmp >> 8; -+ } else { -+ tmp = tmp << 8; -+ } -+ output = output | tmp; -+ mask = mask >> 8; -+ } -+ -+ alu_out = static_cast(static_cast(output)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVB_4H: { -+ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ -+ uint64_t mask = 0xFF00000000000000; -+ for (int i = 0; i < 8; i++) { -+ uint64_t tmp = mask & input; -+ if (i % 2 == 0) { -+ tmp = tmp >> 8; -+ } else { -+ tmp = tmp << 8; -+ } -+ output = output | tmp; -+ mask = mask >> 8; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVB_2W: { -+ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ -+ uint64_t mask = 0xFF000000FF000000; -+ for (int i = 0; i < 4; i++) { -+ uint64_t tmp = mask & input; -+ if (i <= 1) { -+ tmp = tmp >> (24 - i * 16); -+ } else { -+ tmp = tmp << (i * 16 - 24); -+ } -+ output = output | tmp; -+ mask = mask >> 8; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVB_D: { -+ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ -+ uint64_t mask = 0xFF00000000000000; -+ for (int i = 0; i < 8; i++) { -+ uint64_t tmp = mask & input; -+ if (i <= 3) { -+ tmp = tmp >> (56 - i * 16); -+ } else { -+ tmp = tmp << (i 
* 16 - 56); -+ } -+ output = output | tmp; -+ mask = mask >> 8; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVH_2W: { -+ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ -+ uint64_t mask = 0xFFFF000000000000; -+ for (int i = 0; i < 4; i++) { -+ uint64_t tmp = mask & input; -+ if (i % 2 == 0) { -+ tmp = tmp >> 16; -+ } else { -+ tmp = tmp << 16; -+ } -+ output = output | tmp; -+ mask = mask >> 16; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case REVH_D: { -+ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ -+ uint64_t mask = 0xFFFF000000000000; -+ for (int i = 0; i < 4; i++) { -+ uint64_t tmp = mask & input; -+ if (i <= 1) { -+ tmp = tmp >> (48 - i * 32); -+ } else { -+ tmp = tmp << (i * 32 - 48); -+ } -+ output = output | tmp; -+ mask = mask >> 16; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BITREV_4B: { -+ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint32_t input = static_cast(rj()); -+ uint32_t output = 0; -+ uint8_t i_byte, o_byte; -+ -+ // Reverse the bit in byte for each individual byte -+ for (int i = 0; i < 4; i++) { -+ output = output >> 8; -+ i_byte = input & 0xFF; -+ -+ // Fast way to reverse bits in byte -+ // Devised by Sean Anderson, July 13, 2001 -+ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | -+ (i_byte * 0x8020LU & 0x88440LU)) * -+ 0x10101LU >> -+ 16); -+ -+ output = output | (static_cast(o_byte << 24)); -+ input = input >> 8; -+ } -+ -+ alu_out = static_cast(static_cast(output)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BITREV_8B: { -+ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint64_t input = rj_u(); -+ uint64_t output = 0; -+ uint8_t i_byte, o_byte; -+ -+ // Reverse the bit in byte for each individual byte -+ for (int i = 0; i < 8; i++) { -+ output = output >> 8; -+ i_byte = input & 0xFF; -+ -+ // Fast way to reverse bits in byte -+ // Devised by Sean Anderson, July 13, 2001 -+ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | -+ (i_byte * 0x8020LU & 0x88440LU)) * -+ 0x10101LU >> -+ 16); -+ -+ output = output | (static_cast(o_byte) << 56); -+ input = input >> 8; -+ } -+ -+ alu_out = static_cast(output); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BITREV_W: { -+ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint32_t input = static_cast(rj()); -+ uint32_t output = 0; -+ output = base::bits::ReverseBits(input); -+ alu_out = static_cast(static_cast(output)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case BITREV_D: { -+ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ alu_out = static_cast(base::bits::ReverseBits(rj_u())); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case EXT_W_B: { -+ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint8_t input = static_cast(rj()); -+ alu_out = 
static_cast(static_cast(input)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case EXT_W_H: { -+ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n", -+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), -+ rj()); -+ uint16_t input = static_cast(rj()); -+ alu_out = static_cast(static_cast(input)); -+ SetResult(rd_reg(), alu_out); -+ break; -+ } -+ case FABS_S: -+ printf_instr("FABS_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), std::abs(fj_float())); -+ break; -+ case FABS_D: -+ printf_instr("FABS_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), std::abs(fj_double())); -+ break; -+ case FNEG_S: -+ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), -fj_float()); -+ break; -+ case FNEG_D: -+ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUDoubleResult(fd_reg(), -fj_double()); -+ break; -+ case FSQRT_S: { -+ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ if (fj_float() >= 0) { -+ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float())); -+ } else { -+ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan -+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); -+ } -+ break; -+ } -+ case FSQRT_D: { -+ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ if (fj_double() >= 0) { -+ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double())); -+ } else { -+ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan -+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); -+ } -+ break; -+ } -+ case FMOV_S: -+ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUFloatResult(fd_reg(), fj_float()); -+ break; -+ case FMOV_D: -+ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_float(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUDoubleResult(fd_reg(), fj_double()); -+ break; -+ case MOVGR2FR_W: { -+ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj()); -+ set_fpu_register_word(fd_reg(), static_cast(rj())); -+ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE); -+ break; -+ } -+ case MOVGR2FR_D: -+ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj()); -+ SetFPUResult2(fd_reg(), rj()); -+ break; -+ case MOVGR2FRH_W: { -+ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ Registers::Name(rj_reg()), rj()); -+ set_fpu_register_hi_word(fd_reg(), static_cast(rj())); -+ TraceRegWr(get_fpu_register(fd_reg()), DOUBLE); -+ break; -+ } -+ case MOVFR2GR_S: { -+ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n", -+ Registers::Name(rd_reg()), rd(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ set_register(rd_reg(), -+ static_cast(get_fpu_register_word(fj_reg()))); -+ TraceRegWr(get_register(rd_reg()), WORD_DWORD); -+ break; -+ } -+ case MOVFR2GR_D: -+ printf_instr("MOVFR2GR_D\t 
%s: %016lx, %s, %016f\n", -+ Registers::Name(rd_reg()), rd(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetResult(rd_reg(), get_fpu_register(fj_reg())); -+ break; -+ case MOVFRH2GR_S: -+ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n", -+ Registers::Name(rd_reg()), rd(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg())); -+ break; -+ case MOVGR2FCSR: { -+ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_, -+ Registers::Name(rj_reg()), rj()); -+ // fcsr could be 0-3 -+ CHECK_LT(rd_reg(), 4); -+ FCSR_ = static_cast(rj()); -+ TraceRegWr(FCSR_); -+ break; -+ } -+ case MOVFCSR2GR: { -+ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n", -+ Registers::Name(rd_reg()), rd(), FCSR_); -+ // fcsr could be 0-3 -+ CHECK_LT(rj_reg(), 4); -+ SetResult(rd_reg(), FCSR_); -+ break; -+ } -+ case FCVT_S_D: -+ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ SetFPUFloatResult(fd_reg(), static_cast(fj_double())); -+ break; -+ case FCVT_D_S: -+ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ SetFPUDoubleResult(fd_reg(), static_cast(fj_float())); -+ break; -+ case FTINTRM_W_S: { -+ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::floor(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRM_W_D: { -+ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::floor(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRM_L_S: { -+ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::floor(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRM_L_D: { -+ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::floor(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRP_W_S: { -+ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::ceil(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRP_W_D: { -+ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n", -+ 
FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::ceil(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRP_L_S: { -+ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::ceil(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRP_L_D: { -+ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::ceil(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRZ_W_S: { -+ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::trunc(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRZ_W_D: { -+ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::trunc(fj); -+ int32_t result = static_cast(rounded); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRZ_L_S: { -+ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::trunc(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRZ_L_D: { -+ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::trunc(fj); -+ int64_t result = static_cast(rounded); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRNE_W_S: { -+ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::floor(fj + 0.5); -+ int32_t result = static_cast(rounded); -+ if ((result & 1) != 0 && result - fj == 0.5) { -+ // If the number is halfway between two integers, -+ // round to the even one. 
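-+        // (Editorial worked example, not in the original patch: for fj = 2.5,
-+        // floor(2.5 + 0.5) = 3.0, so result = 3; 3 is odd and 3 - 2.5 == 0.5,
-+        // so the decrement below yields 2, the nearest even integer.)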
-+ result--; -+ } -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRNE_W_D: { -+ printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::floor(fj + 0.5); -+ int32_t result = static_cast(rounded); -+ if ((result & 1) != 0 && result - fj == 0.5) { -+ // If the number is halfway between two integers, -+ // round to the even one. -+ result--; -+ } -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRNE_L_S: { -+ printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded = std::floor(fj + 0.5); -+ int64_t result = static_cast(rounded); -+ if ((result & 1) != 0 && result - fj == 0.5) { -+ // If the number is halfway between two integers, -+ // round to the even one. -+ result--; -+ } -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINTRNE_L_D: { -+ printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded = std::floor(fj + 0.5); -+ int64_t result = static_cast(rounded); -+ if ((result & 1) != 0 && result - fj == 0.5) { -+ // If the number is halfway between two integers, -+ // round to the even one. -+ result--; -+ } -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINT_W_S: { -+ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded; -+ int32_t result; -+ round_according_to_fcsr(fj, &rounded, &result); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINT_W_D: { -+ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded; -+ int32_t result; -+ round_according_to_fcsr(fj, &rounded, &result); -+ SetFPUWordResult(fd_reg(), result); -+ if (set_fcsr_round_error(fj, rounded)) { -+ set_fpu_register_word_invalid_result(fj, rounded); -+ } -+ break; -+ } -+ case FTINT_L_S: { -+ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float rounded; -+ int64_t result; -+ round64_according_to_fcsr(fj, &rounded, &result); -+ SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FTINT_L_D: { -+ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double rounded; -+ int64_t result; -+ round64_according_to_fcsr(fj, &rounded, &result); -+ 
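-+      // (Editorial note: unlike the FTINTRM/FTINTRP/FTINTRZ/FTINTRNE cases
-+      // above, which hard-code floor, ceil, trunc, or round-to-even, the
-+      // FTINT_* cases defer to the dynamic rounding mode currently held in
-+      // FCSR via round64_according_to_fcsr.)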
SetFPUResult(fd_reg(), result); -+ if (set_fcsr_round64_error(fj, rounded)) { -+ set_fpu_register_invalid_result64(fj, rounded); -+ } -+ break; -+ } -+ case FFINT_S_W: { -+ alu_out = get_fpu_register_signed_word(fj_reg()); -+ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), (int)alu_out); -+ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); -+ break; -+ } -+ case FFINT_S_L: { -+ alu_out = get_fpu_register(fj_reg()); -+ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), alu_out); -+ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); -+ break; -+ } -+ case FFINT_D_W: { -+ alu_out = get_fpu_register_signed_word(fj_reg()); -+ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), (int)alu_out); -+ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); -+ break; -+ } -+ case FFINT_D_L: { -+ alu_out = get_fpu_register(fj_reg()); -+ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), alu_out); -+ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); -+ break; -+ } -+ case FRINT_S: { -+ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_float()); -+ float fj = fj_float(); -+ float result, temp_result; -+ double temp; -+ float upper = std::ceil(fj); -+ float lower = std::floor(fj); -+ switch (get_fcsr_rounding_mode()) { -+ case kRoundToNearest: -+ printf_instr(" kRoundToNearest\n"); -+ if (upper - fj < fj - lower) { -+ result = upper; -+ } else if (upper - fj > fj - lower) { -+ result = lower; -+ } else { -+ temp_result = upper / 2; -+ float reminder = std::modf(temp_result, &temp); -+ if (reminder == 0) { -+ result = upper; -+ } else { -+ result = lower; -+ } -+ } -+ break; -+ case kRoundToZero: -+ printf_instr(" kRoundToZero\n"); -+ result = (fj > 0 ? lower : upper); -+ break; -+ case kRoundToPlusInf: -+ printf_instr(" kRoundToPlusInf\n"); -+ result = upper; -+ break; -+ case kRoundToMinusInf: -+ printf_instr(" kRoundToMinusInf\n"); -+ result = lower; -+ break; -+ } -+ SetFPUFloatResult(fd_reg(), result); -+ if (result != fj) { -+ set_fcsr_bit(kFCSRInexactFlagBit, true); -+ } -+ break; -+ } -+ case FRINT_D: { -+ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ", -+ FPURegisters::Name(fd_reg()), fd_double(), -+ FPURegisters::Name(fj_reg()), fj_double()); -+ double fj = fj_double(); -+ double result, temp, temp_result; -+ double upper = std::ceil(fj); -+ double lower = std::floor(fj); -+ switch (get_fcsr_rounding_mode()) { -+ case kRoundToNearest: -+ printf_instr(" kRoundToNearest\n"); -+ if (upper - fj < fj - lower) { -+ result = upper; -+ } else if (upper - fj > fj - lower) { -+ result = lower; -+ } else { -+ temp_result = upper / 2; -+ double reminder = std::modf(temp_result, &temp); -+ if (reminder == 0) { -+ result = upper; -+ } else { -+ result = lower; -+ } -+ } -+ break; -+ case kRoundToZero: -+ printf_instr(" kRoundToZero\n"); -+ result = (fj > 0 ? 
lower : upper);
-+          break;
-+        case kRoundToPlusInf:
-+          printf_instr(" kRoundToPlusInf\n");
-+          result = upper;
-+          break;
-+        case kRoundToMinusInf:
-+          printf_instr(" kRoundToMinusInf\n");
-+          result = lower;
-+          break;
-+      }
-+      SetFPUDoubleResult(fd_reg(), result);
-+      if (result != fj) {
-+        set_fcsr_bit(kFCSRInexactFlagBit, true);
-+      }
-+      break;
-+    }
-+    case MOVFR2CF:
-+      printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case MOVCF2FR:
-+      printf("Sim UNIMPLEMENTED: MOVCF2FR\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case MOVGR2CF:
-+      printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(),
-+                   Registers::Name(rj_reg()), rj());
-+      set_cf_register(cd_reg(), rj() & 1);
-+      break;
-+    case MOVCF2GR:
-+      printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()),
-+                   rd(), cj_reg());
-+      SetResult(rd_reg(), cj());
-+      break;
-+    case FRECIP_S:
-+      printf("Sim UNIMPLEMENTED: FRECIP_S\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FRECIP_D:
-+      printf("Sim UNIMPLEMENTED: FRECIP_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FRSQRT_S:
-+      printf("Sim UNIMPLEMENTED: FRSQRT_S\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FRSQRT_D:
-+      printf("Sim UNIMPLEMENTED: FRSQRT_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FCLASS_S:
-+      printf("Sim UNIMPLEMENTED: FCLASS_S\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FCLASS_D:
-+      printf("Sim UNIMPLEMENTED: FCLASS_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FLOGB_S:
-+      printf("Sim UNIMPLEMENTED: FLOGB_S\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case FLOGB_D:
-+      printf("Sim UNIMPLEMENTED: FLOGB_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case CLO_W:
-+      printf("Sim UNIMPLEMENTED: CLO_W\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case CTO_W:
-+      printf("Sim UNIMPLEMENTED: CTO_W\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case CLO_D:
-+      printf("Sim UNIMPLEMENTED: CLO_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    case CTO_D:
-+      printf("Sim UNIMPLEMENTED: CTO_D\n");
-+      UNIMPLEMENTED();
-+      break;
-+    // Opcodes of this type that are not listed above are rejected earlier,
-+    // during decoding, so reaching the default case indicates a decoder bug.
-+    default:
-+      UNREACHABLE();
-+  }
-+}
-+
-+// Executes the current instruction.
-+void Simulator::InstructionDecode(Instruction* instr) {
-+  if (v8::internal::FLAG_check_icache) {
-+    CheckICache(i_cache(), instr);
-+  }
-+  pc_modified_ = false;
-+
-+  v8::internal::EmbeddedVector<char, 256> buffer;
-+
-+  if (::v8::internal::FLAG_trace_sim) {
-+    SNPrintF(trace_buf_, " ");
-+    disasm::NameConverter converter;
-+    disasm::Disassembler dasm(converter);
-+    // Use a reasonably large buffer.
-+ dasm.InstructionDecode(buffer, reinterpret_cast(instr)); -+ } -+ -+ static int instr_count = 0; -+ USE(instr_count); -+ instr_ = instr; -+ printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++, -+ instr_.Bits(31, 0), get_pc()); -+ switch (instr_.InstructionType()) { -+ case Instruction::kOp6Type: -+ DecodeTypeOp6(); -+ break; -+ case Instruction::kOp7Type: -+ DecodeTypeOp7(); -+ break; -+ case Instruction::kOp8Type: -+ DecodeTypeOp8(); -+ break; -+ case Instruction::kOp10Type: -+ DecodeTypeOp10(); -+ break; -+ case Instruction::kOp12Type: -+ DecodeTypeOp12(); -+ break; -+ case Instruction::kOp14Type: -+ DecodeTypeOp14(); -+ break; -+ case Instruction::kOp17Type: -+ DecodeTypeOp17(); -+ break; -+ case Instruction::kOp22Type: -+ DecodeTypeOp22(); -+ break; -+ default: { -+ printf("instr_: %x\n", instr_.Bits(31, 0)); -+ UNREACHABLE(); -+ } -+ } -+ -+ if (::v8::internal::FLAG_trace_sim) { -+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n", -+ reinterpret_cast(instr), buffer.begin(), -+ trace_buf_.begin()); -+ } -+ -+ if (!pc_modified_) { -+ set_register(pc, reinterpret_cast(instr) + kInstrSize); -+ } -+} -+ -+void Simulator::Execute() { -+ // Get the PC to simulate. Cannot use the accessor here as we need the -+ // raw PC value and not the one used as input to arithmetic instructions. -+ int64_t program_counter = get_pc(); -+ if (::v8::internal::FLAG_stop_sim_at == 0) { -+ // Fast version of the dispatch loop without checking whether the simulator -+ // should be stopping at a particular executed instruction. -+ while (program_counter != end_sim_pc) { -+ Instruction* instr = reinterpret_cast(program_counter); -+ icount_++; -+ InstructionDecode(instr); -+ program_counter = get_pc(); -+ } -+ } else { -+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when -+ // we reach the particular instruction count. -+ while (program_counter != end_sim_pc) { -+ Instruction* instr = reinterpret_cast(program_counter); -+ icount_++; -+ if (icount_ == static_cast(::v8::internal::FLAG_stop_sim_at)) { -+ Loong64Debugger dbg(this); -+ dbg.Debug(); -+ } else { -+ InstructionDecode(instr); -+ } -+ program_counter = get_pc(); -+ } -+ } -+} -+ -+void Simulator::CallInternal(Address entry) { -+ // Adjust JS-based stack limit to C-based stack limit. -+ isolate_->stack_guard()->AdjustStackLimitForSimulator(); -+ -+ // Prepare to execute the code at entry. -+ set_register(pc, static_cast(entry)); -+ // Put down marker for end of simulation. The simulator will stop simulation -+ // when the PC reaches this value. By saving the "end simulation" value into -+ // the LR the simulation stops when returning to this call point. -+ set_register(ra, end_sim_pc); -+ -+ // Remember the values of callee-saved registers. -+ int64_t s0_val = get_register(s0); -+ int64_t s1_val = get_register(s1); -+ int64_t s2_val = get_register(s2); -+ int64_t s3_val = get_register(s3); -+ int64_t s4_val = get_register(s4); -+ int64_t s5_val = get_register(s5); -+ int64_t s6_val = get_register(s6); -+ int64_t s7_val = get_register(s7); -+ int64_t s8_val = get_register(s8); -+ int64_t gp_val = get_register(gp); -+ int64_t sp_val = get_register(sp); -+ int64_t tp_val = get_register(tp); -+ int64_t fp_val = get_register(fp); -+ -+ // Set up the callee-saved registers with a known value. To be able to check -+ // that they are preserved properly across JS execution. 
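-+  // (Editorial note: the pattern below is write-sentinel, Execute(), then
-+  // CHECK_EQ(sentinel, reg); any generated code that clobbers one of
-+  // s0..s8/gp/tp/fp without restoring it trips the corresponding CHECK_EQ
-+  // instead of silently corrupting host simulator state.)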
-+ int64_t callee_saved_value = icount_; -+ set_register(s0, callee_saved_value); -+ set_register(s1, callee_saved_value); -+ set_register(s2, callee_saved_value); -+ set_register(s3, callee_saved_value); -+ set_register(s4, callee_saved_value); -+ set_register(s5, callee_saved_value); -+ set_register(s6, callee_saved_value); -+ set_register(s7, callee_saved_value); -+ set_register(s8, callee_saved_value); -+ set_register(gp, callee_saved_value); -+ set_register(tp, callee_saved_value); -+ set_register(fp, callee_saved_value); -+ -+ // Start the simulation. -+ Execute(); -+ -+ // Check that the callee-saved registers have been preserved. -+ CHECK_EQ(callee_saved_value, get_register(s0)); -+ CHECK_EQ(callee_saved_value, get_register(s1)); -+ CHECK_EQ(callee_saved_value, get_register(s2)); -+ CHECK_EQ(callee_saved_value, get_register(s3)); -+ CHECK_EQ(callee_saved_value, get_register(s4)); -+ CHECK_EQ(callee_saved_value, get_register(s5)); -+ CHECK_EQ(callee_saved_value, get_register(s6)); -+ CHECK_EQ(callee_saved_value, get_register(s7)); -+ CHECK_EQ(callee_saved_value, get_register(s8)); -+ CHECK_EQ(callee_saved_value, get_register(gp)); -+ CHECK_EQ(callee_saved_value, get_register(tp)); -+ CHECK_EQ(callee_saved_value, get_register(fp)); -+ -+ // Restore callee-saved registers with the original value. -+ set_register(s0, s0_val); -+ set_register(s1, s1_val); -+ set_register(s2, s2_val); -+ set_register(s3, s3_val); -+ set_register(s4, s4_val); -+ set_register(s5, s5_val); -+ set_register(s6, s6_val); -+ set_register(s7, s7_val); -+ set_register(s8, s8_val); -+ set_register(gp, gp_val); -+ set_register(sp, sp_val); -+ set_register(tp, tp_val); -+ set_register(fp, fp_val); -+} -+ -+intptr_t Simulator::CallImpl(Address entry, int argument_count, -+ const intptr_t* arguments) { -+ constexpr int kRegisterPassedArguments = 8; -+ // Set up arguments. -+ -+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count); -+ if (reg_arg_count > 0) set_register(a0, arguments[0]); -+ if (reg_arg_count > 1) set_register(a1, arguments[1]); -+ if (reg_arg_count > 2) set_register(a2, arguments[2]); -+ if (reg_arg_count > 3) set_register(a3, arguments[3]); -+ if (reg_arg_count > 4) set_register(a4, arguments[4]); -+ if (reg_arg_count > 5) set_register(a5, arguments[5]); -+ if (reg_arg_count > 6) set_register(a6, arguments[6]); -+ if (reg_arg_count > 7) set_register(a7, arguments[7]); -+ -+ // Remaining arguments passed on stack. -+ int64_t original_stack = get_register(sp); -+ // Compute position of stack on entry to generated code. -+ int stack_args_count = argument_count - reg_arg_count; -+ int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize; -+ int64_t entry_stack = original_stack - stack_args_size; -+ -+ if (base::OS::ActivationFrameAlignment() != 0) { -+ entry_stack &= -base::OS::ActivationFrameAlignment(); -+ } -+ // Store remaining arguments on stack, from low to high memory. -+ intptr_t* stack_argument = reinterpret_cast(entry_stack); -+ memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count, -+ stack_args_count * sizeof(*arguments)); -+ set_register(sp, entry_stack); -+ -+ CallInternal(entry); -+ -+ // Pop stack passed arguments. 
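-+  // (Editorial note: only the first kRegisterPassedArguments (8) arguments
-+  // travel in a0..a7; any extras were copied below the original sp at
-+  // entry_stack, offset by kCArgSlotCount, and the CHECK_EQ below verifies
-+  // that the callee returned with sp unchanged.)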
-+ CHECK_EQ(entry_stack, get_register(sp)); -+ set_register(sp, original_stack); -+ -+ return get_register(v0); -+} -+ -+double Simulator::CallFP(Address entry, double d0, double d1) { -+ const FPURegister fparg2 = f1; -+ set_fpu_register_double(f0, d0); -+ set_fpu_register_double(fparg2, d1); -+ CallInternal(entry); -+ return get_fpu_register_double(f0); -+} -+ -+uintptr_t Simulator::PushAddress(uintptr_t address) { -+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t); -+ uintptr_t* stack_slot = reinterpret_cast(new_sp); -+ *stack_slot = address; -+ set_register(sp, new_sp); -+ return new_sp; -+} -+ -+uintptr_t Simulator::PopAddress() { -+ int64_t current_sp = get_register(sp); -+ uintptr_t* stack_slot = reinterpret_cast(current_sp); -+ uintptr_t address = *stack_slot; -+ set_register(sp, current_sp + sizeof(uintptr_t)); -+ return address; -+} -+ -+Simulator::LocalMonitor::LocalMonitor() -+ : access_state_(MonitorAccess::Open), -+ tagged_addr_(0), -+ size_(TransactionSize::None) {} -+ -+void Simulator::LocalMonitor::Clear() { -+ access_state_ = MonitorAccess::Open; -+ tagged_addr_ = 0; -+ size_ = TransactionSize::None; -+} -+ -+void Simulator::LocalMonitor::NotifyLoad() { -+ if (access_state_ == MonitorAccess::RMW) { -+ // A non linked load could clear the local monitor. As a result, it's -+ // most strict to unconditionally clear the local monitor on load. -+ Clear(); -+ } -+} -+ -+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr, -+ TransactionSize size) { -+ access_state_ = MonitorAccess::RMW; -+ tagged_addr_ = addr; -+ size_ = size; -+} -+ -+void Simulator::LocalMonitor::NotifyStore() { -+ if (access_state_ == MonitorAccess::RMW) { -+ // A non exclusive store could clear the local monitor. As a result, it's -+ // most strict to unconditionally clear the local monitor on store. -+ Clear(); -+ } -+} -+ -+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr, -+ TransactionSize size) { -+ if (access_state_ == MonitorAccess::RMW) { -+ if (addr == tagged_addr_ && size_ == size) { -+ Clear(); -+ return true; -+ } else { -+ return false; -+ } -+ } else { -+ DCHECK(access_state_ == MonitorAccess::Open); -+ return false; -+ } -+} -+ -+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress() -+ : access_state_(MonitorAccess::Open), -+ tagged_addr_(0), -+ next_(nullptr), -+ prev_(nullptr), -+ failure_counter_(0) {} -+ -+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() { -+ access_state_ = MonitorAccess::Open; -+ tagged_addr_ = 0; -+} -+ -+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked( -+ uintptr_t addr) { -+ access_state_ = MonitorAccess::RMW; -+ tagged_addr_ = addr; -+} -+ -+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() { -+ if (access_state_ == MonitorAccess::RMW) { -+ // A non exclusive store could clear the global monitor. As a result, it's -+ // most strict to unconditionally clear global monitors on store. -+ Clear_Locked(); -+ } -+} -+ -+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked( -+ uintptr_t addr, bool is_requesting_thread) { -+ if (access_state_ == MonitorAccess::RMW) { -+ if (is_requesting_thread) { -+ if (addr == tagged_addr_) { -+ Clear_Locked(); -+ // Introduce occasional sc/scd failures. This is to simulate the -+ // behavior of hardware, which can randomly fail due to background -+ // cache evictions. 
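-+        // (Editorial note: with kMaxFailureCounter == 5, one spurious
-+        // failure is injected after every five successful conditional
-+        // stores, which exercises the retry loop in generated ll/sc code.)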
-+ if (failure_counter_++ >= kMaxFailureCounter) { -+ failure_counter_ = 0; -+ return false; -+ } else { -+ return true; -+ } -+ } -+ } else if ((addr & kExclusiveTaggedAddrMask) == -+ (tagged_addr_ & kExclusiveTaggedAddrMask)) { -+ // Check the masked addresses when responding to a successful lock by -+ // another thread so the implementation is more conservative (i.e. the -+ // granularity of locking is as large as possible.) -+ Clear_Locked(); -+ return false; -+ } -+ } -+ return false; -+} -+ -+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked( -+ uintptr_t addr, LinkedAddress* linked_address) { -+ linked_address->NotifyLoadLinked_Locked(addr); -+ PrependProcessor_Locked(linked_address); -+} -+ -+void Simulator::GlobalMonitor::NotifyStore_Locked( -+ LinkedAddress* linked_address) { -+ // Notify each thread of the store operation. -+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) { -+ iter->NotifyStore_Locked(); -+ } -+} -+ -+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked( -+ uintptr_t addr, LinkedAddress* linked_address) { -+ DCHECK(IsProcessorInLinkedList_Locked(linked_address)); -+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) { -+ // Notify the other processors that this StoreConditional succeeded. -+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) { -+ if (iter != linked_address) { -+ iter->NotifyStoreConditional_Locked(addr, false); -+ } -+ } -+ return true; -+ } else { -+ return false; -+ } -+} -+ -+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked( -+ LinkedAddress* linked_address) const { -+ return head_ == linked_address || linked_address->next_ || -+ linked_address->prev_; -+} -+ -+void Simulator::GlobalMonitor::PrependProcessor_Locked( -+ LinkedAddress* linked_address) { -+ if (IsProcessorInLinkedList_Locked(linked_address)) { -+ return; -+ } -+ -+ if (head_) { -+ head_->prev_ = linked_address; -+ } -+ linked_address->prev_ = nullptr; -+ linked_address->next_ = head_; -+ head_ = linked_address; -+} -+ -+void Simulator::GlobalMonitor::RemoveLinkedAddress( -+ LinkedAddress* linked_address) { -+ base::MutexGuard lock_guard(&mutex); -+ if (!IsProcessorInLinkedList_Locked(linked_address)) { -+ return; -+ } -+ -+ if (linked_address->prev_) { -+ linked_address->prev_->next_ = linked_address->next_; -+ } else { -+ head_ = linked_address->next_; -+ } -+ if (linked_address->next_) { -+ linked_address->next_->prev_ = linked_address->prev_; -+ } -+ linked_address->prev_ = nullptr; -+ linked_address->next_ = nullptr; -+} -+ -+#undef SScanF -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // USE_SIMULATOR -diff --git a/deps/v8/src/execution/loong64/simulator-loong64.h b/deps/v8/src/execution/loong64/simulator-loong64.h -new file mode 100644 -index 00000000..8b53d67b ---- /dev/null -+++ b/deps/v8/src/execution/loong64/simulator-loong64.h -@@ -0,0 +1,646 @@ -+// Copyright 2020 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+// Declares a Simulator for loongisa instructions if we are not generating a -+// native loongisa binary. This Simulator allows us to run and debug loongisa -+// code generation on regular desktop machines. V8 calls into generated code via -+// the GeneratedCode wrapper, which will start execution in the Simulator or -+// forwards to the real entry on a loongisa HW platform. 
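-+//
-+// (Editorial sketch, not part of this patch: how a caller reaches the
-+// simulator on a non-LoongArch host. GeneratedCode is the upstream V8
-+// wrapper; `isolate` and `buffer` here are hypothetical stand-ins for a
-+// real isolate and a generated-code entry address.)
-+//
-+//   using F = int(int, int);
-+//   auto fn = v8::internal::GeneratedCode<F>::FromAddress(isolate, buffer);
-+//   int sum = fn.Call(2, 3);  // under USE_SIMULATOR this dispatches to
-+//                             // Simulator::current(isolate)->Call<int>(...)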
-+
-+#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
-+#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
-+
-+// globals.h defines USE_SIMULATOR.
-+#include "src/common/globals.h"
-+
-+template <typename T>
-+int Compare(const T& a, const T& b) {
-+  if (a == b)
-+    return 0;
-+  else if (a < b)
-+    return -1;
-+  else
-+    return 1;
-+}
-+
-+// Returns the negative absolute value of its argument.
-+template <typename T,
-+          typename = typename std::enable_if<std::is_signed<T>::value>::type>
-+T Nabs(T a) {
-+  return a < 0 ? a : -a;
-+}
-+
-+#if defined(USE_SIMULATOR)
-+// Running with a simulator.
-+
-+#include "src/base/hashmap.h"
-+#include "src/codegen/assembler.h"
-+#include "src/codegen/loong64/constants-loong64.h"
-+#include "src/execution/simulator-base.h"
-+#include "src/utils/allocation.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// -----------------------------------------------------------------------------
-+// Utility functions
-+
-+class CachePage {
-+ public:
-+  static const int LINE_VALID = 0;
-+  static const int LINE_INVALID = 1;
-+
-+  static const int kPageShift = 12;
-+  static const int kPageSize = 1 << kPageShift;
-+  static const int kPageMask = kPageSize - 1;
-+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
-+  static const int kLineLength = 1 << kLineShift;
-+  static const int kLineMask = kLineLength - 1;
-+
-+  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
-+
-+  char* ValidityByte(int offset) {
-+    return &validity_map_[offset >> kLineShift];
-+  }
-+
-+  char* CachedData(int offset) { return &data_[offset]; }
-+
-+ private:
-+  char data_[kPageSize];  // The cached data.
-+  static const int kValidityMapSize = kPageSize >> kLineShift;
-+  char validity_map_[kValidityMapSize];  // One byte per line.
-+};
-+
-+class SimInstructionBase : public InstructionBase {
-+ public:
-+  Type InstructionType() const { return type_; }
-+  inline Instruction* instr() const { return instr_; }
-+  inline int32_t operand() const { return operand_; }
-+
-+ protected:
-+  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
-+  explicit SimInstructionBase(Instruction* instr) {}
-+
-+  int32_t operand_;
-+  Instruction* instr_;
-+  Type type_;
-+
-+ private:
-+  DISALLOW_ASSIGN(SimInstructionBase);
-+};
-+
-+class SimInstruction : public InstructionGetters<SimInstructionBase> {
-+ public:
-+  SimInstruction() {}
-+
-+  explicit SimInstruction(Instruction* instr) { *this = instr; }
-+
-+  SimInstruction& operator=(Instruction* instr) {
-+    operand_ = *reinterpret_cast<const int32_t*>(instr);
-+    instr_ = instr;
-+    type_ = InstructionBase::InstructionType();
-+    DCHECK(reinterpret_cast<void*>(&operand_) == this);
-+    return *this;
-+  }
-+};
-+
-+class Simulator : public SimulatorBase {
-+ public:
-+  friend class Loong64Debugger;
-+
-+  // Registers are declared in order.
-+  enum Register {
-+    no_reg = -1,
-+    zero_reg = 0,
-+    ra,
-+    gp,
-+    sp,
-+    a0,
-+    a1,
-+    a2,
-+    a3,
-+    a4,
-+    a5,
-+    a6,
-+    a7,
-+    t0,
-+    t1,
-+    t2,
-+    t3,
-+    t4,
-+    t5,
-+    t6,
-+    t7,
-+    t8,
-+    tp,
-+    fp,
-+    s0,
-+    s1,
-+    s2,
-+    s3,
-+    s4,
-+    s5,
-+    s6,
-+    s7,
-+    s8,
-+    pc,  // pc must be the last register.
-+    kNumSimuRegisters,
-+    // aliases
-+    v0 = a0,
-+    v1 = a1
-+  };
-+
-+  // Condition flag registers.
-+  enum CFRegister {
-+    fcc0,
-+    fcc1,
-+    fcc2,
-+    fcc3,
-+    fcc4,
-+    fcc5,
-+    fcc6,
-+    fcc7,
-+    kNumCFRegisters
-+  };
-+
-+  // Floating point registers.
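-+  // (Editorial note: each of f0..f31 below is backed by a 64-bit slot in
-+  // FPUregisters_; the *_word and *_hi_word accessors expose the low and
-+  // high 32-bit halves for single-precision and paired moves.)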
-+ enum FPURegister { -+ f0, -+ f1, -+ f2, -+ f3, -+ f4, -+ f5, -+ f6, -+ f7, -+ f8, -+ f9, -+ f10, -+ f11, -+ f12, -+ f13, -+ f14, -+ f15, -+ f16, -+ f17, -+ f18, -+ f19, -+ f20, -+ f21, -+ f22, -+ f23, -+ f24, -+ f25, -+ f26, -+ f27, -+ f28, -+ f29, -+ f30, -+ f31, -+ kNumFPURegisters -+ }; -+ -+ explicit Simulator(Isolate* isolate); -+ ~Simulator(); -+ -+ // The currently executing Simulator instance. Potentially there can be one -+ // for each native thread. -+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate); -+ -+ // Accessors for register state. Reading the pc value adheres to the LOONG64 -+ // architecture specification and is off by a 8 from the currently executing -+ // instruction. -+ void set_register(int reg, int64_t value); -+ void set_register_word(int reg, int32_t value); -+ void set_dw_register(int dreg, const int* dbl); -+ int64_t get_register(int reg) const; -+ double get_double_from_register_pair(int reg); -+ // Same for FPURegisters. -+ void set_fpu_register(int fpureg, int64_t value); -+ void set_fpu_register_word(int fpureg, int32_t value); -+ void set_fpu_register_hi_word(int fpureg, int32_t value); -+ void set_fpu_register_float(int fpureg, float value); -+ void set_fpu_register_double(int fpureg, double value); -+ void set_fpu_register_invalid_result64(float original, float rounded); -+ void set_fpu_register_invalid_result(float original, float rounded); -+ void set_fpu_register_word_invalid_result(float original, float rounded); -+ void set_fpu_register_invalid_result64(double original, double rounded); -+ void set_fpu_register_invalid_result(double original, double rounded); -+ void set_fpu_register_word_invalid_result(double original, double rounded); -+ int64_t get_fpu_register(int fpureg) const; -+ int32_t get_fpu_register_word(int fpureg) const; -+ int32_t get_fpu_register_signed_word(int fpureg) const; -+ int32_t get_fpu_register_hi_word(int fpureg) const; -+ float get_fpu_register_float(int fpureg) const; -+ double get_fpu_register_double(int fpureg) const; -+ void set_cf_register(int cfreg, bool value); -+ bool get_cf_register(int cfreg) const; -+ void set_fcsr_rounding_mode(FPURoundingMode mode); -+ unsigned int get_fcsr_rounding_mode(); -+ void set_fcsr_bit(uint32_t cc, bool value); -+ bool test_fcsr_bit(uint32_t cc); -+ bool set_fcsr_round_error(double original, double rounded); -+ bool set_fcsr_round64_error(double original, double rounded); -+ bool set_fcsr_round_error(float original, float rounded); -+ bool set_fcsr_round64_error(float original, float rounded); -+ void round_according_to_fcsr(double toRound, double* rounded, -+ int32_t* rounded_int); -+ void round64_according_to_fcsr(double toRound, double* rounded, -+ int64_t* rounded_int); -+ void round_according_to_fcsr(float toRound, float* rounded, -+ int32_t* rounded_int); -+ void round64_according_to_fcsr(float toRound, float* rounded, -+ int64_t* rounded_int); -+ // Special case of set_register and get_register to access the raw PC value. -+ void set_pc(int64_t value); -+ int64_t get_pc() const; -+ -+ Address get_sp() const { return static_cast
(get_register(sp)); } -+ -+ // Accessor to the internal simulator stack area. -+ uintptr_t StackLimit(uintptr_t c_limit) const; -+ -+ // Executes LOONG64 instructions until the PC reaches end_sim_pc. -+ void Execute(); -+ -+ template -+ Return Call(Address entry, Args... args) { -+ return VariadicCall(this, &Simulator::CallImpl, entry, args...); -+ } -+ -+ // Alternative: call a 2-argument double function. -+ double CallFP(Address entry, double d0, double d1); -+ -+ // Push an address onto the JS stack. -+ uintptr_t PushAddress(uintptr_t address); -+ -+ // Pop an address from the JS stack. -+ uintptr_t PopAddress(); -+ -+ // Debugger input. -+ void set_last_debugger_input(char* input); -+ char* last_debugger_input() { return last_debugger_input_; } -+ -+ // Redirection support. -+ static void SetRedirectInstruction(Instruction* instruction); -+ -+ // ICache checking. -+ static bool ICacheMatch(void* one, void* two); -+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start, -+ size_t size); -+ -+ // Returns true if pc register contains one of the 'special_values' defined -+ // below (bad_ra, end_sim_pc). -+ bool has_bad_pc() const; -+ -+ private: -+ enum special_values { -+ // Known bad pc value to ensure that the simulator does not execute -+ // without being properly setup. -+ bad_ra = -1, -+ // A pc value used to signal the simulator to stop execution. Generally -+ // the ra is set to this value on transition from native C code to -+ // simulated execution, so that the simulator can "return" to the native -+ // C code. -+ end_sim_pc = -2, -+ // Unpredictable value. -+ Unpredictable = 0xbadbeaf -+ }; -+ -+ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count, -+ const intptr_t* arguments); -+ -+ // Unsupported instructions use Format to print an error and stop execution. -+ void Format(Instruction* instr, const char* format); -+ -+ // Helpers for data value tracing. -+ enum TraceType { -+ BYTE, -+ HALF, -+ WORD, -+ DWORD, -+ FLOAT, -+ DOUBLE, -+ FLOAT_DOUBLE, -+ WORD_DWORD -+ }; -+ -+ // Read and write memory. -+ inline uint32_t ReadBU(int64_t addr); -+ inline int32_t ReadB(int64_t addr); -+ inline void WriteB(int64_t addr, uint8_t value); -+ inline void WriteB(int64_t addr, int8_t value); -+ -+ inline uint16_t ReadHU(int64_t addr, Instruction* instr); -+ inline int16_t ReadH(int64_t addr, Instruction* instr); -+ // Note: Overloaded on the sign of the value. -+ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr); -+ inline void WriteH(int64_t addr, int16_t value, Instruction* instr); -+ -+ inline uint32_t ReadWU(int64_t addr, Instruction* instr); -+ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD); -+ inline void WriteW(int64_t addr, int32_t value, Instruction* instr); -+ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr, -+ int32_t rt_reg); -+ inline int64_t Read2W(int64_t addr, Instruction* instr); -+ inline void Write2W(int64_t addr, int64_t value, Instruction* instr); -+ inline void WriteConditional2W(int64_t addr, int64_t value, -+ Instruction* instr, int32_t rt_reg); -+ -+ inline double ReadD(int64_t addr, Instruction* instr); -+ inline void WriteD(int64_t addr, double value, Instruction* instr); -+ -+ template -+ T ReadMem(int64_t addr, Instruction* instr); -+ template -+ void WriteMem(int64_t addr, T value, Instruction* instr); -+ -+ // Helper for debugging memory access. 
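-+  // (Editorial note: DieOrDebug either aborts the process or drops into
-+  // Loong64Debugger, and the TraceRegWr/TraceMem* helpers below feed the
-+  // --trace-sim log, tagging each access with its width via TraceType.)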
-+ inline void DieOrDebug(); -+ -+ void TraceRegWr(int64_t value, TraceType t = DWORD); -+ void TraceMemWr(int64_t addr, int64_t value, TraceType t); -+ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD); -+ template -+ void TraceMemRd(int64_t addr, T value); -+ template -+ void TraceMemWr(int64_t addr, T value); -+ -+ SimInstruction instr_; -+ -+ // Executing is handled based on the instruction type. -+ void DecodeTypeOp6(); -+ void DecodeTypeOp7(); -+ void DecodeTypeOp8(); -+ void DecodeTypeOp10(); -+ void DecodeTypeOp12(); -+ void DecodeTypeOp14(); -+ void DecodeTypeOp17(); -+ void DecodeTypeOp22(); -+ -+ inline int32_t rj_reg() const { return instr_.RjValue(); } -+ inline int64_t rj() const { return get_register(rj_reg()); } -+ inline uint64_t rj_u() const { -+ return static_cast(get_register(rj_reg())); -+ } -+ inline int32_t rk_reg() const { return instr_.RkValue(); } -+ inline int64_t rk() const { return get_register(rk_reg()); } -+ inline uint64_t rk_u() const { -+ return static_cast(get_register(rk_reg())); -+ } -+ inline int32_t rd_reg() const { return instr_.RdValue(); } -+ inline int64_t rd() const { return get_register(rd_reg()); } -+ inline uint64_t rd_u() const { -+ return static_cast(get_register(rd_reg())); -+ } -+ inline int32_t fa_reg() const { return instr_.FaValue(); } -+ inline float fa_float() const { return get_fpu_register_float(fa_reg()); } -+ inline double fa_double() const { return get_fpu_register_double(fa_reg()); } -+ inline int32_t fj_reg() const { return instr_.FjValue(); } -+ inline float fj_float() const { return get_fpu_register_float(fj_reg()); } -+ inline double fj_double() const { return get_fpu_register_double(fj_reg()); } -+ inline int32_t fk_reg() const { return instr_.FkValue(); } -+ inline float fk_float() const { return get_fpu_register_float(fk_reg()); } -+ inline double fk_double() const { return get_fpu_register_double(fk_reg()); } -+ inline int32_t fd_reg() const { return instr_.FdValue(); } -+ inline float fd_float() const { return get_fpu_register_float(fd_reg()); } -+ inline double fd_double() const { return get_fpu_register_double(fd_reg()); } -+ inline int32_t cj_reg() const { return instr_.CjValue(); } -+ inline bool cj() const { return get_cf_register(cj_reg()); } -+ inline int32_t cd_reg() const { return instr_.CdValue(); } -+ inline bool cd() const { return get_cf_register(cd_reg()); } -+ inline int32_t ca_reg() const { return instr_.CaValue(); } -+ inline bool ca() const { return get_cf_register(ca_reg()); } -+ inline uint32_t sa2() const { return instr_.Sa2Value(); } -+ inline uint32_t sa3() const { return instr_.Sa3Value(); } -+ inline uint32_t ui5() const { return instr_.Ui5Value(); } -+ inline uint32_t ui6() const { return instr_.Ui6Value(); } -+ inline uint32_t lsbw() const { return instr_.LsbwValue(); } -+ inline uint32_t msbw() const { return instr_.MsbwValue(); } -+ inline uint32_t lsbd() const { return instr_.LsbdValue(); } -+ inline uint32_t msbd() const { return instr_.MsbdValue(); } -+ inline uint32_t cond() const { return instr_.CondValue(); } -+ inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; } -+ inline uint32_t ui12() const { return instr_.Ui12Value(); } -+ inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; } -+ inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; } -+ inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; } -+ -+ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) { -+ set_register(rd_reg, 
alu_out); -+ TraceRegWr(alu_out); -+ } -+ -+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) { -+ set_fpu_register_word(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg), WORD); -+ } -+ -+ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) { -+ set_fpu_register_word(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg)); -+ } -+ -+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) { -+ set_fpu_register(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg)); -+ } -+ -+ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) { -+ set_fpu_register(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); -+ } -+ -+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) { -+ set_fpu_register_float(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg), FLOAT); -+ } -+ -+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) { -+ set_fpu_register_double(fd_reg, alu_out); -+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE); -+ } -+ -+ // Used for breakpoints. -+ void SoftwareInterrupt(); -+ -+ // Stop helper functions. -+ bool IsWatchpoint(uint64_t code); -+ void PrintWatchpoint(uint64_t code); -+ void HandleStop(uint64_t code, Instruction* instr); -+ bool IsStopInstruction(Instruction* instr); -+ bool IsEnabledStop(uint64_t code); -+ void EnableStop(uint64_t code); -+ void DisableStop(uint64_t code); -+ void IncreaseStopCounter(uint64_t code); -+ void PrintStopInfo(uint64_t code); -+ -+ // Executes one instruction. -+ void InstructionDecode(Instruction* instr); -+ // Execute one instruction placed in a branch delay slot. -+ -+ // ICache. -+ static void CheckICache(base::CustomMatcherHashMap* i_cache, -+ Instruction* instr); -+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start, -+ size_t size); -+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache, -+ void* page); -+ -+ enum Exception { -+ none, -+ kIntegerOverflow, -+ kIntegerUnderflow, -+ kDivideByZero, -+ kNumExceptions -+ }; -+ -+ // Exceptions. -+ void SignalException(Exception e); -+ -+ // Handle arguments and return value for runtime FP functions. -+ void GetFpArgs(double* x, double* y, int32_t* z); -+ void SetFpResult(const double& result); -+ -+ void CallInternal(Address entry); -+ -+ // Architecture state. -+ // Registers. -+ int64_t registers_[kNumSimuRegisters]; -+ // Floating point Registers. -+ int64_t FPUregisters_[kNumFPURegisters]; -+ // Condition flags Registers. -+ bool CFregisters_[kNumCFRegisters]; -+ // FPU control register. -+ uint32_t FCSR_; -+ -+ // Simulator support. -+ // Allocate 1MB for stack. -+ size_t stack_size_; -+ char* stack_; -+ bool pc_modified_; -+ int64_t icount_; -+ int break_count_; -+ EmbeddedVector trace_buf_; -+ -+ // Debugger input. -+ char* last_debugger_input_; -+ -+ v8::internal::Isolate* isolate_; -+ -+ // Registered breakpoints. -+ Instruction* break_pc_; -+ Instr break_instr_; -+ -+ // Stop is disabled if bit 31 is set. -+ static const uint32_t kStopDisabledBit = 1 << 31; -+ -+ // A stop is enabled, meaning the simulator will stop when meeting the -+ // instruction, if bit 31 of watched_stops_[code].count is unset. -+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times -+ // the breakpoint was hit or gone through. -+ struct StopCountAndDesc { -+ uint32_t count; -+ char* desc; -+ }; -+ StopCountAndDesc watched_stops_[kMaxStopCode + 1]; -+ -+ // Synchronization primitives. 
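-+  // (Editorial note: the LocalMonitor/GlobalMonitor pair below models
-+  // ll.w/ll.d + sc.w/sc.d: the local monitor tracks this thread's linked
-+  // address and access size, while the global monitor links all simulator
-+  // threads so that any intervening store or competing sc invalidates a
-+  // pending conditional store.)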
-+ enum class MonitorAccess { -+ Open, -+ RMW, -+ }; -+ -+ enum class TransactionSize { -+ None = 0, -+ Word = 4, -+ DoubleWord = 8, -+ }; -+ -+ // The least-significant bits of the address are ignored. The number of bits -+ // is implementation-defined, between 3 and minimum page size. -+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1); -+ -+ class LocalMonitor { -+ public: -+ LocalMonitor(); -+ -+ // These functions manage the state machine for the local monitor, but do -+ // not actually perform loads and stores. NotifyStoreConditional only -+ // returns true if the store conditional is allowed; the global monitor will -+ // still have to be checked to see whether the memory should be updated. -+ void NotifyLoad(); -+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size); -+ void NotifyStore(); -+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size); -+ -+ private: -+ void Clear(); -+ -+ MonitorAccess access_state_; -+ uintptr_t tagged_addr_; -+ TransactionSize size_; -+ }; -+ -+ class GlobalMonitor { -+ public: -+ class LinkedAddress { -+ public: -+ LinkedAddress(); -+ -+ private: -+ friend class GlobalMonitor; -+ // These functions manage the state machine for the global monitor, but do -+ // not actually perform loads and stores. -+ void Clear_Locked(); -+ void NotifyLoadLinked_Locked(uintptr_t addr); -+ void NotifyStore_Locked(); -+ bool NotifyStoreConditional_Locked(uintptr_t addr, -+ bool is_requesting_thread); -+ -+ MonitorAccess access_state_; -+ uintptr_t tagged_addr_; -+ LinkedAddress* next_; -+ LinkedAddress* prev_; -+ // A scd can fail due to background cache evictions. Rather than -+ // simulating this, we'll just occasionally introduce cases where an -+ // store conditional fails. This will happen once after every -+ // kMaxFailureCounter exclusive stores. -+ static const int kMaxFailureCounter = 5; -+ int failure_counter_; -+ }; -+ -+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*. -+ base::Mutex mutex; -+ -+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address); -+ void NotifyStore_Locked(LinkedAddress* linked_address); -+ bool NotifyStoreConditional_Locked(uintptr_t addr, -+ LinkedAddress* linked_address); -+ -+ // Called when the simulator is destroyed. -+ void RemoveLinkedAddress(LinkedAddress* linked_address); -+ -+ static GlobalMonitor* Get(); -+ -+ private: -+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton. -+ GlobalMonitor() = default; -+ friend class base::LeakyObject; -+ -+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const; -+ void PrependProcessor_Locked(LinkedAddress* linked_address); -+ -+ LinkedAddress* head_ = nullptr; -+ }; -+ -+ LocalMonitor local_monitor_; -+ GlobalMonitor::LinkedAddress global_monitor_thread_; -+}; -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // defined(USE_SIMULATOR) -+#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_ -diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc -index 72f28363..98c50263 100644 ---- a/deps/v8/src/execution/mips64/simulator-mips64.cc -+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc -@@ -28,6 +28,8 @@ namespace internal { - DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor, - Simulator::GlobalMonitor::Get) - -+// #define PRINT_SIM_LOG -+ - // Util functions. 
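- // (Editorial note: HaveSameSign below relies on two's complement: a ^ b
- // has its sign bit clear exactly when a and b agree in sign, e.g.
- // 5 ^ 7 >= 0 but 5 ^ -7 < 0.)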
- inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } - -@@ -57,6 +59,17 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) { - return u1 * v1 + w2 + (w1 >> 32); - } - -+#ifdef PRINT_SIM_LOG -+inline void printf_instr(const char* _Format, ...) { -+ va_list varList; -+ va_start(varList, _Format); -+ vprintf(_Format, varList); -+ va_end(varList); -+} -+#else -+#define printf_instr(...) -+#endif -+ - // This macro provides a platform independent use of sscanf. The reason for - // SScanF not being implemented in a platform independent was through - // ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time -@@ -2195,6 +2208,7 @@ void Simulator::SoftwareInterrupt() { - uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1; - // We first check if we met a call_rt_redirected. - if (instr_.InstructionBits() == rtCallRedirInstr) { -+ printf_instr("Simulator::SoftwareInterrupt: BREAK 0xFFFFF\n"); - Redirection* redirection = Redirection::FromInstruction(instr_.instr()); - - int64_t* stack_pointer = reinterpret_cast(get_register(sp)); -@@ -2723,6 +2737,9 @@ void Simulator::DecodeTypeRegisterSRsType() { - KeepSign::yes, fs)); - break; - case SQRT_S: -+ printf_instr("sqrt_s\t %s: %016f, %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd, -+ FPURegisters::Name(fs_reg()), fs); - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs)); -@@ -3115,6 +3132,10 @@ void Simulator::DecodeTypeRegisterDRsType() { - [](double lhs, double rhs) { return lhs + rhs; }, fs, ft)); - break; - case SUB_D: -+ printf_instr("sub_d\t %s: %016f, %s: %016f, %s: %016f\n", -+ FPURegisters::Name(fd_reg()), fd, -+ FPURegisters::Name(fs_reg()), fs, -+ FPURegisters::Name(ft_reg()), ft); - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation( -@@ -3381,6 +3402,10 @@ void Simulator::DecodeTypeRegisterWRsType() { - int64_t alu_out = 0x12345678; - switch (instr_.FunctionFieldRaw()) { - case CVT_S_W: // Convert word to float (single). 
-+ printf_instr( -+ "CVT_S_W \t %s: %016f, %s: %016x\n", FPURegisters::Name(fd_reg()), -+ get_fpu_register_float(fd_reg()), FPURegisters::Name(fs_reg()), -+ get_fpu_register_signed_word(fs_reg())); - alu_out = get_fpu_register_signed_word(fs_reg()); - SetFPUFloatResult(fd_reg(), static_cast(alu_out)); - break; -@@ -3476,6 +3501,10 @@ void Simulator::DecodeTypeRegisterLRsType() { - SetFPUDoubleResult(fd_reg(), static_cast(i64)); - break; - case CVT_S_L: -+ printf_instr("CVT_S_L \t %s: %016f, %s: %016x\n", -+ FPURegisters::Name(fd_reg()), -+ get_fpu_register_float(fd_reg()), -+ FPURegisters::Name(fs_reg()), get_fpu_register(fs_reg())); - i64 = get_fpu_register(fs_reg()); - SetFPUFloatResult(fd_reg(), static_cast(i64)); - break; -@@ -3569,11 +3598,17 @@ void Simulator::DecodeTypeRegisterCOP1() { - SetResult(rt_reg(), FCSR_); - break; - case MFC1: -+ printf_instr("MFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()), -+ rt(), FPURegisters::Name(fs_reg()), -+ get_fpu_register_float(fs_reg())); - set_register(rt_reg(), - static_cast(get_fpu_register_word(fs_reg()))); - TraceRegWr(get_register(rt_reg()), WORD_DWORD); - break; - case DMFC1: -+ printf_instr( -+ "DMFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()), rt(), -+ FPURegisters::Name(fs_reg()), get_fpu_register_double(fs_reg())); - SetResult(rt_reg(), get_fpu_register(fs_reg())); - break; - case MFHC1: -@@ -3593,12 +3628,18 @@ void Simulator::DecodeTypeRegisterCOP1() { - break; - } - case MTC1: -+ printf_instr( -+ "MTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()), -+ get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt()); - // Hardware writes upper 32-bits to zero on mtc1. - set_fpu_register_hi_word(fs_reg(), 0); - set_fpu_register_word(fs_reg(), static_cast(rt())); - TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE); - break; - case DMTC1: -+ printf_instr( -+ "DMTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()), -+ get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt()); - SetFPUResult2(fs_reg(), rt()); - break; - case MTHC1: -@@ -3683,6 +3724,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - case JR: { - int64_t next_pc = rs(); - int64_t current_pc = get_pc(); -+ printf_instr("JALR\t %s: %016lx\n", Registers::Name(rs_reg()), rs()); - Instruction* branch_delay_instr = - reinterpret_cast(current_pc + kInstrSize); - BranchDelayInstructionDecode(branch_delay_instr); -@@ -3694,6 +3736,8 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - int64_t next_pc = rs(); - int64_t current_pc = get_pc(); - int32_t return_addr_reg = rd_reg(); -+ printf_instr("JALR\t %s: %016lx, %s: %016lx\n", Registers::Name(rd_reg()), -+ get_register(rd_reg()), Registers::Name(rs_reg()), rs()); - Instruction* branch_delay_instr = - reinterpret_cast(current_pc + kInstrSize); - BranchDelayInstructionDecode(branch_delay_instr); -@@ -3703,21 +3747,36 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - break; - } - case SLL: -+ printf_instr("SLL\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - SetResult(rd_reg(), static_cast(rt()) << sa()); - break; - case DSLL: -+ printf_instr("DSLL\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - SetResult(rd_reg(), rt() << sa()); - break; - case DSLL32: -+ printf_instr("DSLL32\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), 
rt(), sa()); - SetResult(rd_reg(), rt() << sa() << 32); - break; - case SRL: - if (rs_reg() == 0) { -+ printf_instr("SRL\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Regular logical right shift of a word by a fixed number of - // bits instruction. RS field is always equal to 0. - // Sign-extend the 32-bit result. - alu_out = static_cast(static_cast(rt_u()) >> sa()); - } else if (rs_reg() == 1) { -+ printf_instr("ROTR\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Logical right-rotate of a word by a fixed number of bits. This - // is special case of SRL instruction, added in MIPS32 Release 2. - // RS field is equal to 00001. -@@ -3731,11 +3790,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - break; - case DSRL: - if (rs_reg() == 0) { -+ printf_instr("DSRL\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Regular logical right shift of a word by a fixed number of - // bits instruction. RS field is always equal to 0. - // Sign-extend the 64-bit result. - alu_out = static_cast(rt_u() >> sa()); - } else if (rs_reg() == 1) { -+ printf_instr("DROTR\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Logical right-rotate of a word by a fixed number of bits. This - // is special case of SRL instruction, added in MIPS32 Release 2. - // RS field is equal to 00001. -@@ -3747,11 +3812,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - break; - case DSRL32: - if (rs_reg() == 0) { -+ printf_instr("DSRL32\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Regular logical right shift of a word by a fixed number of - // bits instruction. RS field is always equal to 0. - // Sign-extend the 64-bit result. - alu_out = static_cast(rt_u() >> sa() >> 32); - } else if (rs_reg() == 1) { -+ printf_instr("DROTR32\t %s: %016lx, %s: %016lx, sa: %02x\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), sa()); - // Logical right-rotate of a word by a fixed number of bits. This - // is special case of SRL instruction, added in MIPS32 Release 2. - // RS field is equal to 00001. 
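Two details in the shift hunks above are easy to miss when reading the trace output: the 32-bit SRL/ROTR paths compute in uint32_t and then sign-extend the result into the 64-bit register, and ROTR reuses the SRL encoding with rs equal to 1. A standalone sketch of those semantics, with illustrative helper names that are not part of the patch:

#include <cassert>
#include <cstdint>

// Models how the SRL/ROTR cases above produce their results: the 32-bit
// logical shift is done in uint32_t, then the 32-bit value is sign-extended
// into the 64-bit destination register.
static int64_t SimulateSrl(uint64_t rt, int sa) {
  uint32_t shifted = static_cast<uint32_t>(rt) >> sa;
  return static_cast<int64_t>(static_cast<int32_t>(shifted));
}

// ROTR is the same encoding with rs == 1: rotate right instead of shift.
static int64_t SimulateRotr(uint64_t rt, int sa) {
  uint32_t w = static_cast<uint32_t>(rt);
  uint32_t rotated = (sa == 0) ? w : (w >> sa) | (w << (32 - sa));
  return static_cast<int64_t>(static_cast<int32_t>(rotated));
}

int main() {
  // 0x80000000 >> 4 == 0x08000000: positive, so no sign bits are set.
  assert(SimulateSrl(0x80000000u, 4) == 0x08000000);
  // Rotating 0x80000001 right by 1 gives 0xC0000000, which sign-extends
  // to 0xFFFFFFFFC0000000 in the 64-bit register.
  assert(SimulateRotr(0x80000001u, 1) ==
         static_cast<int64_t>(0xFFFFFFFFC0000000ull));
  return 0;
}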
-@@ -3763,26 +3834,51 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
-       SetResult(rd_reg(), alu_out);
-       break;
-     case SRA:
-+      printf_instr("SRA\t %s: %016lx, %s: %016lx, sa: %02x\n",
-+                   Registers::Name(rd_reg()), get_register(rd_reg()),
-+                   Registers::Name(rt_reg()), rt(), sa());
-       SetResult(rd_reg(), (int32_t)rt() >> sa());
-       break;
-     case DSRA:
-+      printf_instr("DSRA\t %s: %016lx, %s: %016lx, sa: %02x\n",
-+                   Registers::Name(rd_reg()), get_register(rd_reg()),
-+                   Registers::Name(rt_reg()), rt(), sa());
-       SetResult(rd_reg(), rt() >> sa());
-       break;
-     case DSRA32:
-+      printf_instr("DSRA32\t %s: %016lx, %s: %016lx, sa: %02x\n",
-+                   Registers::Name(rd_reg()), get_register(rd_reg()),
-+                   Registers::Name(rt_reg()), rt(), sa());
-       SetResult(rd_reg(), rt() >> sa() >> 32);
-       break;
-     case SLLV:
-+      printf_instr("SLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                   Registers::Name(rd_reg()), get_register(rd_reg()),
-+                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                   rs());
-       SetResult(rd_reg(), (int32_t)rt() << rs());
-       break;
-     case DSLLV:
-+      printf_instr("DSLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                   Registers::Name(rd_reg()), get_register(rd_reg()),
-+                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                   rs());
-       SetResult(rd_reg(), rt() << rs());
-       break;
-     case SRLV:
-       if (sa() == 0) {
-+        printf_instr("SRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                     Registers::Name(rd_reg()), get_register(rd_reg()),
-+                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                     rs());
-         // Regular logical right-shift of a word by a variable number of
-         // bits instruction. SA field is always equal to 0.
-         alu_out = static_cast<int32_t>((uint32_t)rt_u() >> rs());
-       } else {
-+        printf_instr("ROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                     Registers::Name(rd_reg()), get_register(rd_reg()),
-+                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                     rs());
-         // Logical right-rotate of a word by a variable number of bits.
-         // This is special case of SRLV instruction, added in MIPS32
-         // Release 2. SA field is equal to 00001.
-@@ -3794,10 +3890,18 @@
-       break;
-     case DSRLV:
-       if (sa() == 0) {
-+        printf_instr("DSRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                     Registers::Name(rd_reg()), get_register(rd_reg()),
-+                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                     rs());
-         // Regular logical right-shift of a word by a variable number of
-         // bits instruction. SA field is always equal to 0.
-         alu_out = static_cast<int64_t>(rt_u() >> rs());
-       } else {
-+        printf_instr("DROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
-+                     Registers::Name(rd_reg()), get_register(rd_reg()),
-+                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
-+                     rs());
-         // Logical right-rotate of a word by a variable number of bits.
-         // This is special case of SRLV instruction, added in MIPS32
-         // Release 2. SA field is equal to 00001.
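Every case in these hunks gains the same printf_instr pattern: mnemonic first, then the pre-execution operand registers. The helper's definition is outside this excerpt; a minimal stand-in with the same shape, guarded so untraced runs stay quiet, could look like this (the flag name is made up for illustration, not taken from the patch):

#include <cstdarg>
#include <cstdio>

// Hypothetical stand-in for the tracing hook used above; the real helper
// lives elsewhere in the simulator sources. It forwards to vprintf only
// when instruction tracing is enabled.
static bool g_trace_sim_instructions = false;  // assumed flag, not from the patch

static void printf_instr_sketch(const char* format, ...) {
  if (!g_trace_sim_instructions) return;
  va_list args;
  va_start(args, format);
  vprintf(format, args);
  va_end(args);
}

int main() {
  g_trace_sim_instructions = true;
  // Mirrors the shape of the SRA trace above: mnemonic, registers, shift amount.
  printf_instr_sketch("SRA\t %s: %016lx, %s: %016lx, sa: %02x\n",
                      "rd", 0xffffffffff000000ul, "rt", 0xff00000000000000ul, 8);
  return 0;
}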
-@@ -3807,9 +3911,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - SetResult(rd_reg(), alu_out); - break; - case SRAV: -+ printf_instr("SRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), -+ rs()); - SetResult(rd_reg(), (int32_t)rt() >> rs()); - break; - case DSRAV: -+ printf_instr("DSRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()), -+ rs()); - SetResult(rd_reg(), rt() >> rs()); - break; - case LSA: { -@@ -4018,6 +4130,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - break; - case ADD: - case DADD: -+ printf_instr("DADD\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - if (HaveSameSign(rs(), rt())) { - if (rs() > 0) { - if (rs() > (Registers::kMaxValue - rt())) { -@@ -4032,16 +4148,28 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - SetResult(rd_reg(), rs() + rt()); - break; - case ADDU: { -+ printf_instr("ADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - int32_t alu32_out = static_cast(rs() + rt()); - // Sign-extend result of 32bit operation into 64bit register. - SetResult(rd_reg(), static_cast(alu32_out)); - break; - } - case DADDU: -+ printf_instr("DADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() + rt()); - break; - case SUB: - case DSUB: -+ printf_instr("DSUB\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - if (!HaveSameSign(rs(), rt())) { - if (rs() > 0) { - if (rs() > (Registers::kMaxValue + rt())) { -@@ -4056,30 +4184,62 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - SetResult(rd_reg(), rs() - rt()); - break; - case SUBU: { -+ printf_instr("SUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - int32_t alu32_out = static_cast(rs() - rt()); - // Sign-extend result of 32bit operation into 64bit register. 
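The DADD/DSUB cases above raise an overflow exception only when both operands share a sign; mixed-sign additions can never overflow, so they fall straight through to the store. A compact model of the add-side check, using numeric_limits in place of the simulator's Registers::kMaxValue:

#include <cassert>
#include <cstdint>
#include <limits>

// Illustrative model of the overflow test pattern in the DADD case above:
// compare against the representable extreme before doing the addition.
static bool HaveSameSign(int64_t a, int64_t b) { return (a ^ b) >= 0; }

static bool AddWouldOverflow(int64_t rs, int64_t rt) {
  if (!HaveSameSign(rs, rt)) return false;  // mixed signs never overflow
  if (rs > 0) return rs > std::numeric_limits<int64_t>::max() - rt;
  return rs < std::numeric_limits<int64_t>::min() - rt;
}

int main() {
  assert(!AddWouldOverflow(1, -1));
  assert(AddWouldOverflow(std::numeric_limits<int64_t>::max(), 1));
  assert(AddWouldOverflow(std::numeric_limits<int64_t>::min(), -1));
  return 0;
}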
- SetResult(rd_reg(), static_cast(alu32_out)); - break; - } - case DSUBU: -+ printf_instr("DSUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() - rt()); - break; - case AND: -+ printf_instr("AND\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() & rt()); - break; - case OR: -+ printf_instr("OR\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() | rt()); - break; - case XOR: -+ printf_instr("XOR\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() ^ rt()); - break; - case NOR: -+ printf_instr("NOR\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), ~(rs() | rt())); - break; - case SLT: -+ printf_instr("SLT\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs() < rt() ? 1 : 0); - break; - case SLTU: -+ printf_instr("SLTU\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0); - break; - // Break and trap instructions. -@@ -4106,9 +4266,14 @@ void Simulator::DecodeTypeRegisterSPECIAL() { - break; - case SYNC: - // TODO(palfia): Ignore sync instruction for now. -+ printf_instr("sync\n"); - break; - // Conditional moves. - case MOVN: -+ printf_instr("MOVN\t %s: %016lx, %s: %016lx, %s: %016lx\n", -+ Registers::Name(rd_reg()), get_register(rd_reg()), -+ Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()), -+ rt()); - if (rt()) { - SetResult(rd_reg(), rs()); - } -@@ -4173,6 +4338,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() { - // Interpret sa field as 5-bit lsb of extract. - uint16_t lsb = sa(); - uint16_t size = msbd + 1; -+ printf_instr("EXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n", -+ Registers::Name(rt_reg()), get_register(rt_reg()), -+ Registers::Name(rs_reg()), rs(), lsb, size); - uint64_t mask = (1ULL << size) - 1; - alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); - SetResult(rt_reg(), alu_out); -@@ -4184,6 +4352,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() { - // Interpret sa field as 5-bit lsb of extract. - uint16_t lsb = sa(); - uint16_t size = msbd + 1; -+ printf_instr("DEXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n", -+ Registers::Name(rt_reg()), get_register(rt_reg()), -+ Registers::Name(rs_reg()), rs(), lsb, size); - uint64_t mask = (size == 64) ? 
UINT64_MAX : (1ULL << size) - 1; - alu_out = static_cast((rs_u() & (mask << lsb)) >> lsb); - SetResult(rt_reg(), alu_out); -@@ -6553,6 +6724,7 @@ void Simulator::DecodeTypeImmediate() { - [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) { - execute_branch_delay_instruction = true; - int64_t current_pc = get_pc(); -+ printf_instr("Offs16: %04x\n", instr_.Imm16Value()); - set_register(31, current_pc + 2 * kInstrSize); - if (do_branch) { - int16_t imm16 = instr_.Imm16Value(); -@@ -6565,6 +6737,7 @@ void Simulator::DecodeTypeImmediate() { - auto BranchHelper = [this, &next_pc, - &execute_branch_delay_instruction](bool do_branch) { - execute_branch_delay_instruction = true; -+ printf_instr("Offs16: %04x\n", instr_.Imm16Value()); - int64_t current_pc = get_pc(); - if (do_branch) { - int16_t imm16 = instr_.Imm16Value(); -@@ -6601,6 +6774,7 @@ void Simulator::DecodeTypeImmediate() { - auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) { - int64_t current_pc = get_pc(); - CheckForbiddenSlot(current_pc); -+ printf_instr("Offs: %08x\n", instr_.ImmValue(bits)); - if (do_branch) { - int32_t imm = instr_.ImmValue(bits); - imm <<= 32 - bits; -@@ -6613,6 +6787,7 @@ void Simulator::DecodeTypeImmediate() { - auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) { - int64_t current_pc = get_pc(); - CheckForbiddenSlot(current_pc); -+ printf_instr("Offs: %08x\n", instr_.ImmValue(bits)); - if (do_branch) { - int32_t imm = instr_.ImmValue(bits); - imm <<= 32 - bits; -@@ -6707,15 +6882,19 @@ void Simulator::DecodeTypeImmediate() { - case REGIMM: - switch (instr_.RtFieldRaw()) { - case BLTZ: -+ printf_instr("BLTZ\t %s: %016lx, ", Registers::Name(rs_reg), rs); - BranchHelper(rs < 0); - break; - case BGEZ: -+ printf_instr("BGEZ\t %s: %016lx, ", Registers::Name(rs_reg), rs); - BranchHelper(rs >= 0); - break; - case BLTZAL: -+ printf_instr("BLTZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs); - BranchAndLinkHelper(rs < 0); - break; - case BGEZAL: -+ printf_instr("BGEZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs); - BranchAndLinkHelper(rs >= 0); - break; - case DAHI: -@@ -6732,9 +6911,13 @@ void Simulator::DecodeTypeImmediate() { - // When comparing to zero, the encoding of rt field is always 0, so we don't - // need to replace rt with zero. 
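The branch helpers above all decode their immediate the same way: place the n-bit offset at the top of a 32-bit word, shift it back down arithmetically to sign-extend, then scale by the instruction size. A standalone sketch of that idiom (kInstrSize == 4 is assumed from the MIPS encoding; the function name is illustrative):

#include <cassert>
#include <cstdint>

// Sign-extends an n-bit branch immediate via the shift-up/shift-down pair
// used by the compact-branch helpers above, then scales it to a byte offset.
static int64_t BranchOffset(uint32_t raw_imm, int bits) {
  int32_t imm = static_cast<int32_t>(raw_imm << (32 - bits)) >> (32 - bits);
  return static_cast<int64_t>(imm) * 4;  // kInstrSize == 4
}

int main() {
  assert(BranchOffset(0xFFFF, 16) == -4);     // all-ones imm16 is -1 instruction
  assert(BranchOffset(0x0002, 16) == 8);      // forward two instructions
  assert(BranchOffset(0x3FFFFFF, 26) == -4);  // all-ones imm26 likewise
  return 0;
}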
- case BEQ: -+ printf_instr("BEQ\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg), -+ rs, Registers::Name(rt_reg), rt); - BranchHelper(rs == rt); - break; - case BNE: -+ printf_instr("BNE\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg), -+ rs, Registers::Name(rt_reg), rt); - BranchHelper(rs != rt); - break; - case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6) -@@ -6754,6 +6937,7 @@ void Simulator::DecodeTypeImmediate() { - BranchHelper(rs <= 0); - } - } else { // BLEZ -+ printf_instr("BLEZ\t %s: %016lx", Registers::Name(rs_reg), rs); - BranchHelper(rs <= 0); - } - break; -@@ -6774,6 +6958,7 @@ void Simulator::DecodeTypeImmediate() { - BranchHelper(rs > 0); - } - } else { // BGTZ -+ printf_instr("BGTZ\t %s: %016lx", Registers::Name(rs_reg), rs); - BranchHelper(rs > 0); - } - break; -@@ -6791,6 +6976,7 @@ void Simulator::DecodeTypeImmediate() { - } - } - } else { // BLEZL -+ printf_instr("BLEZL\t %s: %016lx", Registers::Name(rs_reg), rs); - BranchAndLinkHelper(rs <= 0); - } - break; -@@ -6808,6 +6994,7 @@ void Simulator::DecodeTypeImmediate() { - } - } - } else { // BGTZL -+ printf_instr("BGTZL\t %s: %016lx", Registers::Name(rs_reg), rs); - BranchAndLinkHelper(rs > 0); - } - break; -@@ -6846,6 +7033,9 @@ void Simulator::DecodeTypeImmediate() { - } - } - } else { // ADDI -+ printf_instr("ADDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - if (HaveSameSign(rs, se_imm16)) { - if (rs > 0) { - if (rs <= Registers::kMaxValue - se_imm16) { -@@ -6876,27 +7066,48 @@ void Simulator::DecodeTypeImmediate() { - break; - // ------------- Arithmetic instructions. - case ADDIU: { -+ printf_instr("ADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - int32_t alu32_out = static_cast(rs + se_imm16); - // Sign-extend result of 32bit operation into 64bit register. - SetResult(rt_reg, static_cast(alu32_out)); - break; - } - case DADDIU: -+ printf_instr("DADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - SetResult(rt_reg, rs + se_imm16); - break; - case SLTI: -+ printf_instr("SLTI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - SetResult(rt_reg, rs < se_imm16 ? 1 : 0); - break; - case SLTIU: -+ printf_instr("SLTIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - SetResult(rt_reg, rs_u < static_cast(se_imm16) ? 1 : 0); - break; - case ANDI: -+ printf_instr("ANDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ oe_imm16); - SetResult(rt_reg, rs & oe_imm16); - break; - case ORI: -+ printf_instr("ORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ oe_imm16); - SetResult(rt_reg, rs | oe_imm16); - break; - case XORI: -+ printf_instr("XORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ oe_imm16); - SetResult(rt_reg, rs ^ oe_imm16); - break; - case LUI: -@@ -6907,6 +7118,8 @@ void Simulator::DecodeTypeImmediate() { - SetResult(rt_reg, static_cast(alu32_out)); - } else { - // LUI instruction. 
-+ printf_instr("LUI\t %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, se_imm16); - int32_t alu32_out = static_cast(oe_imm16 << 16); - // Sign-extend result of 32bit operation into 64bit register. - SetResult(rt_reg, static_cast(alu32_out)); -@@ -6919,12 +7132,21 @@ void Simulator::DecodeTypeImmediate() { - break; - // ------------- Memory instructions. - case LB: -+ printf_instr("LB\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadB(rs + se_imm16)); - break; - case LH: -+ printf_instr("LH\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr())); - break; - case LWL: { -+ printf_instr("LWL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - local_monitor_.NotifyLoad(); - // al_offset is offset of the effective address within an aligned word. - uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; -@@ -6938,21 +7160,39 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case LW: -+ printf_instr("LW\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr())); - break; - case LWU: -+ printf_instr("LWU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr())); - break; - case LD: -+ printf_instr("LD\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr())); - break; - case LBU: -+ printf_instr("LBU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadBU(rs + se_imm16)); - break; - case LHU: -+ printf_instr("LHU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr())); - break; - case LWR: { -+ printf_instr("LWR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - // al_offset is offset of the effective address within an aligned word. - uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; - uint8_t byte_shift = kInt32AlignmentMask - al_offset; -@@ -6965,6 +7205,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case LDL: { -+ printf_instr("LDL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - // al_offset is offset of the effective address within an aligned word. - uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; - uint8_t byte_shift = kInt64AlignmentMask - al_offset; -@@ -6977,6 +7220,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case LDR: { -+ printf_instr("LDR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - // al_offset is offset of the effective address within an aligned word. 
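The LWL/LWR and LDL/LDR handlers in this hunk, including the LDR body that continues below, all merge a freshly loaded aligned word with the preserved bytes of rt. A plain-C++ model of the 32-bit little-endian LWL case, with illustrative names, makes the mask arithmetic easier to follow than the diff form:

#include <cassert>
#include <cstdint>
#include <cstring>

// Models the LWL merge: load the aligned word containing the address, shift
// its bytes into the high end of rt, and keep rt's untouched low bytes.
static int32_t SimulateLwl(const uint8_t* mem, uint32_t addr, int32_t rt) {
  uint32_t al_offset = addr & 3;               // offset within the aligned word
  uint32_t byte_shift = 3 - al_offset;         // kInt32AlignmentMask - al_offset
  uint32_t mask = (1u << byte_shift * 8) - 1;  // low bytes of rt to preserve
  uint32_t aligned;
  std::memcpy(&aligned, mem + (addr & ~3u), 4);
  return static_cast<int32_t>((aligned << byte_shift * 8) |
                              (static_cast<uint32_t>(rt) & mask));
}

int main() {
  const uint8_t mem[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
  // LWL at address 1 keeps the lowest 2 bytes of rt and fills the top with
  // bytes 0..1 of the aligned word: 0x2211 shifted up by 16 bits.
  assert(SimulateLwl(mem, 1, 0x7FAABBCC) == static_cast<int32_t>(0x2211BBCC));
  return 0;
}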
- uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; - uint8_t byte_shift = kInt64AlignmentMask - al_offset; -@@ -6989,12 +7235,21 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case SB: -+ printf_instr("SB\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - WriteB(rs + se_imm16, static_cast(rt)); - break; - case SH: -+ printf_instr("SH\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - WriteH(rs + se_imm16, static_cast(rt), instr_.instr()); - break; - case SWL: { -+ printf_instr("SWL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; - uint8_t byte_shift = kInt32AlignmentMask - al_offset; - uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0; -@@ -7005,12 +7260,21 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case SW: -+ printf_instr("SW\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - WriteW(rs + se_imm16, static_cast(rt), instr_.instr()); - break; - case SD: -+ printf_instr("SD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - Write2W(rs + se_imm16, rt, instr_.instr()); - break; - case SWR: { -+ printf_instr("SWR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; - uint32_t mask = (1 << al_offset * 8) - 1; - addr = rs + se_imm16 - al_offset; -@@ -7020,6 +7284,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case SDL: { -+ printf_instr("SDL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; - uint8_t byte_shift = kInt64AlignmentMask - al_offset; - uint64_t mask = byte_shift ? 
(~0UL << (al_offset + 1) * 8) : 0; -@@ -7030,6 +7297,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case SDR: { -+ printf_instr("SDR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; - uint64_t mask = (1UL << al_offset * 8) - 1; - addr = rs + se_imm16 - al_offset; -@@ -7055,6 +7325,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case LLD: { -+ printf_instr("LLD\t %s: %016lx, %s: %016lx, imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - DCHECK(kArchVariant != kMips64r6); - base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); - addr = rs + se_imm16; -@@ -7065,6 +7338,9 @@ void Simulator::DecodeTypeImmediate() { - break; - } - case SCD: { -+ printf_instr("SCD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", -+ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, -+ se_imm16); - DCHECK(kArchVariant != kMips64r6); - addr = rs + se_imm16; - WriteConditional2W(addr, rt, instr_.instr(), rt_reg); -@@ -7080,11 +7356,17 @@ void Simulator::DecodeTypeImmediate() { - TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE); - break; - case SWC1: { -+ printf_instr("SWC1\t %s: %016f, %s: %016lx, imm16: %04lx\n", -+ FPURegisters::Name(ft_reg), get_fpu_register_float(ft_reg), -+ Registers::Name(rs_reg), rs, se_imm16); - int32_t alu_out_32 = static_cast(get_fpu_register(ft_reg)); - WriteW(rs + se_imm16, alu_out_32, instr_.instr()); - break; - } - case SDC1: -+ printf_instr("SDC1\t %s: %016f, %s: %016lx, imm16: %04lx\n", -+ FPURegisters::Name(ft_reg), get_fpu_register_double(ft_reg), -+ Registers::Name(rs_reg), rs, se_imm16); - WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr()); - TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD); - break; -@@ -7257,6 +7539,8 @@ void Simulator::DecodeTypeJump() { - int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000; - // Next pc. - int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2); -+ printf_instr("%s\t", simInstr.IsLinkingInstruction() ? "JAL" : "J"); -+ printf_instr("offs26: %x\n", instr_.Bits(25, 0)); - - // Execute branch delay slot. - // We don't check for end_sim_pc. First it should not be met as the current pc -@@ -7291,7 +7575,11 @@ void Simulator::InstructionDecode(Instruction* instr) { - dasm.InstructionDecode(buffer, reinterpret_cast(instr)); - } - -+ static int instr_count = 0; -+ USE(instr_count); - instr_ = instr; -+ printf_instr("\nInstr%3d: %08x, PC: %lx\t", instr_count++, instr_.Bits(31, 0), -+ get_pc()); - switch (instr_.InstructionType()) { - case Instruction::kRegisterType: - DecodeTypeRegister(); -diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h -index 58aa753a..abcc10d9 100644 ---- a/deps/v8/src/execution/simulator-base.h -+++ b/deps/v8/src/execution/simulator-base.h -@@ -87,7 +87,7 @@ class SimulatorBase { - static typename std::enable_if::value, intptr_t>::type - ConvertArg(T arg) { - static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize"); --#if V8_TARGET_ARCH_MIPS64 -+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 - // The MIPS64 calling convention is to sign extend all values, even unsigned - // ones. 
- using signed_t = typename std::make_signed::type; -diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h -index a4e07b23..6d02114f 100644 ---- a/deps/v8/src/execution/simulator.h -+++ b/deps/v8/src/execution/simulator.h -@@ -24,6 +24,8 @@ - #include "src/execution/mips/simulator-mips.h" - #elif V8_TARGET_ARCH_MIPS64 - #include "src/execution/mips64/simulator-mips64.h" -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/execution/loong64/simulator-loong64.h" - #elif V8_TARGET_ARCH_S390 - #include "src/execution/s390/simulator-s390.h" - #else -diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h -index 30d5f091..618b31f3 100644 ---- a/deps/v8/src/flags/flag-definitions.h -+++ b/deps/v8/src/flags/flag-definitions.h -@@ -1270,7 +1270,7 @@ DEFINE_BOOL(check_icache, false, - "Check icache flushes in ARM and MIPS simulator") - DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions") - #if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \ -- defined(V8_TARGET_ARCH_PPC64) -+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_LOONG64) - DEFINE_INT(sim_stack_alignment, 16, - "Stack alignment in bytes in simulator. This must be a power of two " - "and it must be at least 16. 16 is default.") -diff --git a/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc -new file mode 100644 -index 00000000..c9e6f5d2 ---- /dev/null -+++ b/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc -@@ -0,0 +1,48 @@ -+// Copyright 2020 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+// Push all callee-saved registers to get them on the stack for conservative -+// stack scanning. -+// -+// See asm/x64/push_registers_clang.cc for why the function is not generated -+// using clang. -+// -+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the -+// GN toolchain (e.g. ChromeOS) and not provide them. -+asm(".text \n" -+ ".global PushAllRegistersAndIterateStack \n" -+ ".type PushAllRegistersAndIterateStack, %function \n" -+ ".hidden PushAllRegistersAndIterateStack \n" -+ "PushAllRegistersAndIterateStack: \n" -+ // Push all callee-saved registers and save return address. -+ " addi.d $sp, $sp, -96 \n" -+ " st.d $ra, $sp, 88 \n" -+ " st.d $s8, $sp, 80 \n" -+ " st.d $sp, $sp, 72 \n" -+ " st.d $fp, $sp, 64 \n" -+ " st.d $s7, $sp, 56 \n" -+ " st.d $s6, $sp, 48 \n" -+ " st.d $s5, $sp, 40 \n" -+ " st.d $s4, $sp, 32 \n" -+ " st.d $s3, $sp, 24 \n" -+ " st.d $s2, $sp, 16 \n" -+ " st.d $s1, $sp, 8 \n" -+ " st.d $s0, $sp, 0 \n" -+ // Maintain frame pointer. -+ " addi.d $s8, $sp, 0 \n" -+ // Pass 1st parameter (a0) unchanged (Stack*). -+ // Pass 2nd parameter (a1) unchanged (StackVisitor*). -+ // Save 3rd parameter (a2; IterateStackCallback). -+ " addi.d $a3, $a2, 0 \n" -+ // Call the callback. -+ // Pass 3rd parameter as sp (stack pointer). -+ " addi.d $a2, $sp, 0 \n" -+ " jirl $ra, $a3, 0 \n" -+ // Load return address. -+ " ld.d $ra, $sp, 88 \n" -+ // Restore frame pointer. -+ " ld.d $s8, $sp, 80 \n" -+ // Discard all callee-saved registers. 
-+ " addi.d $sp, $sp, 96 \n" -+ " jirl $zero, $ra, 0 \n"); -diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc -index 49adee5b..313ab3f7 100644 ---- a/deps/v8/src/interpreter/interpreter-assembler.cc -+++ b/deps/v8/src/interpreter/interpreter-assembler.cc -@@ -1346,7 +1346,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode target_bytecode) { - - // static - bool InterpreterAssembler::TargetSupportsUnalignedAccess() { --#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 -+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 - return false; - #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ -diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc -index 0443657d..430e4693 100644 ---- a/deps/v8/src/libsampler/sampler.cc -+++ b/deps/v8/src/libsampler/sampler.cc -@@ -415,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { - state->pc = reinterpret_cast(mcontext.pc); - state->sp = reinterpret_cast(mcontext.gregs[29]); - state->fp = reinterpret_cast(mcontext.gregs[30]); -+#elif V8_HOST_ARCH_LOONG64 -+ state->pc = reinterpret_cast(mcontext.__pc); -+ state->sp = reinterpret_cast(mcontext.__gregs[3]); -+ state->fp = reinterpret_cast(mcontext.__gregs[22]); - #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 - #if V8_LIBC_GLIBC - state->pc = reinterpret_cast(ucontext->uc_mcontext.regs->nip); -diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc -index 00edcc8c..6a3133d8 100644 ---- a/deps/v8/src/logging/log.cc -+++ b/deps/v8/src/logging/log.cc -@@ -587,6 +587,8 @@ void LowLevelLogger::LogCodeInfo() { - const char arch[] = "ppc64"; - #elif V8_TARGET_ARCH_MIPS - const char arch[] = "mips"; -+#elif V8_TARGET_ARCH_LOONG64 -+ const char arch[] = "loong64"; - #elif V8_TARGET_ARCH_ARM64 - const char arch[] = "arm64"; - #elif V8_TARGET_ARCH_S390 -diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc -index bd9f39b7..46d31cf6 100644 ---- a/deps/v8/src/objects/backing-store.cc -+++ b/deps/v8/src/objects/backing-store.cc -@@ -29,7 +29,7 @@ constexpr bool kUseGuardRegions = true; - constexpr bool kUseGuardRegions = false; - #endif - --#if V8_TARGET_ARCH_MIPS64 -+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 - // MIPS64 has a user space of 2^40 bytes on most processors, - // address space limits needs to be smaller. - constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB -diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h -index ea6f52cc..7ba2beff 100644 ---- a/deps/v8/src/objects/code.h -+++ b/deps/v8/src/objects/code.h -@@ -421,6 +421,8 @@ class Code : public HeapObject { - static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28; - #elif V8_TARGET_ARCH_MIPS64 - static constexpr int kHeaderPaddingSize = 28; -+#elif V8_TARGET_ARCH_LOONG64 -+ static constexpr int kHeaderPaddingSize = 28; - #elif V8_TARGET_ARCH_X64 - static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 
16 : 28; - #elif V8_TARGET_ARCH_ARM -diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc -index 00bff91c..a95f3a74 100644 ---- a/deps/v8/src/profiler/tick-sample.cc -+++ b/deps/v8/src/profiler/tick-sample.cc -@@ -104,7 +104,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, - state->sp = reinterpret_cast(simulator->sp()); - state->fp = reinterpret_cast(simulator->fp()); - state->lr = reinterpret_cast(simulator->lr()); --#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 -+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 - if (!simulator->has_bad_pc()) { - state->pc = reinterpret_cast(simulator->get_pc()); - } -diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc -new file mode 100644 -index 00000000..22b40fde ---- /dev/null -+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc -@@ -0,0 +1,1266 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#if V8_TARGET_ARCH_LOONG64 -+ -+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h" -+ -+#include "src/codegen/assembler-inl.h" -+#include "src/codegen/macro-assembler.h" -+#include "src/logging/log.h" -+#include "src/objects/objects-inl.h" -+#include "src/regexp/regexp-macro-assembler.h" -+#include "src/regexp/regexp-stack.h" -+#include "src/snapshot/embedded/embedded-data.h" -+#include "src/strings/unicode.h" -+ -+namespace v8 { -+namespace internal { -+ -+/* clang-format off -+ * -+ * This assembler uses the following register assignment convention -+ * - t3 : Temporarily stores the index of capture start after a matching pass -+ * for a global regexp. -+ * - a5 : Pointer to current Code object including heap object tag. -+ * - a6 : Current position in input, as negative offset from end of string. -+ * Please notice that this is the byte offset, not the character offset! -+ * - a7 : Currently loaded character. Must be loaded using -+ * LoadCurrentCharacter before using any of the dispatch methods. -+ * - t0 : Points to tip of backtrack stack -+ * - t1 : Unused. -+ * - t2 : End of input (points to byte after last character in input). -+ * - fp : Frame pointer. Used to access arguments, local variables and -+ * RegExp registers. -+ * - sp : Points to tip of C stack. -+ * -+ * The remaining registers are free for computations. -+ * Each call to a public method should retain this convention. -+ * -+ * TODO(plind): O32 documented here with intent of having single 32/64 codebase -+ * in the future. -+ * -+ * The O32 stack will have the following structure: -+ * -+ * - fp[72] Isolate* isolate (address of the current isolate) -+ * - fp[68] direct_call (if 1, direct call from JavaScript code, -+ * if 0, call through the runtime system). -+ * - fp[64] stack_area_base (High end of the memory area to use as -+ * backtracking stack). -+ * - fp[60] capture array size (may fit multiple sets of matches) -+ * - fp[44..59] MIPS O32 four argument slots -+ * - fp[40] int* capture_array (int[num_saved_registers_], for output). -+ * --- sp when called --- -+ * - fp[36] return address (lr). -+ * - fp[32] old frame pointer (r11). -+ * - fp[0..31] backup of registers s0..s7. -+ * --- frame pointer ---- -+ * - fp[-4] end of input (address of end of string). -+ * - fp[-8] start of input (address of first character in string). 
-+ * - fp[-12] start index (character index of start). -+ * - fp[-16] void* input_string (location of a handle containing the string). -+ * - fp[-20] success counter (only for global regexps to count matches). -+ * - fp[-24] Offset of location before start of input (effectively character -+ * string start - 1). Used to initialize capture registers to a -+ * non-position. -+ * - fp[-28] At start (if 1, we are starting at the start of the -+ * string, otherwise 0) -+ * - fp[-32] register 0 (Only positions must be stored in the first -+ * - register 1 num_saved_registers_ registers) -+ * - ... -+ * - register num_registers-1 -+ * --- sp --- -+ * -+ * -+ * The N64 stack will have the following structure: -+ * -+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate -+ * kStackFrameHeader -+ * --- sp when called --- -+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress -+ * - fp[64] s9, old-fp Old fp, callee saved(s9). -+ * - fp[0..63] s0..s7 Callee-saved registers s0..s7. -+ * --- frame pointer ---- -+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall -+ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd -+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters -+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput -+ * - fp[-40] end of input (address of end of string). kInputEnd -+ * - fp[-48] start of input (address of first character in string). kInputStart -+ * - fp[-56] start index (character index of start). kStartIndex -+ * - fp[-64] void* input_string (location of a handle containing the string). kInputString -+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures -+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne -+ * position -1). Used to initialize capture registers to a -+ * non-position. -+ * --------- The following output registers are 32-bit values. --------- -+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero -+ * - register 1 num_saved_registers_ registers) -+ * - ... -+ * - register num_registers-1 -+ * --- sp --- -+ * -+ * The first num_saved_registers_ registers are initialized to point to -+ * "character -1" in the string (i.e., char_size() bytes before the first -+ * character of the string). The remaining registers start out as garbage. -+ * -+ * The data up to the return address must be placed there by the calling -+ * code and the remaining arguments are passed in registers, e.g. by calling the -+ * code entry as cast to a function with the signature: -+ * int (*match)(String input_string, -+ * int start_index, -+ * Address start, -+ * Address end, -+ * int* capture_output_array, -+ * int num_capture_registers, -+ * byte* stack_area_base, -+ * bool direct_call = false, -+ * Isolate* isolate); -+ * The call is performed by NativeRegExpMacroAssembler::Execute() -+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. 
-+ * -+ * clang-format on -+ */ -+ -+#define __ ACCESS_MASM(masm_) -+ -+const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize; -+ -+RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, -+ Mode mode, -+ int registers_to_save) -+ : NativeRegExpMacroAssembler(isolate, zone), -+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes, -+ NewAssemblerBuffer(kRegExpCodeSize))), -+ mode_(mode), -+ num_registers_(registers_to_save), -+ num_saved_registers_(registers_to_save), -+ entry_label_(), -+ start_label_(), -+ success_label_(), -+ backtrack_label_(), -+ exit_label_(), -+ internal_failure_label_() { -+ masm_->set_root_array_available(false); -+ -+ DCHECK_EQ(0, registers_to_save % 2); -+ __ jmp(&entry_label_); // We'll write the entry code later. -+ // If the code gets too big or corrupted, an internal exception will be -+ // raised, and we will exit right away. -+ __ bind(&internal_failure_label_); -+ __ li(a0, Operand(FAILURE)); -+ __ Ret(); -+ __ bind(&start_label_); // And then continue from here. -+} -+ -+RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() { -+ delete masm_; -+ // Unuse labels in case we throw away the assembler without calling GetCode. -+ entry_label_.Unuse(); -+ start_label_.Unuse(); -+ success_label_.Unuse(); -+ backtrack_label_.Unuse(); -+ exit_label_.Unuse(); -+ check_preempt_label_.Unuse(); -+ stack_overflow_label_.Unuse(); -+ internal_failure_label_.Unuse(); -+} -+ -+int RegExpMacroAssemblerLOONG64::stack_limit_slack() { -+ return RegExpStack::kStackLimitSlack; -+} -+ -+void RegExpMacroAssemblerLOONG64::AdvanceCurrentPosition(int by) { -+ if (by != 0) { -+ __ Add_d(current_input_offset(), current_input_offset(), -+ Operand(by * char_size())); -+ } -+} -+ -+void RegExpMacroAssemblerLOONG64::AdvanceRegister(int reg, int by) { -+ DCHECK_LE(0, reg); -+ DCHECK_GT(num_registers_, reg); -+ if (by != 0) { -+ __ Ld_d(a0, register_location(reg)); -+ __ Add_d(a0, a0, Operand(by)); -+ __ St_d(a0, register_location(reg)); -+ } -+} -+ -+void RegExpMacroAssemblerLOONG64::Backtrack() { -+ CheckPreemption(); -+ if (has_backtrack_limit()) { -+ Label next; -+ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); -+ __ Add_d(a0, a0, Operand(1)); -+ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); -+ __ Branch(&next, ne, a0, Operand(backtrack_limit())); -+ -+ // Exceeded limits are treated as a failed match. -+ Fail(); -+ -+ __ bind(&next); -+ } -+ // Pop Code offset from backtrack stack, add Code and jump to location. 
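The Pop and jump below complete Backtrack(); the counting logic just above them is the part worth pausing on: every backtrack bumps a counter slot in the frame, and the match fails outright once the configured limit is reached, instead of churning on a pathological pattern. A sketch of that bookkeeping, with a zero-means-unlimited convention assumed purely for illustration (the real code checks has_backtrack_limit()):

#include <cassert>
#include <cstdint>

// Illustrative model of the backtrack-limit bookkeeping in Backtrack() above.
struct BacktrackState {
  int64_t count;  // lives at fp[kBacktrackCount] in the generated frame
  int64_t limit;  // backtrack_limit(); 0 means "no limit" in this sketch
  // Returns false when the match must Fail().
  bool Step() {
    if (limit == 0) return true;
    return ++count != limit;
  }
};

int main() {
  BacktrackState s{0, 3};
  assert(s.Step() && s.Step());  // counts 1 and 2 are fine
  assert(!s.Step());             // the third backtrack hits the limit
  return 0;
}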
-+ Pop(a0); -+ __ Add_d(a0, a0, code_pointer()); -+ __ Jump(a0); -+} -+ -+void RegExpMacroAssemblerLOONG64::Bind(Label* label) { __ bind(label); } -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacter(uint32_t c, Label* on_equal) { -+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacterGT(uc16 limit, Label* on_greater) { -+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset, Label* on_at_start) { -+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ __ Add_d(a0, current_input_offset(), -+ Operand(-char_size() + cp_offset * char_size())); -+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset, -+ Label* on_not_at_start) { -+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ __ Add_d(a0, current_input_offset(), -+ Operand(-char_size() + cp_offset * char_size())); -+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacterLT(uc16 limit, Label* on_less) { -+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) { -+ Label backtrack_non_equal; -+ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0)); -+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); -+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), -+ Operand(kIntSize)); -+ __ bind(&backtrack_non_equal); -+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase( -+ int start_reg, bool read_backward, Label* on_no_match) { -+ Label fallthrough; -+ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture. -+ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture. -+ __ Sub_d(a1, a1, a0); // Length of capture. -+ -+ // At this point, the capture registers are either both set or both cleared. -+ // If the capture length is zero, then the capture is either empty or cleared. -+ // Fall through in both cases. -+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); -+ -+ if (read_backward) { -+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ __ Add_d(t1, t1, a1); -+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); -+ } else { -+ __ Add_d(t1, a1, current_input_offset()); -+ // Check that there are enough characters left in the input. -+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); -+ } -+ -+ if (mode_ == LATIN1) { -+ Label success; -+ Label fail; -+ Label loop_check; -+ -+ // a0 - offset of start of capture. -+ // a1 - length of capture. -+ __ Add_d(a0, a0, Operand(end_of_input_address())); -+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); -+ if (read_backward) { -+ __ Sub_d(a2, a2, Operand(a1)); -+ } -+ __ Add_d(a1, a0, Operand(a1)); -+ -+ // a0 - Address of start of capture. -+ // a1 - Address of end of capture. -+ // a2 - Address of current input position. -+ -+ Label loop; -+ __ bind(&loop); -+ __ Ld_bu(a3, MemOperand(a0, 0)); -+ __ addi_d(a0, a0, char_size()); -+ __ Ld_bu(a4, MemOperand(a2, 0)); -+ __ addi_d(a2, a2, char_size()); -+ -+ __ Branch(&loop_check, eq, a4, Operand(a3)); -+ -+ // Mismatch, try case-insensitive match (converting letters to lower-case). 
-+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. -+ __ Or(a4, a4, Operand(0x20)); // Also convert input character. -+ __ Branch(&fail, ne, a4, Operand(a3)); -+ __ Sub_d(a3, a3, Operand('a')); -+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); -+ // Latin-1: Check for values in range [224,254] but not 247. -+ __ Sub_d(a3, a3, Operand(224 - 'a')); -+ // Weren't Latin-1 letters. -+ __ Branch(&fail, hi, a3, Operand(254 - 224)); -+ // Check for 247. -+ __ Branch(&fail, eq, a3, Operand(247 - 224)); -+ -+ __ bind(&loop_check); -+ __ Branch(&loop, lt, a0, Operand(a1)); -+ __ jmp(&success); -+ -+ __ bind(&fail); -+ GoTo(on_no_match); -+ -+ __ bind(&success); -+ // Compute new value of character position after the matched part. -+ __ Sub_d(current_input_offset(), a2, end_of_input_address()); -+ if (read_backward) { -+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. -+ __ Ld_d(a2, -+ register_location(start_reg + 1)); // Index of end of capture. -+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); -+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); -+ } -+ } else { -+ DCHECK(mode_ == UC16); -+ // Put regexp engine registers on stack. -+ RegList regexp_registers_to_retain = current_input_offset().bit() | -+ current_character().bit() | -+ backtrack_stackpointer().bit(); -+ __ MultiPush(regexp_registers_to_retain); -+ -+ int argument_count = 4; -+ __ PrepareCallCFunction(argument_count, a2); -+ -+ // a0 - offset of start of capture. -+ // a1 - length of capture. -+ -+ // Put arguments into arguments registers. -+ // Parameters are -+ // a0: Address byte_offset1 - Address captured substring's start. -+ // a1: Address byte_offset2 - Address of current character position. -+ // a2: size_t byte_length - length of capture in bytes(!). -+ // a3: Isolate* isolate. -+ -+ // Address of start of capture. -+ __ Add_d(a0, a0, Operand(end_of_input_address())); -+ // Length of capture. -+ __ mov(a2, a1); -+ // Save length in callee-save register for use on return. -+ __ mov(s3, a1); -+ // Address of current input position. -+ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address())); -+ if (read_backward) { -+ __ Sub_d(a1, a1, Operand(s3)); -+ } -+ // Isolate. -+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); -+ -+ { -+ AllowExternalCallThatCantCauseGC scope(masm_); -+ ExternalReference function = -+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); -+ __ CallCFunction(function, argument_count); -+ } -+ -+ // Restore regexp engine registers. -+ __ MultiPop(regexp_registers_to_retain); -+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); -+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); -+ -+ // Check if function returned non-zero for success or zero for failure. -+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); -+ // On success, increment position by length of capture. -+ if (read_backward) { -+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3)); -+ } else { -+ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3)); -+ } -+ } -+ -+ __ bind(&fallthrough); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg, -+ bool read_backward, -+ Label* on_no_match) { -+ Label fallthrough; -+ -+ // Find length of back-referenced capture. 
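The Latin-1 branch of CheckNotBackReferenceIgnoreCase above leans on a classic trick: OR both bytes with 0x20 to fold case, then accept the fold only for ranges where it actually pairs letters. A C++ rendering of the emitted test (the function name is illustrative), before the plain CheckNotBackReference body continues below:

#include <cassert>
#include <cstdint>

// Model of the Latin-1 case-insensitive comparison emitted above: fold both
// bytes with | 0x20, then accept a..z or the Latin-1 letters 0xE0..0xFE,
// excluding 0xF7 (the division sign, whose 0xD7 partner is not a letter).
static bool Latin1CharsEqualIgnoreCase(uint8_t capture, uint8_t input) {
  if (capture == input) return true;
  uint8_t c = capture | 0x20;  // lower-case both candidates
  uint8_t i = input | 0x20;
  if (c != i) return false;
  uint8_t folded = c - 'a';
  if (folded <= 'z' - 'a') return true;  // ASCII letter
  folded = static_cast<uint8_t>(folded - (224 - 'a'));
  if (folded > 254 - 224) return false;  // outside 0xE0..0xFE
  return folded != 247 - 224;            // 0xF7 is not a letter
}

int main() {
  assert(Latin1CharsEqualIgnoreCase('A', 'a'));
  assert(Latin1CharsEqualIgnoreCase(0xC0, 0xE0));   // A-grave vs a-grave
  assert(!Latin1CharsEqualIgnoreCase(0xD7, 0xF7));  // multiply vs divide sign
  assert(!Latin1CharsEqualIgnoreCase('@', '`'));    // fold equal, not letters
  return 0;
}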
-+ __ Ld_d(a0, register_location(start_reg)); -+ __ Ld_d(a1, register_location(start_reg + 1)); -+ __ Sub_d(a1, a1, a0); // Length to check. -+ -+ // At this point, the capture registers are either both set or both cleared. -+ // If the capture length is zero, then the capture is either empty or cleared. -+ // Fall through in both cases. -+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); -+ -+ if (read_backward) { -+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ __ Add_d(t1, t1, a1); -+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); -+ } else { -+ __ Add_d(t1, a1, current_input_offset()); -+ // Check that there are enough characters left in the input. -+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); -+ } -+ -+ // Compute pointers to match string and capture string. -+ __ Add_d(a0, a0, Operand(end_of_input_address())); -+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); -+ if (read_backward) { -+ __ Sub_d(a2, a2, Operand(a1)); -+ } -+ __ Add_d(a1, a1, Operand(a0)); -+ -+ Label loop; -+ __ bind(&loop); -+ if (mode_ == LATIN1) { -+ __ Ld_bu(a3, MemOperand(a0, 0)); -+ __ addi_d(a0, a0, char_size()); -+ __ Ld_bu(a4, MemOperand(a2, 0)); -+ __ addi_d(a2, a2, char_size()); -+ } else { -+ DCHECK(mode_ == UC16); -+ __ Ld_hu(a3, MemOperand(a0, 0)); -+ __ addi_d(a0, a0, char_size()); -+ __ Ld_hu(a4, MemOperand(a2, 0)); -+ __ addi_d(a2, a2, char_size()); -+ } -+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4)); -+ __ Branch(&loop, lt, a0, Operand(a1)); -+ -+ // Move current character position to position after match. -+ __ Sub_d(current_input_offset(), a2, end_of_input_address()); -+ if (read_backward) { -+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. -+ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture. -+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); -+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); -+ } -+ __ bind(&fallthrough); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotCharacter(uint32_t c, -+ Label* on_not_equal) { -+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacterAfterAnd(uint32_t c, uint32_t mask, -+ Label* on_equal) { -+ __ And(a0, current_character(), Operand(mask)); -+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); -+ BranchOrBacktrack(on_equal, eq, a0, rhs); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterAnd(uint32_t c, -+ uint32_t mask, -+ Label* on_not_equal) { -+ __ And(a0, current_character(), Operand(mask)); -+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); -+ BranchOrBacktrack(on_not_equal, ne, a0, rhs); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterMinusAnd( -+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) { -+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus); -+ __ Sub_d(a0, current_character(), Operand(minus)); -+ __ And(a0, a0, Operand(mask)); -+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacterInRange(uc16 from, uc16 to, -+ Label* on_in_range) { -+ __ Sub_d(a0, current_character(), Operand(from)); -+ // Unsigned lower-or-same condition. 
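The single ls comparison just below is the standard unsigned range-check idiom, also called out in the CheckSpecialCharacterClass comment further down: c lies in [from, to] exactly when the unsigned value of c - from is at most to - from, so one subtraction and one unsigned branch replace two signed comparisons. A minimal demonstration:

#include <cassert>
#include <cstdint>

// The one-comparison range test emitted for CheckCharacterInRange above.
static bool InRange(uint16_t c, uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(c - from) <= static_cast<uint16_t>(to - from);
}

int main() {
  assert(InRange('5', '0', '9'));
  assert(!InRange('/', '0', '9'));  // 0x2F - 0x30 wraps to a huge unsigned value
  assert(!InRange(':', '0', '9'));
  return 0;
}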
-+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange( -+ uc16 from, uc16 to, Label* on_not_in_range) { -+ __ Sub_d(a0, current_character(), Operand(from)); -+ // Unsigned higher condition. -+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from)); -+} -+ -+void RegExpMacroAssemblerLOONG64::CheckBitInTable(Handle table, -+ Label* on_bit_set) { -+ __ li(a0, Operand(table)); -+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) { -+ __ And(a1, current_character(), Operand(kTableSize - 1)); -+ __ Add_d(a0, a0, a1); -+ } else { -+ __ Add_d(a0, a0, current_character()); -+ } -+ -+ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize)); -+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); -+} -+ -+bool RegExpMacroAssemblerLOONG64::CheckSpecialCharacterClass(uc16 type, -+ Label* on_no_match) { -+ // Range checks (c in min..max) are generally implemented by an unsigned -+ // (c - min) <= (max - min) check. -+ switch (type) { -+ case 's': -+ // Match space-characters. -+ if (mode_ == LATIN1) { -+ // One byte space characters are '\t'..'\r', ' ' and \u00a0. -+ Label success; -+ __ Branch(&success, eq, current_character(), Operand(' ')); -+ // Check range 0x09..0x0D. -+ __ Sub_d(a0, current_character(), Operand('\t')); -+ __ Branch(&success, ls, a0, Operand('\r' - '\t')); -+ // \u00a0 (NBSP). -+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t')); -+ __ bind(&success); -+ return true; -+ } -+ return false; -+ case 'S': -+ // The emitted code for generic character classes is good enough. -+ return false; -+ case 'd': -+ // Match Latin1 digits ('0'..'9'). -+ __ Sub_d(a0, current_character(), Operand('0')); -+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0')); -+ return true; -+ case 'D': -+ // Match non Latin1-digits. -+ __ Sub_d(a0, current_character(), Operand('0')); -+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0')); -+ return true; -+ case '.': { -+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). -+ __ Xor(a0, current_character(), Operand(0x01)); -+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. -+ __ Sub_d(a0, a0, Operand(0x0B)); -+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B)); -+ if (mode_ == UC16) { -+ // Compare original value to 0x2028 and 0x2029, using the already -+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for -+ // 0x201D (0x2028 - 0x0B) or 0x201E. -+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B)); -+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1)); -+ } -+ return true; -+ } -+ case 'n': { -+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). -+ __ Xor(a0, current_character(), Operand(0x01)); -+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. -+ __ Sub_d(a0, a0, Operand(0x0B)); -+ if (mode_ == LATIN1) { -+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B)); -+ } else { -+ Label done; -+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B)); -+ // Compare original value to 0x2028 and 0x2029, using the already -+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for -+ // 0x201D (0x2028 - 0x0B) or 0x201E. -+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B)); -+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1)); -+ __ bind(&done); -+ } -+ return true; -+ } -+ case 'w': { -+ if (mode_ != LATIN1) { -+ // Table is 256 entries, so all Latin1 characters can be tested. 
-+        BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
-+      }
-+      ExternalReference map =
-+          ExternalReference::re_word_character_map(isolate());
-+      __ li(a0, Operand(map));
-+      __ Add_d(a0, a0, current_character());
-+      __ Ld_bu(a0, MemOperand(a0, 0));
-+      BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
-+      return true;
-+    }
-+    case 'W': {
-+      Label done;
-+      if (mode_ != LATIN1) {
-+        // Table is 256 entries, so all Latin1 characters can be tested.
-+        __ Branch(&done, hi, current_character(), Operand('z'));
-+      }
-+      ExternalReference map =
-+          ExternalReference::re_word_character_map(isolate());
-+      __ li(a0, Operand(map));
-+      __ Add_d(a0, a0, current_character());
-+      __ Ld_bu(a0, MemOperand(a0, 0));
-+      BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
-+      if (mode_ != LATIN1) {
-+        __ bind(&done);
-+      }
-+      return true;
-+    }
-+    case '*':
-+      // Match any character.
-+      return true;
-+    // No custom implementation (yet): s(UC16), S(UC16).
-+    default:
-+      return false;
-+  }
-+}
-+
-+void RegExpMacroAssemblerLOONG64::Fail() {
-+  __ li(a0, Operand(FAILURE));
-+  __ jmp(&exit_label_);
-+}
-+
-+Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
-+  Label return_v0;
-+  if (0 /* todo masm_->has_exception()*/) {
-+    // If the code gets corrupted due to long regular expressions and lack of
-+    // space on trampolines, an internal exception flag is set. If this case
-+    // is detected, we will jump into exit sequence right away.
-+    //__ bind_to(&entry_label_, internal_failure_label_.pos());
-+  } else {
-+    // Finalize code - write the entry point code now we know how many
-+    // registers we need.
-+
-+    // Entry code:
-+    __ bind(&entry_label_);
-+
-+    // Tell the system that we have a stack frame. Because the type is MANUAL,
-+    // no code is generated.
-+    FrameScope scope(masm_, StackFrame::MANUAL);
-+
-+    // Actually emit code to start a new stack frame.
-+    // Push arguments
-+    // Save callee-save registers.
-+    // Start new stack frame.
-+    // Store link register in existing stack-cell.
-+    // Order here should correspond to order of offset constants in header file.
-+    // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
-+    // or don't save.
-+    RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
-+                                  s4.bit() | s5.bit() | s6.bit() | s7.bit();
-+    RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
-+
-+    argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
-+
-+    __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
-+    // Set frame pointer in space for it if this is not a direct call
-+    // from generated code.
-+    // TODO(plind): this 8 is the # of argument regs, should have definition.
-+    __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
-+    STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
-+    __ mov(a0, zero_reg);
-+    __ push(a0);  // Make room for success counter and initialize it to 0.
-+    STATIC_ASSERT(kStringStartMinusOne ==
-+                  kSuccessfulCaptures - kSystemPointerSize);
-+    __ push(a0);  // Make room for "string start - 1" constant.
-+    STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
-+    __ push(a0);  // The backtrack counter
-+
-+    // Check if we have space on the stack for registers.
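The three STATIC_ASSERTs above pin down the frame layout this entry code builds by pushing zeroes: each named slot sits exactly one pointer below its predecessor. An illustrative reconstruction, before the stack check continues below; the numeric offsets are assumptions read off the layout comment earlier in this file (kInputString at fp[-64]), not values copied from the header:

#include <cstdint>

// Frame-slot bookkeeping mirroring the STATIC_ASSERT chain above. Offsets
// are relative to the frame pointer; each push moves one pointer (8 bytes
// on LoongArch64) further down. Values here are assumed for illustration.
constexpr int kSystemPointerSize = 8;
constexpr int kInputString = -64;
constexpr int kSuccessfulCaptures = kInputString - kSystemPointerSize;          // -72
constexpr int kStringStartMinusOne = kSuccessfulCaptures - kSystemPointerSize;  // -80
constexpr int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;      // -88

// The relationships the patch encodes with STATIC_ASSERT: because each slot
// is exactly one push below its predecessor, three pushes of zero in a row
// initialize all three cells.
static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize, "slot spacing");
static_assert(kStringStartMinusOne == kSuccessfulCaptures - kSystemPointerSize, "slot spacing");
static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize, "slot spacing");

int main() { return 0; }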
-+ Label stack_limit_hit; -+ Label stack_ok; -+ -+ ExternalReference stack_limit = -+ ExternalReference::address_of_jslimit(masm_->isolate()); -+ __ li(a0, Operand(stack_limit)); -+ __ Ld_d(a0, MemOperand(a0, 0)); -+ __ Sub_d(a0, sp, a0); -+ // Handle it if the stack pointer is already below the stack limit. -+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); -+ // Check if there is room for the variable number of registers above -+ // the stack limit. -+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); -+ // Exit with OutOfMemory exception. There is not enough space on the stack -+ // for our working registers. -+ __ li(a0, Operand(EXCEPTION)); -+ __ jmp(&return_v0); -+ -+ __ bind(&stack_limit_hit); -+ CallCheckStackGuardState(a0); -+ // If returned value is non-zero, we exit with the returned value as result. -+ __ Branch(&return_v0, ne, a0, Operand(zero_reg)); -+ -+ __ bind(&stack_ok); -+ // Allocate space on stack for registers. -+ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize)); -+ // Load string end. -+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); -+ // Load input start. -+ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart)); -+ // Find negative length (offset of start relative to end). -+ __ Sub_d(current_input_offset(), a0, end_of_input_address()); -+ // Set a0 to address of char before start of the input string -+ // (effectively string position -1). -+ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex)); -+ __ Sub_d(a0, current_input_offset(), Operand(char_size())); -+ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0); -+ __ Sub_d(a0, a0, t1); -+ // Store this value in a local variable, for use when clearing -+ // position registers. -+ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ -+ // Initialize code pointer register -+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); -+ -+ Label load_char_start_regexp, start_regexp; -+ // Load newline if index is at start, previous character otherwise. -+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); -+ __ li(current_character(), Operand('\n')); -+ __ jmp(&start_regexp); -+ -+ // Global regexp restarts matching here. -+ __ bind(&load_char_start_regexp); -+ // Load previous char as initial value of current character register. -+ LoadCurrentCharacterUnchecked(-1, 1); -+ __ bind(&start_regexp); -+ -+ // Initialize on-stack registers. -+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. -+ // Fill saved registers with initial value = start offset - 1. -+ if (num_saved_registers_ > 8) { -+ // Address of register 0. -+ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero)); -+ __ li(a2, Operand(num_saved_registers_)); -+ Label init_loop; -+ __ bind(&init_loop); -+ __ St_d(a0, MemOperand(a1, 0)); -+ __ Add_d(a1, a1, Operand(-kPointerSize)); -+ __ Sub_d(a2, a2, Operand(1)); -+ __ Branch(&init_loop, ne, a2, Operand(zero_reg)); -+ } else { -+ for (int i = 0; i < num_saved_registers_; i++) { -+ __ St_d(a0, register_location(i)); -+ } -+ } -+ } -+ -+ // Initialize backtrack stack pointer. -+ __ Ld_d(backtrack_stackpointer(), -+ MemOperand(frame_pointer(), kStackHighEnd)); -+ -+ __ jmp(&start_label_); -+ -+ // Exit code: -+ if (success_label_.is_linked()) { -+ // Save captures when successful. -+ __ bind(&success_label_); -+ if (num_saved_registers_ > 0) { -+ // Copy captures to output. 
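-+ // (The capture registers hold negative offsets from the end of the input;
-+ // the loop below rebases them to character indices before storing them
-+ // into the int32_t output array.)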
-+ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart));
-+ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput));
-+ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex));
-+ __ Sub_d(a1, end_of_input_address(), a1);
-+ // a1 is length of input in bytes.
-+ if (mode_ == UC16) {
-+ __ srli_d(a1, a1, 1);
-+ }
-+ // a1 is length of input in characters.
-+ __ Add_d(a1, a1, Operand(a2));
-+ // a1 is length of string in characters.
-+
-+ DCHECK_EQ(0, num_saved_registers_ % 2);
-+ // Always an even number of capture registers. This allows us to
-+ // unroll the loop once to add an operation between a load of a register
-+ // and the following use of that register.
-+ for (int i = 0; i < num_saved_registers_; i += 2) {
-+ __ Ld_d(a2, register_location(i));
-+ __ Ld_d(a3, register_location(i + 1));
-+ if (i == 0 && global_with_zero_length_check()) {
-+ // Keep capture start in t3 for the zero-length check later.
-+ __ mov(t3, a2);
-+ }
-+ if (mode_ == UC16) {
-+ __ srai_d(a2, a2, 1);
-+ __ Add_d(a2, a2, a1);
-+ __ srai_d(a3, a3, 1);
-+ __ Add_d(a3, a3, a1);
-+ } else {
-+ __ Add_d(a2, a1, Operand(a2));
-+ __ Add_d(a3, a1, Operand(a3));
-+ }
-+ // V8 expects the output to be an int32_t array.
-+ __ St_w(a2, MemOperand(a0, 0));
-+ __ Add_d(a0, a0, kIntSize);
-+ __ St_w(a3, MemOperand(a0, 0));
-+ __ Add_d(a0, a0, kIntSize);
-+ }
-+ }
-+
-+ if (global()) {
-+ // Restart matching if the regular expression is flagged as global.
-+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-+ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
-+ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
-+ // Increment success counter.
-+ __ Add_d(a0, a0, 1);
-+ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-+ // Capture results have been stored, so the number of remaining global
-+ // output registers is reduced by the number of stored captures.
-+ __ Sub_d(a1, a1, num_saved_registers_);
-+ // Check whether we have enough room for another set of capture results.
-+ //__ mov(v0, a0);
-+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
-+
-+ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
-+ // Advance the location for output.
-+ __ Add_d(a2, a2, num_saved_registers_ * kIntSize);
-+ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
-+
-+ // Prepare a0 to initialize registers with its value in the next run.
-+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
-+
-+ if (global_with_zero_length_check()) {
-+ // Special case for zero-length matches.
-+ // t3: capture start index
-+ // Not a zero-length match, restart.
-+ __ Branch(&load_char_start_regexp, ne, current_input_offset(),
-+ Operand(t3));
-+ // Offset from the end is zero if we already reached the end.
-+ __ Branch(&exit_label_, eq, current_input_offset(),
-+ Operand(zero_reg));
-+ // Advance current position after a zero-length match.
-+ Label advance;
-+ __ bind(&advance);
-+ __ Add_d(current_input_offset(), current_input_offset(),
-+ Operand((mode_ == UC16) ? 2 : 1));
-+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
-+ }
-+
-+ __ Branch(&load_char_start_regexp);
-+ } else {
-+ __ li(a0, Operand(SUCCESS));
-+ }
-+ }
-+ // Exit and return v0.
-+ __ bind(&exit_label_);
-+ if (global()) {
-+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-+ }
-+
-+ __ bind(&return_v0);
-+ // Skip sp past regexp registers and local variables.
-+ __ mov(sp, frame_pointer());
-+ // Restore registers s0..s7 and return (restoring ra to pc).
-+ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain);
-+ __ Ret();
-+
-+ // Backtrack code (branch target for conditional backtracks).
-+ if (backtrack_label_.is_linked()) {
-+ __ bind(&backtrack_label_);
-+ Backtrack();
-+ }
-+
-+ Label exit_with_exception;
-+
-+ // Preempt-code.
-+ if (check_preempt_label_.is_linked()) {
-+ SafeCallTarget(&check_preempt_label_);
-+ // Put regexp engine registers on stack.
-+ RegList regexp_registers_to_retain = current_input_offset().bit() |
-+ current_character().bit() |
-+ backtrack_stackpointer().bit();
-+ __ MultiPush(regexp_registers_to_retain);
-+ CallCheckStackGuardState(a0);
-+ __ MultiPop(regexp_registers_to_retain);
-+ // If returning non-zero, we should end execution with the given
-+ // result as return value.
-+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
-+
-+ // String might have moved: Reload end of string from frame.
-+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-+ SafeReturn();
-+ }
-+
-+ // Backtrack stack overflow code.
-+ if (stack_overflow_label_.is_linked()) {
-+ SafeCallTarget(&stack_overflow_label_);
-+ // Reached if the backtrack-stack limit has been hit.
-+ // Put regexp engine registers on stack first.
-+ RegList regexp_registers =
-+ current_input_offset().bit() | current_character().bit();
-+ __ MultiPush(regexp_registers);
-+
-+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
-+ static const int num_arguments = 3;
-+ __ PrepareCallCFunction(num_arguments, a0);
-+ __ mov(a0, backtrack_stackpointer());
-+ __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd));
-+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
-+ ExternalReference grow_stack =
-+ ExternalReference::re_grow_stack(masm_->isolate());
-+ __ CallCFunction(grow_stack, num_arguments);
-+ // Restore regexp registers.
-+ __ MultiPop(regexp_registers);
-+ // If nullptr is returned, we have failed to grow the stack, and
-+ // must exit with a stack-overflow exception.
-+ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
-+ // Otherwise use return value as new stack pointer.
-+ __ mov(backtrack_stackpointer(), a0);
-+ // Restore saved registers and continue.
-+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-+ SafeReturn();
-+ }
-+
-+ if (exit_with_exception.is_linked()) {
-+ // If any of the code above needed to exit with an exception.
-+ __ bind(&exit_with_exception);
-+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
-+ __ li(a0, Operand(EXCEPTION));
-+ __ jmp(&return_v0);
-+ }
-+ }
-+
-+ CodeDesc code_desc;
-+ masm_->GetCode(isolate(), &code_desc);
-+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
-+ .set_self_reference(masm_->CodeObject())
-+ .Build();
-+ LOG(masm_->isolate(),
-+ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
-+ return Handle<HeapObject>::cast(code);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::GoTo(Label* to) {
-+ if (to == nullptr) {
-+ Backtrack();
-+ return;
-+ }
-+ __ jmp(to);
-+ return;
-+}
-+
-+void RegExpMacroAssemblerLOONG64::IfRegisterGE(int reg, int comparand,
-+ Label* if_ge) {
-+ __ Ld_d(a0, register_location(reg));
-+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::IfRegisterLT(int reg, int comparand,
-+ Label* if_lt) {
-+ __ Ld_d(a0, register_location(reg));
-+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::IfRegisterEqPos(int reg, Label* if_eq) {
-+ __ Ld_d(a0, register_location(reg));
-+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
-+}
-+
-+RegExpMacroAssembler::IrregexpImplementation
-+RegExpMacroAssemblerLOONG64::Implementation() {
-+ return kLOONG64Implementation;
-+}
-+
-+void RegExpMacroAssemblerLOONG64::PopCurrentPosition() {
-+ Pop(current_input_offset());
-+}
-+
-+void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) {
-+ Pop(a0);
-+ __ St_d(a0, register_location(register_index));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
-+ if (label->is_bound()) {
-+ int target = label->pos();
-+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
-+ } else {
-+ // TODO: Optimize like arm64 without ld_wu?
-+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-+ Label after_constant;
-+ __ Branch(&after_constant);
-+ int offset = masm_->pc_offset();
-+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
-+ //__ emit(0);
-+ __ nop();
-+ masm_->label_at_put(label, offset);
-+ __ bind(&after_constant);
-+ if (is_int12(cp_offset)) {
-+ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset));
-+ } else {
-+ __ Add_d(a0, code_pointer(), cp_offset);
-+ __ Ld_wu(a0, MemOperand(a0, 0));
-+ }
-+ }
-+ Push(a0);
-+ CheckStackLimit();
-+}
-+
-+void RegExpMacroAssemblerLOONG64::PushCurrentPosition() {
-+ Push(current_input_offset());
-+}
-+
-+void RegExpMacroAssemblerLOONG64::PushRegister(int register_index,
-+ StackCheckFlag check_stack_limit) {
-+ __ Ld_d(a0, register_location(register_index));
-+ Push(a0);
-+ if (check_stack_limit) CheckStackLimit();
-+}
-+
-+void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) {
-+ __ Ld_d(current_input_offset(), register_location(reg));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) {
-+ __ Ld_d(backtrack_stackpointer(), register_location(reg));
-+ __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd));
-+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::SetCurrentPositionFromEnd(int by) {
-+ Label after_position;
-+ __ Branch(&after_position, ge, current_input_offset(),
-+ Operand(-by * char_size()));
-+ __ li(current_input_offset(), -by * char_size());
-+ // On RegExp code entry (where this operation is used), the character before
-+ // the current position is expected to be already loaded.
-+ // We have advanced the position, so it's safe to read backwards.
-+ LoadCurrentCharacterUnchecked(-1, 1); -+ __ bind(&after_position); -+} -+ -+void RegExpMacroAssemblerLOONG64::SetRegister(int register_index, int to) { -+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions! -+ __ li(a0, Operand(to)); -+ __ St_d(a0, register_location(register_index)); -+} -+ -+bool RegExpMacroAssemblerLOONG64::Succeed() { -+ __ jmp(&success_label_); -+ return global(); -+} -+ -+void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(int reg, -+ int cp_offset) { -+ if (cp_offset == 0) { -+ __ St_d(current_input_offset(), register_location(reg)); -+ } else { -+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size())); -+ __ St_d(a0, register_location(reg)); -+ } -+} -+ -+void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) { -+ DCHECK(reg_from <= reg_to); -+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); -+ for (int reg = reg_from; reg <= reg_to; reg++) { -+ __ St_d(a0, register_location(reg)); -+ } -+} -+ -+void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) { -+ __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd)); -+ __ Sub_d(a0, backtrack_stackpointer(), a1); -+ __ St_d(a0, register_location(reg)); -+} -+ -+bool RegExpMacroAssemblerLOONG64::CanReadUnaligned() { return false; } -+ -+// Private methods: -+ -+void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) { -+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); -+ DCHECK(!masm_->options().isolate_independent_code); -+ -+ int stack_alignment = base::OS::ActivationFrameAlignment(); -+ -+ // Align the stack pointer and save the original sp value on the stack. -+ __ mov(scratch, sp); -+ __ Sub_d(sp, sp, Operand(kPointerSize)); -+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment)); -+ __ And(sp, sp, Operand(-stack_alignment)); -+ __ St_d(scratch, MemOperand(sp, 0)); -+ -+ __ mov(a2, frame_pointer()); -+ // Code of self. -+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); -+ -+ // We need to make room for the return address on the stack. -+ DCHECK(IsAligned(stack_alignment, kPointerSize)); -+ __ Sub_d(sp, sp, Operand(stack_alignment)); -+ -+ // The stack pointer now points to cell where the return address will be -+ // written. Arguments are in registers, meaning we treat the return address as -+ // argument 5. Since DirectCEntry will handle allocating space for the C -+ // argument slots, we don't need to care about that here. This is how the -+ // stack will look (sp meaning the value of sp at this moment): -+ // [sp + 3] - empty slot if needed for alignment. -+ // [sp + 2] - saved sp. -+ // [sp + 1] - second word reserved for return value. -+ // [sp + 0] - first word reserved for return value. -+ -+ // a0 will point to the return address, placed by DirectCEntry. -+ __ mov(a0, sp); -+ -+ ExternalReference stack_guard_check = -+ ExternalReference::re_check_stack_guard_state(masm_->isolate()); -+ __ li(t7, Operand(stack_guard_check)); -+ -+ EmbeddedData d = EmbeddedData::FromBlob(); -+ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry)); -+ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry); -+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); -+ __ Call(kScratchReg); -+ -+ // DirectCEntry allocated space for the C argument slots so we have to -+ // drop them with the return address from the stack with loading saved sp. -+ // At this point stack must look: -+ // [sp + 7] - empty slot if needed for alignment. -+ // [sp + 6] - saved sp. 
-+ // [sp + 5] - second word reserved for return value.
-+ // [sp + 4] - first word reserved for return value.
-+ // [sp + 3] - C argument slot.
-+ // [sp + 2] - C argument slot.
-+ // [sp + 1] - C argument slot.
-+ // [sp + 0] - C argument slot.
-+ __ Ld_d(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
-+
-+ __ li(code_pointer(), Operand(masm_->CodeObject()));
-+}
-+
-+// Helper function for reading a value out of a stack frame.
-+template <typename T>
-+static T& frame_entry(Address re_frame, int frame_offset) {
-+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
-+}
-+
-+template <typename T>
-+static T* frame_entry_address(Address re_frame, int frame_offset) {
-+ return reinterpret_cast<T*>(re_frame + frame_offset);
-+}
-+
-+int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(Address* return_address,
-+ Address raw_code,
-+ Address re_frame) {
-+ Code re_code = Code::cast(Object(raw_code));
-+ return NativeRegExpMacroAssembler::CheckStackGuardState(
-+ frame_entry<Isolate*>(re_frame, kIsolate),
-+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
-+ static_cast<RegExp::CallOrigin>(
-+ frame_entry<int64_t>(re_frame, kDirectCall)),
-+ return_address, re_code,
-+ frame_entry_address<Address>(re_frame, kInputString),
-+ frame_entry_address<const byte*>(re_frame, kInputStart),
-+ frame_entry_address<const byte*>(re_frame, kInputEnd));
-+}
-+
-+MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
-+ DCHECK(register_index < (1 << 30));
-+ if (num_registers_ <= register_index) {
-+ num_registers_ = register_index + 1;
-+ }
-+ return MemOperand(frame_pointer(),
-+ kRegisterZero - register_index * kPointerSize);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
-+ Label* on_outside_input) {
-+ if (cp_offset >= 0) {
-+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
-+ Operand(-cp_offset * char_size()));
-+ } else {
-+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
-+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
-+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
-+ }
-+}
-+
-+void RegExpMacroAssemblerLOONG64::BranchOrBacktrack(Label* to, Condition condition,
-+ Register rs,
-+ const Operand& rt) {
-+ if (condition == al) { // Unconditional.
-+ if (to == nullptr) {
-+ Backtrack();
-+ return;
-+ }
-+ __ jmp(to);
-+ return;
-+ }
-+ if (to == nullptr) {
-+ __ Branch(&backtrack_label_, condition, rs, rt);
-+ return;
-+ }
-+ __ Branch(to, condition, rs, rt);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::SafeCall(Label* to, Condition cond, Register rs,
-+ const Operand& rt) {
-+ __ Branch(to, cond, rs, rt, true);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::SafeReturn() {
-+ __ pop(ra);
-+ __ Add_d(t1, ra, Operand(masm_->CodeObject()));
-+ __ Jump(t1);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::SafeCallTarget(Label* name) {
-+ __ bind(name);
-+ __ Sub_d(ra, ra, Operand(masm_->CodeObject()));
-+ __ push(ra);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::Push(Register source) {
-+ DCHECK(source != backtrack_stackpointer());
-+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
-+ Operand(-kIntSize));
-+ __ St_w(source, MemOperand(backtrack_stackpointer(), 0));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::Pop(Register target) {
-+ DCHECK(target != backtrack_stackpointer());
-+ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0));
-+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
-+}
-+
-+void RegExpMacroAssemblerLOONG64::CheckPreemption() {
-+ // Check for preemption.
-+ ExternalReference stack_limit =
-+ ExternalReference::address_of_jslimit(masm_->isolate());
-+ __ li(a0, Operand(stack_limit));
-+ __ Ld_d(a0, MemOperand(a0, 0));
-+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::CheckStackLimit() {
-+ ExternalReference stack_limit =
-+ ExternalReference::address_of_regexp_stack_limit_address(
-+ masm_->isolate());
-+
-+ __ li(a0, Operand(stack_limit));
-+ __ Ld_d(a0, MemOperand(a0, 0));
-+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
-+}
-+
-+void RegExpMacroAssemblerLOONG64::LoadCurrentCharacterUnchecked(int cp_offset,
-+ int characters) {
-+ Register offset = current_input_offset();
-+ if (cp_offset != 0) {
-+ // t3 is not being used to store the capture start index at this point.
-+ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size()));
-+ offset = t3;
-+ }
-+ // We assume that we cannot do unaligned loads on LOONG64, so this function
-+ // must only be used to load a single character at a time.
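-+ // (CanReadUnaligned() above returns false for the same reason, which is
-+ // what keeps the generic engine from requesting multi-character loads.)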
-+ DCHECK_EQ(1, characters);
-+ __ Add_d(t1, end_of_input_address(), Operand(offset));
-+ if (mode_ == LATIN1) {
-+ __ Ld_bu(current_character(), MemOperand(t1, 0));
-+ } else {
-+ DCHECK(mode_ == UC16);
-+ __ Ld_hu(current_character(), MemOperand(t1, 0));
-+ }
-+}
-+
-+#undef __
-+
-+} // namespace internal
-+} // namespace v8
-+
-+#endif // V8_TARGET_ARCH_LOONG64
-diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
-new file mode 100644
-index 00000000..dd6f0123
---- /dev/null
-+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
-@@ -0,0 +1,211 @@
-+// Copyright 2011 the V8 project authors. All rights reserved.
-+// Use of this source code is governed by a BSD-style license that can be
-+// found in the LICENSE file.
-+
-+#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
-+#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
-+
-+#include "src/codegen/loong64/assembler-loong64.h"
-+#include "src/codegen/macro-assembler.h"
-+#include "src/regexp/regexp-macro-assembler.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
-+ : public NativeRegExpMacroAssembler {
-+ public:
-+ RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode,
-+ int registers_to_save);
-+ virtual ~RegExpMacroAssemblerLOONG64();
-+ virtual int stack_limit_slack();
-+ virtual void AdvanceCurrentPosition(int by);
-+ virtual void AdvanceRegister(int reg, int by);
-+ virtual void Backtrack();
-+ virtual void Bind(Label* label);
-+ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
-+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
-+ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
-+ Label* on_equal);
-+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
-+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
-+ // A "greedy loop" is a loop that is both greedy and with a simple
-+ // body. It has a particularly simple implementation.
-+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
-+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
-+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
-+ Label* on_no_match);
-+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-+ bool read_backward,
-+ Label* on_no_match);
-+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
-+ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
-+ Label* on_not_equal);
-+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
-+ Label* on_not_equal);
-+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
-+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
-+ Label* on_not_in_range);
-+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-+
-+ // Checks whether the given offset from the current position is before
-+ // the end of the string.
-+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
-+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
-+ virtual void Fail();
-+ virtual Handle<HeapObject> GetCode(Handle<String> source);
-+ virtual void GoTo(Label* label);
-+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
-+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
-+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
-+ virtual IrregexpImplementation Implementation();
-+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
-+ int character_count);
-+ virtual void PopCurrentPosition();
-+ virtual void PopRegister(int register_index);
-+ virtual void PushBacktrack(Label* label);
-+ virtual void PushCurrentPosition();
-+ virtual void PushRegister(int register_index,
-+ StackCheckFlag check_stack_limit);
-+ virtual void ReadCurrentPositionFromRegister(int reg);
-+ virtual void ReadStackPointerFromRegister(int reg);
-+ virtual void SetCurrentPositionFromEnd(int by);
-+ virtual void SetRegister(int register_index, int to);
-+ virtual bool Succeed();
-+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
-+ virtual void ClearRegisters(int reg_from, int reg_to);
-+ virtual void WriteStackPointerToRegister(int reg);
-+ virtual bool CanReadUnaligned();
-+
-+ // Called from RegExp if the stack-guard is triggered.
-+ // If the code object is relocated, the return address is fixed before
-+ // returning.
-+ // {raw_code} is an Address because this is called via ExternalReference.
-+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
-+ Address re_frame);
-+
-+ void print_regexp_frame_constants();
-+
-+ private:
-+ // Offsets from frame_pointer() of function parameters and stored registers.
-+ static const int kFramePointer = 0;
-+
-+ // Above the frame pointer - Stored registers and stack passed parameters.
-+ // Registers s0 to s7, fp, and ra.
-+ static const int kStoredRegisters = kFramePointer;
-+ // Return address (stored from link register, read into pc on return).
-+
-+ // TODO(plind): this 9 is 8 s-regs (s0..s7) plus fp.
-+
-+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
-+ // Stack frame header.
-+ static const int kStackFrameHeader = kReturnAddress;
-+ // Stack parameters placed by caller.
-+ static const int kIsolate = kStackFrameHeader + kPointerSize;
-+
-+ // Below the frame pointer.
-+ // Register parameters stored by setup code.
-+ static const int kDirectCall = kFramePointer - kPointerSize;
-+ static const int kStackHighEnd = kDirectCall - kPointerSize;
-+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
-+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
-+ static const int kInputEnd = kRegisterOutput - kPointerSize;
-+ static const int kInputStart = kInputEnd - kPointerSize;
-+ static const int kStartIndex = kInputStart - kPointerSize;
-+ static const int kInputString = kStartIndex - kPointerSize;
-+ // When adding local variables remember to push space for them in
-+ // the frame in GetCode.
-+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
-+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
-+ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
-+ // First register address. Following registers are below it on the stack.
-+ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
-+
-+ // Initial size of code buffer.
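-+ // (The buffer is grown by the assembler if needed; this only sizes the
-+ // initial allocation.)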
-+ static const int kRegExpCodeSize = 1024;
-+
-+ // Check whether preemption has been requested.
-+ void CheckPreemption();
-+
-+ // Check whether we are exceeding the stack limit on the backtrack stack.
-+ void CheckStackLimit();
-+
-+ // Generate a call to CheckStackGuardState.
-+ void CallCheckStackGuardState(Register scratch);
-+
-+ // The frame-pointer-relative location of a regexp register.
-+ MemOperand register_location(int register_index);
-+
-+ // Register holding the current input position as negative offset from
-+ // the end of the string.
-+ inline Register current_input_offset() { return a6; }
-+
-+ // The register containing the current character after LoadCurrentCharacter.
-+ inline Register current_character() { return a7; }
-+
-+ // Register holding address of the end of the input string.
-+ inline Register end_of_input_address() { return t2; }
-+
-+ // Register holding the frame address. Local variables, parameters and
-+ // regexp registers are addressed relative to this.
-+ inline Register frame_pointer() { return fp; }
-+
-+ // The register containing the backtrack stack top. Provides a meaningful
-+ // name to the register.
-+ inline Register backtrack_stackpointer() { return t0; }
-+
-+ // Register holding pointer to the current code object.
-+ inline Register code_pointer() { return a5; }
-+
-+ // Byte size of chars in the string to match (decided by the Mode argument).
-+ inline int char_size() { return static_cast<int>(mode_); }
-+
-+ // Equivalent to a conditional branch to the label, unless the label
-+ // is nullptr, in which case it is a conditional Backtrack.
-+ void BranchOrBacktrack(Label* to, Condition condition, Register rs,
-+ const Operand& rt);
-+
-+ // Call and return internally in the generated code in a way that
-+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
-+ inline void SafeCall(Label* to, Condition cond, Register rs,
-+ const Operand& rt);
-+ inline void SafeReturn();
-+ inline void SafeCallTarget(Label* name);
-+
-+ // Pushes the value of a register on the backtrack stack. Decrements the
-+ // stack pointer by a word size and stores the register's value there.
-+ inline void Push(Register source);
-+
-+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
-+ // and increments it by a word size.
-+ inline void Pop(Register target);
-+
-+ Isolate* isolate() const { return masm_->isolate(); }
-+
-+ MacroAssembler* masm_;
-+
-+ // Which mode to generate code for (Latin1 or UC16).
-+ Mode mode_;
-+
-+ // One greater than maximal register index actually used.
-+ int num_registers_;
-+
-+ // Number of registers to output at the end (the saved registers
-+ // are always 0..num_saved_registers_-1).
-+ int num_saved_registers_;
-+
-+ // Labels used internally.
-+ Label entry_label_; -+ Label start_label_; -+ Label success_label_; -+ Label backtrack_label_; -+ Label exit_label_; -+ Label check_preempt_label_; -+ Label stack_overflow_label_; -+ Label internal_failure_label_; -+}; -+ -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_ -diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h -index 8ec12a0a..e4503090 100644 ---- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h -+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h -@@ -21,6 +21,8 @@ - #include "src/regexp/mips/regexp-macro-assembler-mips.h" - #elif V8_TARGET_ARCH_MIPS64 - #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h" - #elif V8_TARGET_ARCH_S390 - #include "src/regexp/s390/regexp-macro-assembler-s390.h" - #else -diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc -index 0a122017..37b22105 100644 ---- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc -+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc -@@ -15,8 +15,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer( - : RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) { - IrregexpImplementation type = assembler->Implementation(); - DCHECK_LT(type, 9); -- const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "S390", -- "PPC", "X64", "X87", "Bytecode"}; -+ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "LOONG64", -+ "S390", "PPC", "X64", "X87", "Bytecode"}; - PrintF("RegExpMacroAssembler%s();\n", impl_names[type]); - } - -diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h -index 289c2a97..0e283e78 100644 ---- a/deps/v8/src/regexp/regexp-macro-assembler.h -+++ b/deps/v8/src/regexp/regexp-macro-assembler.h -@@ -44,6 +44,7 @@ class RegExpMacroAssembler { - kARMImplementation, - kARM64Implementation, - kMIPSImplementation, -+ kLOONG64Implementation, - kS390Implementation, - kPPCImplementation, - kX64Implementation, -diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc -index 7b8da4d8..da50d8f9 100644 ---- a/deps/v8/src/regexp/regexp.cc -+++ b/deps/v8/src/regexp/regexp.cc -@@ -781,6 +781,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, - #elif V8_TARGET_ARCH_MIPS64 - macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode, - output_register_count)); -+#elif V8_TARGET_ARCH_LOONG64 -+ macro_assembler.reset(new RegExpMacroAssemblerLOONG64(isolate, zone, mode, -+ output_register_count)); - #else - #error "Unsupported architecture" - #endif -diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc -index 34259c6e..14a5372d 100644 ---- a/deps/v8/src/runtime/runtime-atomics.cc -+++ b/deps/v8/src/runtime/runtime-atomics.cc -@@ -20,7 +20,8 @@ namespace internal { - - // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h. 
- #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ -- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X -+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \ -+ V8_TARGET_ARCH_LOONG64 - - namespace { - -diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h -index 72ca7297..0c54d6d0 100644 ---- a/deps/v8/src/snapshot/deserializer.h -+++ b/deps/v8/src/snapshot/deserializer.h -@@ -28,8 +28,9 @@ class Object; - // Used for platforms with embedded constant pools to trigger deserialization - // of objects found in code. - #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ -- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ -- defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL -+ defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_PPC) || \ -+ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_PPC64) || \ -+ V8_EMBEDDED_CONSTANT_POOL - #define V8_CODE_EMBEDS_OBJECT_POINTER 1 - #else - #define V8_CODE_EMBEDS_OBJECT_POINTER 0 -diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h -index 781fb87d..f744596f 100644 ---- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h -+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h -@@ -46,6 +46,14 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = - constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf( - f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26); - -+#elif V8_TARGET_ARCH_LOONG64 -+/*todo*/ -+constexpr RegList kLiftoffAssemblerGpCacheRegs = -+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7); -+ -+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf( -+ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26); -+ - #elif V8_TARGET_ARCH_ARM - - // r7: cp, r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc. -@@ -90,7 +98,7 @@ constexpr Condition kUnsignedLessEqual = below_equal; - constexpr Condition kUnsignedGreaterThan = above; - constexpr Condition kUnsignedGreaterEqual = above_equal; - --#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 -+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 - - constexpr Condition kEqual = eq; - constexpr Condition kUnequal = ne; -diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h -index 701b4b8e..b85494dc 100644 ---- a/deps/v8/src/wasm/baseline/liftoff-assembler.h -+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h -@@ -1254,6 +1254,8 @@ class LiftoffStackSlots { - #include "src/wasm/baseline/mips/liftoff-assembler-mips.h" - #elif V8_TARGET_ARCH_MIPS64 - #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" -+#elif V8_TARGET_ARCH_LOONG64 -+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h" - #elif V8_TARGET_ARCH_S390 - #include "src/wasm/baseline/s390/liftoff-assembler-s390.h" - #else -diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig b/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig -new file mode 100644 -index 00000000..701b4b8e ---- /dev/null -+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig -@@ -0,0 +1,1263 @@ -+// Copyright 2017 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. 
-+
-+#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
-+#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
-+
-+#include <iosfwd>
-+#include <memory>
-+
-+#include "src/base/bits.h"
-+#include "src/base/small-vector.h"
-+#include "src/codegen/macro-assembler.h"
-+#include "src/wasm/baseline/liftoff-assembler-defs.h"
-+#include "src/wasm/baseline/liftoff-compiler.h"
-+#include "src/wasm/baseline/liftoff-register.h"
-+#include "src/wasm/function-body-decoder.h"
-+#include "src/wasm/wasm-code-manager.h"
-+#include "src/wasm/wasm-module.h"
-+#include "src/wasm/wasm-opcodes.h"
-+#include "src/wasm/wasm-value.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// Forward declarations.
-+namespace compiler {
-+class CallDescriptor;
-+}
-+
-+namespace wasm {
-+
-+class LiftoffAssembler : public TurboAssembler {
-+ public:
-+ // Each slot in our stack frame currently has exactly 8 bytes.
-+ static constexpr int kStackSlotSize = 8;
-+
-+ static constexpr ValueType kWasmIntPtr =
-+ kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
-+
-+ class VarState {
-+ public:
-+ enum Location : uint8_t { kStack, kRegister, kIntConst };
-+
-+ explicit VarState(ValueType type, int offset)
-+ : loc_(kStack), type_(type), spill_offset_(offset) {}
-+ explicit VarState(ValueType type, LiftoffRegister r, int offset)
-+ : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
-+ DCHECK_EQ(r.reg_class(), reg_class_for(type));
-+ }
-+ explicit VarState(ValueType type, int32_t i32_const, int offset)
-+ : loc_(kIntConst),
-+ type_(type),
-+ i32_const_(i32_const),
-+ spill_offset_(offset) {
-+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
-+ }
-+
-+ bool operator==(const VarState& other) const {
-+ if (loc_ != other.loc_) return false;
-+ if (type_ != other.type_) return false;
-+ switch (loc_) {
-+ case kStack:
-+ return true;
-+ case kRegister:
-+ return reg_ == other.reg_;
-+ case kIntConst:
-+ return i32_const_ == other.i32_const_;
-+ }
-+ UNREACHABLE();
-+ }
-+
-+ bool is_stack() const { return loc_ == kStack; }
-+ bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
-+ bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
-+ bool is_reg() const { return loc_ == kRegister; }
-+ bool is_const() const { return loc_ == kIntConst; }
-+
-+ ValueType type() const { return type_; }
-+
-+ Location loc() const { return loc_; }
-+
-+ int32_t i32_const() const {
-+ DCHECK_EQ(loc_, kIntConst);
-+ return i32_const_;
-+ }
-+ WasmValue constant() const {
-+ DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
-+ DCHECK_EQ(loc_, kIntConst);
-+ return type_ == kWasmI32 ? WasmValue(i32_const_)
-+ : WasmValue(int64_t{i32_const_});
-+ }
-+
-+ int offset() const { return spill_offset_; }
-+
-+ Register gp_reg() const { return reg().gp(); }
-+ DoubleRegister fp_reg() const { return reg().fp(); }
-+ LiftoffRegister reg() const {
-+ DCHECK_EQ(loc_, kRegister);
-+ return reg_;
-+ }
-+ RegClass reg_class() const { return reg().reg_class(); }
-+
-+ void MakeStack() { loc_ = kStack; }
-+
-+ void MakeRegister(LiftoffRegister r) {
-+ reg_ = r;
-+ loc_ = kRegister;
-+ }
-+
-+ // Copy src to this, except for offset, since src and this could have been
-+ // from different stack states.
-+ void Copy(VarState src) {
-+ loc_ = src.loc();
-+ type_ = src.type();
-+ if (loc_ == kRegister) {
-+ reg_ = src.reg();
-+ } else if (loc_ == kIntConst) {
-+ i32_const_ = src.i32_const();
-+ }
-+ }
-+
-+ private:
-+ Location loc_;
-+ // TODO(wasm): This is redundant, the decoder already knows the type of each
-+ // stack value. Try to collapse.
-+ ValueType type_;
-+
-+ union {
-+ LiftoffRegister reg_; // used if loc_ == kRegister
-+ int32_t i32_const_; // used if loc_ == kIntConst
-+ };
-+ int spill_offset_;
-+ };
-+
-+ ASSERT_TRIVIALLY_COPYABLE(VarState);
-+
-+ struct CacheState {
-+ // Allow default construction, move construction, and move assignment.
-+ CacheState() = default;
-+ CacheState(CacheState&&) V8_NOEXCEPT = default;
-+ CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
-+
-+ base::SmallVector<VarState, 8> stack_state;
-+ LiftoffRegList used_registers;
-+ uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
-+ LiftoffRegList last_spilled_regs;
-+
-+ bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
-+ if (kNeedI64RegPair && rc == kGpRegPair) {
-+ LiftoffRegList available_regs =
-+ kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
-+ return available_regs.GetNumRegsSet() >= 2;
-+ } else if (kNeedS128RegPair && rc == kFpRegPair) {
-+ LiftoffRegList available_regs =
-+ kFpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
-+ return available_regs.HasAdjacentFpRegsSet();
-+ }
-+ DCHECK(rc == kGpReg || rc == kFpReg);
-+ LiftoffRegList candidates = GetCacheRegList(rc);
-+ return has_unused_register(candidates, pinned);
-+ }
-+
-+ bool has_unused_register(LiftoffRegList candidates,
-+ LiftoffRegList pinned = {}) const {
-+ LiftoffRegList available_regs =
-+ candidates.MaskOut(used_registers).MaskOut(pinned);
-+ return !available_regs.is_empty();
-+ }
-+
-+ LiftoffRegister unused_register(RegClass rc,
-+ LiftoffRegList pinned = {}) const {
-+ if (kNeedI64RegPair && rc == kGpRegPair) {
-+ Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
-+ Register high = unused_register(kGpReg, pinned).gp();
-+ return LiftoffRegister::ForPair(low, high);
-+ } else if (kNeedS128RegPair && rc == kFpRegPair) {
-+ LiftoffRegList available_regs =
-+ kFpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
-+ DoubleRegister low =
-+ available_regs.GetAdjacentFpRegsSet().GetFirstRegSet().fp();
-+ DCHECK(is_free(LiftoffRegister::ForFpPair(low)));
-+ return LiftoffRegister::ForFpPair(low);
-+ }
-+ DCHECK(rc == kGpReg || rc == kFpReg);
-+ LiftoffRegList candidates = GetCacheRegList(rc);
-+ return unused_register(candidates, pinned);
-+ }
-+
-+ LiftoffRegister unused_register(LiftoffRegList candidates,
-+ LiftoffRegList pinned = {}) const {
-+ LiftoffRegList available_regs =
-+ candidates.MaskOut(used_registers).MaskOut(pinned);
-+ return available_regs.GetFirstRegSet();
-+ }
-+
-+ void inc_used(LiftoffRegister reg) {
-+ if (reg.is_pair()) {
-+ inc_used(reg.low());
-+ inc_used(reg.high());
-+ return;
-+ }
-+ used_registers.set(reg);
-+ DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
-+ ++register_use_count[reg.liftoff_code()];
-+ }
-+
-+ // Decrements the use count, freeing the register when the last use is gone.
-+ void dec_used(LiftoffRegister reg) {
-+ DCHECK(is_used(reg));
-+ if (reg.is_pair()) {
-+ dec_used(reg.low());
-+ dec_used(reg.high());
-+ return;
-+ }
-+ int code = reg.liftoff_code();
-+ DCHECK_LT(0, register_use_count[code]);
-+ if (--register_use_count[code] == 0) used_registers.clear(reg);
-+ }
-+
-+ bool is_used(LiftoffRegister reg) const {
-+ if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
-+ bool used = used_registers.has(reg);
-+ DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
-+ return used;
-+ }
-+
-+ uint32_t get_use_count(LiftoffRegister reg) const {
-+ if (reg.is_pair()) {
-+ DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
-+ register_use_count[reg.high().liftoff_code()]);
-+ reg = reg.low();
-+ }
-+ DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
-+ return register_use_count[reg.liftoff_code()];
-+ }
-+
-+ void clear_used(LiftoffRegister reg) {
-+ register_use_count[reg.liftoff_code()] = 0;
-+ used_registers.clear(reg);
-+ }
-+
-+ bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
-+
-+ void reset_used_registers() {
-+ used_registers = {};
-+ memset(register_use_count, 0, sizeof(register_use_count));
-+ }
-+
-+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
-+ LiftoffRegList pinned = {}) {
-+ LiftoffRegList unpinned = candidates.MaskOut(pinned);
-+ DCHECK(!unpinned.is_empty());
-+ // This method should only be called if none of the candidates is free.
-+ DCHECK(unpinned.MaskOut(used_registers).is_empty());
-+ LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
-+ if (unspilled.is_empty()) {
-+ unspilled = unpinned;
-+ last_spilled_regs = {};
-+ }
-+ LiftoffRegister reg = unspilled.GetFirstRegSet();
-+ return reg;
-+ }
-+
-+ // TODO(clemensb): Don't copy the full parent state (this makes us N^2).
-+ void InitMerge(const CacheState& source, uint32_t num_locals,
-+ uint32_t arity, uint32_t stack_depth);
-+
-+ void Steal(const CacheState& source);
-+
-+ void Split(const CacheState& source);
-+
-+ uint32_t stack_height() const {
-+ return static_cast<uint32_t>(stack_state.size());
-+ }
-+
-+ private:
-+ // Make the copy assignment operator private (to be used from {Split()}).
-+ CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
-+ // Disallow copy construction.
-+ CacheState(const CacheState&) = delete;
-+ };
-+
-+ explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
-+ ~LiftoffAssembler() override;
-+
-+ LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
-+
-+ // Returns the register which holds the value of stack slot {index}. If the
-+ // value is not stored in a register yet, a register is allocated for it. The
-+ // register is then assigned to the stack slot. The value stack height is not
-+ // modified. The top of the stack is index 0, i.e. {PopToRegister()} and
-+ // {PeekToRegister(0)} should result in the same register.
-+ // {PeekToRegister} already decrements the used count of the register of the
-+ // stack slot. Therefore the register must not be popped by {PopToRegister}
-+ // but discarded with {stack_state.pop_back(count)}.
-+ LiftoffRegister PeekToRegister(int index, LiftoffRegList pinned);
-+
-+ // Ensure that the loop inputs are either in a register or spilled to the
-+ // stack, so that we can merge different values on the back-edge.
-+ void PrepareLoopArgs(int num);
-+
-+ int NextSpillOffset(ValueType type) {
-+ int offset = TopSpillOffset() + SlotSizeForType(type);
-+ if (NeedsAlignment(type)) {
-+ offset = RoundUp(offset, SlotSizeForType(type));
-+ }
-+ return offset;
-+ }
-+
-+ int TopSpillOffset() const {
-+ return cache_state_.stack_state.empty()
-+ ? StaticStackFrameSize()
-+ : cache_state_.stack_state.back().offset();
-+ }
-+
-+ void PushRegister(ValueType type, LiftoffRegister reg) {
-+ DCHECK_EQ(reg_class_for(type), reg.reg_class());
-+ cache_state_.inc_used(reg);
-+ cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
-+ }
-+
-+ void PushConstant(ValueType type, int32_t i32_const) {
-+ DCHECK(type == kWasmI32 || type == kWasmI64);
-+ cache_state_.stack_state.emplace_back(type, i32_const,
-+ NextSpillOffset(type));
-+ }
-+
-+ void PushStack(ValueType type) {
-+ cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
-+ }
-+
-+ void SpillRegister(LiftoffRegister);
-+
-+ uint32_t GetNumUses(LiftoffRegister reg) {
-+ return cache_state_.get_use_count(reg);
-+ }
-+
-+ // Get an unused register for class {rc}, reusing one of {try_first} if
-+ // possible.
-+ LiftoffRegister GetUnusedRegister(
-+ RegClass rc, std::initializer_list<LiftoffRegister> try_first,
-+ LiftoffRegList pinned) {
-+ for (LiftoffRegister reg : try_first) {
-+ DCHECK_EQ(reg.reg_class(), rc);
-+ if (cache_state_.is_free(reg)) return reg;
-+ }
-+ return GetUnusedRegister(rc, pinned);
-+ }
-+
-+ // Get an unused register for class {rc}, potentially spilling to free one.
-+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
-+ if (kNeedI64RegPair && rc == kGpRegPair) {
-+ LiftoffRegList candidates = kGpCacheRegList;
-+ Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
-+ Register high = GetUnusedRegister(candidates, pinned).gp();
-+ return LiftoffRegister::ForPair(low, high);
-+ } else if (kNeedS128RegPair && rc == kFpRegPair) {
-+ // kFpRegPair specific logic here because we need adjacent registers, not
-+ // just any two registers (like kGpRegPair).
-+ if (cache_state_.has_unused_register(rc, pinned)) {
-+ return cache_state_.unused_register(rc, pinned);
-+ }
-+ DoubleRegister low_fp = SpillAdjacentFpRegisters(pinned).fp();
-+ return LiftoffRegister::ForFpPair(low_fp);
-+ }
-+ DCHECK(rc == kGpReg || rc == kFpReg);
-+ LiftoffRegList candidates = GetCacheRegList(rc);
-+ return GetUnusedRegister(candidates, pinned);
-+ }
-+
-+ // Get an unused register of {candidates}, potentially spilling to free one.
-+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
-+ LiftoffRegList pinned = {}) {
-+ if (cache_state_.has_unused_register(candidates, pinned)) {
-+ return cache_state_.unused_register(candidates, pinned);
-+ }
-+ return SpillOneRegister(candidates, pinned);
-+ }
-+
-+ void MergeFullStackWith(const CacheState& target, const CacheState& source);
-+ void MergeStackWith(const CacheState& target, uint32_t arity);
-+
-+ void Spill(VarState* slot);
-+ void SpillLocals();
-+ void SpillAllRegisters();
-+
-+ // Clear any uses of {reg} in both the cache and in {possible_uses}.
-+ // Any use in the stack is spilled. If any register in {possible_uses} matches
-+ // {reg}, then the content of {reg} is moved to a new temporary register, and
-+ // all matches in {possible_uses} are rewritten to that temporary register.
-+ void ClearRegister(Register reg,
-+ std::initializer_list<Register*> possible_uses,
-+ LiftoffRegList pinned);
-+
-+ // Spills all passed registers.
-+ template <typename... Regs>
-+ void SpillRegisters(Regs... regs) {
-+ for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
-+ if (cache_state()->is_used(r)) SpillRegister(r);
-+ }
-+ }
-+
-+ // Call this method whenever spilling something, such that the number of used
-+ // spill slots can be tracked and the stack frame will be allocated big enough.
-+ void RecordUsedSpillOffset(int offset) {
-+ if (offset >= max_used_spill_offset_) max_used_spill_offset_ = offset;
-+ }
-+
-+ // Load parameters into the right registers / stack slots for the call.
-+ void PrepareBuiltinCall(const FunctionSig* sig,
-+ compiler::CallDescriptor* call_descriptor,
-+ std::initializer_list<VarState> params);
-+
-+ // Load parameters into the right registers / stack slots for the call.
-+ // Move {*target} into another register if needed and update {*target} to that
-+ // register, or {no_reg} if target was spilled to the stack.
-+ void PrepareCall(const FunctionSig*, compiler::CallDescriptor*,
-+ Register* target = nullptr,
-+ Register* target_instance = nullptr);
-+ // Process return values of the call.
-+ void FinishCall(const FunctionSig*, compiler::CallDescriptor*);
-+
-+ // Move {src} into {dst}. {src} and {dst} must be different.
-+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
-+
-+ // Parallel register move: For a list of tuples <dst, src, type>, move the
-+ // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
-+ // that tuple.
-+ struct ParallelRegisterMoveTuple {
-+ LiftoffRegister dst;
-+ LiftoffRegister src;
-+ ValueType type;
-+ template <typename Dst, typename Src>
-+ ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
-+ : dst(dst), src(src), type(type) {}
-+ };
-+ void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
-+
-+ void MoveToReturnLocations(const FunctionSig*,
-+ compiler::CallDescriptor* descriptor);
-+
-+#ifdef ENABLE_SLOW_DCHECKS
-+ // Validate that the register use counts reflect the state of the cache.
-+ bool ValidateCacheState() const;
-+#endif
-+
-+ ////////////////////////////////////
-+ // Platform-specific part. //
-+ ////////////////////////////////////
-+
-+ // This function emits machine code to prepare the stack frame, before the
-+ // size of the stack frame is known. It returns an offset in the machine code
-+ // which can later be patched (via {PatchPrepareStackFrame}) when the size of
-+ // the frame is known.
-+ inline int PrepareStackFrame(); -+ inline void PatchPrepareStackFrame(int offset, int frame_size); -+ inline void FinishCode(); -+ inline void AbortCompilation(); -+ inline static constexpr int StaticStackFrameSize(); -+ inline static int SlotSizeForType(ValueType type); -+ inline static bool NeedsAlignment(ValueType type); -+ -+ inline void LoadConstant(LiftoffRegister, WasmValue, -+ RelocInfo::Mode rmode = RelocInfo::NONE); -+ inline void LoadFromInstance(Register dst, uint32_t offset, int size); -+ inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset); -+ inline void SpillInstance(Register instance); -+ inline void FillInstanceInto(Register dst); -+ inline void LoadTaggedPointer(Register dst, Register src_addr, -+ Register offset_reg, uint32_t offset_imm, -+ LiftoffRegList pinned); -+ inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, -+ uint32_t offset_imm, LoadType type, LiftoffRegList pinned, -+ uint32_t* protected_load_pc = nullptr, -+ bool is_load_mem = false); -+ inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm, -+ LiftoffRegister src, StoreType type, LiftoffRegList pinned, -+ uint32_t* protected_store_pc = nullptr, -+ bool is_store_mem = false); -+ inline void AtomicLoad(LiftoffRegister dst, Register src_addr, -+ Register offset_reg, uint32_t offset_imm, -+ LoadType type, LiftoffRegList pinned); -+ inline void AtomicStore(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister src, -+ StoreType type, LiftoffRegList pinned); -+ -+ inline void AtomicAdd(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicSub(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicAnd(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicOr(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicXor(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicExchange(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type); -+ -+ inline void AtomicCompareExchange(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, -+ LiftoffRegister expected, -+ LiftoffRegister new_value, -+ LiftoffRegister value, StoreType type); -+ -+ inline void AtomicFence(); -+ -+ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, -+ ValueType); -+ inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, -+ ValueType); -+ inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, -+ ValueType); -+ -+ inline void Move(Register dst, Register src, ValueType); -+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueType); -+ -+ inline void Spill(int offset, LiftoffRegister, ValueType); -+ inline void Spill(int offset, WasmValue); -+ inline void Fill(LiftoffRegister, int offset, ValueType); -+ // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. -+ // 4 bytes on the stack holding half of a 64-bit value. 
-+ inline void FillI64Half(Register, int offset, RegPairHalf); -+ inline void FillStackSlotsWithZero(int start, int size); -+ -+ // i32 binops. -+ inline void emit_i32_add(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_addi(Register dst, Register lhs, int32_t imm); -+ inline void emit_i32_sub(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_mul(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_divs(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero, -+ Label* trap_div_unrepresentable); -+ inline void emit_i32_divu(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero); -+ inline void emit_i32_rems(Register dst, Register lhs, Register rhs, -+ Label* trap_rem_by_zero); -+ inline void emit_i32_remu(Register dst, Register lhs, Register rhs, -+ Label* trap_rem_by_zero); -+ inline void emit_i32_and(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_andi(Register dst, Register lhs, int32_t imm); -+ inline void emit_i32_or(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_ori(Register dst, Register lhs, int32_t imm); -+ inline void emit_i32_xor(Register dst, Register lhs, Register rhs); -+ inline void emit_i32_xori(Register dst, Register lhs, int32_t imm); -+ inline void emit_i32_shl(Register dst, Register src, Register amount); -+ inline void emit_i32_shli(Register dst, Register src, int32_t amount); -+ inline void emit_i32_sar(Register dst, Register src, Register amount); -+ inline void emit_i32_sari(Register dst, Register src, int32_t amount); -+ inline void emit_i32_shr(Register dst, Register src, Register amount); -+ inline void emit_i32_shri(Register dst, Register src, int32_t amount); -+ -+ // i32 unops. -+ inline void emit_i32_clz(Register dst, Register src); -+ inline void emit_i32_ctz(Register dst, Register src); -+ inline bool emit_i32_popcnt(Register dst, Register src); -+ -+ // i64 binops. 
-+ inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm); -+ inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, Label* trap_div_by_zero, -+ Label* trap_div_unrepresentable); -+ inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, Label* trap_div_by_zero); -+ inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, Label* trap_rem_by_zero); -+ inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, Label* trap_rem_by_zero); -+ inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm); -+ inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm); -+ inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm); -+ inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, -+ Register amount); -+ inline void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, -+ int32_t amount); -+ inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, -+ Register amount); -+ inline void emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, -+ int32_t amount); -+ inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, -+ Register amount); -+ inline void emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, -+ int32_t amount); -+ -+ // i64 unops. -+ inline void emit_i64_clz(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src); -+ inline bool emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src); -+ -+ inline void emit_u32_to_intptr(Register dst, Register src); -+ -+ inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) { -+ if (kSystemPointerSize == 8) { -+ emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), -+ LiftoffRegister(rhs)); -+ } else { -+ emit_i32_add(dst, lhs, rhs); -+ } -+ } -+ inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) { -+ if (kSystemPointerSize == 8) { -+ emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs), -+ LiftoffRegister(rhs)); -+ } else { -+ emit_i32_sub(dst, lhs, rhs); -+ } -+ } -+ inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) { -+ if (kSystemPointerSize == 8) { -+ emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs), -+ LiftoffRegister(rhs)); -+ } else { -+ emit_i32_and(dst, lhs, rhs); -+ } -+ } -+ inline void emit_ptrsize_shri(Register dst, Register src, int amount) { -+ if (kSystemPointerSize == 8) { -+ emit_i64_shri(LiftoffRegister(dst), LiftoffRegister(src), amount); -+ } else { -+ emit_i32_shri(dst, src, amount); -+ } -+ } -+ -+ inline void emit_ptrsize_addi(Register dst, Register lhs, int32_t imm) { -+ if (kSystemPointerSize == 8) { -+ emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm); -+ } else { -+ emit_i32_addi(dst, lhs, imm); -+ } -+ } -+ -+ // f32 binops. 
-+ inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ -+ // f32 unops. -+ inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src); -+ inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src); -+ inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src); -+ -+ // f64 binops. -+ inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs); -+ -+ // f64 unops. -+ inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src); -+ inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src); -+ inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src); -+ inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src); -+ -+ inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, -+ LiftoffRegister src, Label* trap = nullptr); -+ -+ inline void emit_i32_signextend_i8(Register dst, Register src); -+ inline void emit_i32_signextend_i16(Register dst, Register src); -+ inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src); -+ -+ inline void emit_jump(Label*); -+ inline void emit_jump(Register); -+ -+ inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs, -+ Register rhs = no_reg); -+ // Set {dst} to 1 if condition holds, 0 otherwise. 
-+ inline void emit_i32_eqz(Register dst, Register src); -+ inline void emit_i32_set_cond(Condition, Register dst, Register lhs, -+ Register rhs); -+ inline void emit_i64_eqz(Register dst, LiftoffRegister src); -+ inline void emit_i64_set_cond(Condition condition, Register dst, -+ LiftoffRegister lhs, LiftoffRegister rhs); -+ inline void emit_f32_set_cond(Condition condition, Register dst, -+ DoubleRegister lhs, DoubleRegister rhs); -+ inline void emit_f64_set_cond(Condition condition, Register dst, -+ DoubleRegister lhs, DoubleRegister rhs); -+ -+ inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void 
emit_s128_not(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, LiftoffRegister mask); -+ inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs); -+ inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_add_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_add_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_min_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_max_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs); -+ inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_add_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_add_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_min_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_max_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs); -+ inline void emit_i32x4_add(LiftoffRegister 
dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_min_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_min_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_max_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs); -+ inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_abs(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f64x2_neg(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f64x2_sqrt(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_sconvert_i16x8(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_uconvert_i16x8(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_sconvert_i32x4(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_uconvert_i32x4(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void 
emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, -+ LiftoffRegister src); -+ inline void emit_s128_and_not(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_rounding_average_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i16x8_rounding_average_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs); -+ inline void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src); -+ inline void emit_i8x16_extract_lane_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i8x16_extract_lane_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i16x8_extract_lane_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i16x8_extract_lane_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, -+ uint8_t imm_lane_idx); -+ inline void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ inline void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ inline void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ inline void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ inline void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ inline void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx); -+ -+ inline void StackCheck(Label* ool_code, Register limit_address); -+ -+ inline void CallTrapCallbackForTesting(); -+ -+ inline void AssertUnreachable(AbortReason reason); -+ -+ inline void PushRegisters(LiftoffRegList); -+ inline void PopRegisters(LiftoffRegList); -+ -+ inline void DropStackSlotsAndRet(uint32_t num_stack_slots); -+ -+ // Execute a C call. Arguments are pushed to the stack and a pointer to this -+ // region is passed to the C function. If {out_argument_type != kWasmStmt}, -+ // this is the return value of the C function, stored in {rets[0]}. Further -+ // outputs (specified in {sig->returns()}) are read from the buffer and stored -+ // in the remaining {rets} registers. 
-+ inline void CallC(const FunctionSig* sig, const LiftoffRegister* args, -+ const LiftoffRegister* rets, ValueType out_argument_type, -+ int stack_bytes, ExternalReference ext_ref); -+ -+ inline void CallNativeWasmCode(Address addr); -+ // Indirect call: If {target == no_reg}, then pop the target from the stack. -+ inline void CallIndirect(const FunctionSig* sig, -+ compiler::CallDescriptor* call_descriptor, -+ Register target); -+ inline void CallRuntimeStub(WasmCode::RuntimeStubId sid); -+ -+ // Reserve space in the current frame, store address to space in {addr}. -+ inline void AllocateStackSlot(Register addr, uint32_t size); -+ inline void DeallocateStackSlot(uint32_t size); -+ -+ //////////////////////////////////// -+ // End of platform-specific part. // -+ //////////////////////////////////// -+ -+ uint32_t num_locals() const { return num_locals_; } -+ void set_num_locals(uint32_t num_locals); -+ -+ int GetTotalFrameSlotCount() const { -+ // TODO(zhin): Temporary for migration from index to offset. -+ return ((max_used_spill_offset_ + kStackSlotSize - 1) / kStackSlotSize); -+ } -+ -+ int GetTotalFrameSize() const { return max_used_spill_offset_; } -+ -+ ValueType local_type(uint32_t index) { -+ DCHECK_GT(num_locals_, index); -+ ValueType* locals = -+ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; -+ return locals[index]; -+ } -+ -+ void set_local_type(uint32_t index, ValueType type) { -+ ValueType* locals = -+ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; -+ locals[index] = type; -+ } -+ -+ CacheState* cache_state() { return &cache_state_; } -+ const CacheState* cache_state() const { return &cache_state_; } -+ -+ bool did_bailout() { return bailout_reason_ != kSuccess; } -+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; } -+ const char* bailout_detail() const { return bailout_detail_; } -+ -+ void bailout(LiftoffBailoutReason reason, const char* detail) { -+ DCHECK_NE(kSuccess, reason); -+ if (bailout_reason_ != kSuccess) return; -+ AbortCompilation(); -+ bailout_reason_ = reason; -+ bailout_detail_ = detail; -+ } -+ -+ private: -+ LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned); -+ LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half); -+ -+ uint32_t num_locals_ = 0; -+ static constexpr uint32_t kInlineLocalTypes = 8; -+ union { -+ ValueType local_types_[kInlineLocalTypes]; -+ ValueType* more_local_types_; -+ }; -+ static_assert(sizeof(ValueType) == 4, -+ "Reconsider this inlining if ValueType gets bigger"); -+ CacheState cache_state_; -+ int max_used_spill_offset_ = StaticStackFrameSize(); -+ LiftoffBailoutReason bailout_reason_ = kSuccess; -+ const char* bailout_detail_ = nullptr; -+ -+ LiftoffRegister SpillOneRegister(LiftoffRegList candidates, -+ LiftoffRegList pinned); -+ // Spill one or two fp registers to get a pair of adjacent fp registers. -+ LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned); -+}; -+ -+std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState); -+ -+// ======================================================================= -+// Partially platform-independent implementations of the platform-dependent -+// part. 
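The 32-bit fallback below implements i64 bitwise operations as two independent 32-bit operations on the (low, high) register pair, reordering the halves or spilling through a temporary when a destination half aliases a source; the immediate form fills the high half with the immediate's sign extension, 0 or -1. A standalone model of the value-level behavior, with hypothetical names and the register-aliasing concerns abstracted away:

#include <cassert>
#include <cstdint>

// An i64 value as a 32-bit register pair, as on V8_TARGET_ARCH_32_BIT.
struct I64Pair {
  uint32_t low;
  uint32_t high;
};

// i64_and: each half is combined independently, like
// EmitI64IndependentHalfOperation instantiated with emit_i32_and.
I64Pair i64_and(I64Pair lhs, I64Pair rhs) {
  return {lhs.low & rhs.low, lhs.high & rhs.high};
}

// i64_andi: the high half sees the immediate's sign extension, 0 or -1 --
// exactly the `sign_extend` value computed in the code below.
I64Pair i64_andi(I64Pair lhs, int32_t imm) {
  uint32_t sign_extend = imm < 0 ? 0xffffffffu : 0u;
  return {lhs.low & static_cast<uint32_t>(imm), lhs.high & sign_extend};
}

int main() {
  I64Pair v{0xffffffffu, 0xffffffffu};
  assert(i64_andi(v, -1).high == 0xffffffffu);  // negative imm keeps high bits
  assert(i64_andi(v, 0x0f).high == 0u);         // positive imm clears them
  assert(i64_and(v, I64Pair{1u, 0u}).low == 1u);
  return 0;
}
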
-+ -+#ifdef V8_TARGET_ARCH_32_BIT -+ -+namespace liftoff { -+template -+void EmitI64IndependentHalfOperation(LiftoffAssembler* assm, -+ LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) { -+ // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()}, -+ // just first compute the lower half, then the upper half. -+ if (dst.low() != lhs.high() && dst.low() != rhs.high()) { -+ (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp()); -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp()); -+ return; -+ } -+ // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()}, -+ // we can compute this the other way around. -+ if (dst.high() != lhs.low() && dst.high() != rhs.low()) { -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp()); -+ (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp()); -+ return; -+ } -+ // Otherwise, we need a temporary register. -+ Register tmp = -+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp(); -+ (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp()); -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp()); -+ assm->Move(dst.low_gp(), tmp, kWasmI32); -+} -+ -+template -+void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm, -+ LiftoffRegister dst, -+ LiftoffRegister lhs, int32_t imm) { -+ // Top half of the immediate sign extended, either 0 or -1. -+ int32_t sign_extend = imm < 0 ? -1 : 0; -+ // If {dst.low_gp()} does not overlap with {lhs.high_gp()}, -+ // just first compute the lower half, then the upper half. -+ if (dst.low() != lhs.high()) { -+ (assm->*op)(dst.low_gp(), lhs.low_gp(), imm); -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend); -+ return; -+ } -+ // If {dst.high_gp()} does not overlap with {lhs.low_gp()}, -+ // we can compute this the other way around. -+ if (dst.high() != lhs.low()) { -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend); -+ (assm->*op)(dst.low_gp(), lhs.low_gp(), imm); -+ return; -+ } -+ // Otherwise, we need a temporary register. 
-+ Register tmp = -+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp(); -+ (assm->*op)(tmp, lhs.low_gp(), imm); -+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend); -+ assm->Move(dst.low_gp(), tmp, kWasmI32); -+} -+} // namespace liftoff -+ -+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) { -+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>( -+ this, dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm) { -+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_andi>( -+ this, dst, lhs, imm); -+} -+ -+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) { -+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>( -+ this, dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm) { -+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_ori>( -+ this, dst, lhs, imm); -+} -+ -+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) { -+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>( -+ this, dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t imm) { -+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xori>( -+ this, dst, lhs, imm); -+} -+ -+#endif // V8_TARGET_ARCH_32_BIT -+ -+// End of the partially platform-independent implementations of the -+// platform-dependent part. -+// ======================================================================= -+ -+class LiftoffStackSlots { -+ public: -+ explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {} -+ -+ void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset, -+ RegPairHalf half) { -+ slots_.emplace_back(src, src_offset, half); -+ } -+ void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); } -+ -+ inline void Construct(); -+ -+ private: -+ struct Slot { -+ // Allow move construction. -+ Slot(Slot&&) V8_NOEXCEPT = default; -+ Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset, -+ RegPairHalf half) -+ : src_(src), src_offset_(src_offset), half_(half) {} -+ explicit Slot(const LiftoffAssembler::VarState& src) -+ : src_(src), half_(kLowWord) {} -+ -+ const LiftoffAssembler::VarState src_; -+ uint32_t src_offset_ = 0; -+ RegPairHalf half_; -+ }; -+ -+ base::SmallVector slots_; -+ LiftoffAssembler* const asm_; -+ -+ DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots); -+}; -+ -+} // namespace wasm -+} // namespace internal -+} // namespace v8 -+ -+// Include platform specific implementation. 
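The per-architecture implementation is then pulled in by a preprocessor chain: exactly one backend header is selected per build, and an unported target fails the build outright rather than falling back to a generic path. A compilable miniature of the same selection idiom, with guards and names chosen for illustration only:

#include <cstdio>

#if defined(__x86_64__)
static const char* kLiftoffBackend = "x64";
#elif defined(__aarch64__)
static const char* kLiftoffBackend = "arm64";
#elif defined(__loongarch64)
static const char* kLiftoffBackend = "loong64";
#else
#error Unsupported architecture.
#endif

int main() {
  std::puts(kLiftoffBackend);  // exactly one definition survived preprocessing
  return 0;
}
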
-+#if V8_TARGET_ARCH_IA32 -+#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h" -+#elif V8_TARGET_ARCH_X64 -+#include "src/wasm/baseline/x64/liftoff-assembler-x64.h" -+#elif V8_TARGET_ARCH_ARM64 -+#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h" -+#elif V8_TARGET_ARCH_ARM -+#include "src/wasm/baseline/arm/liftoff-assembler-arm.h" -+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 -+#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h" -+#elif V8_TARGET_ARCH_MIPS -+#include "src/wasm/baseline/mips/liftoff-assembler-mips.h" -+#elif V8_TARGET_ARCH_MIPS64 -+#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" -+#elif V8_TARGET_ARCH_S390 -+#include "src/wasm/baseline/s390/liftoff-assembler-s390.h" -+#else -+#error Unsupported architecture. -+#endif -+ -+#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_ -diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h -new file mode 100644 -index 00000000..085cc34b ---- /dev/null -+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h -@@ -0,0 +1,1849 @@ -+// Copyright 2017 the V8 project authors. All rights reserved. -+// Use of this source code is governed by a BSD-style license that can be -+// found in the LICENSE file. -+ -+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ -+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ -+ -+#include "src/wasm/baseline/liftoff-assembler.h" -+ -+namespace v8 { -+namespace internal { -+namespace wasm { -+ -+namespace liftoff { -+ -+// Liftoff Frames. -+// -+// slot Frame -+// +--------------------+--------------------------- -+// n+4 | optional padding slot to keep the stack 16 byte aligned. -+// n+3 | parameter n | -+// ... | ... | -+// 4 | parameter 1 | or parameter 2 -+// 3 | parameter 0 | or parameter 1 -+// 2 | (result address) | or parameter 0 -+// -----+--------------------+--------------------------- -+// 1 | return addr (ra) | -+// 0 | previous frame (fp)| -+// -----+--------------------+ <-- frame ptr (fp) -+// -1 | 0xa: WASM | -+// -2 | instance | -+// -----+--------------------+--------------------------- -+// -3 | slot 0 | ^ -+// -4 | slot 1 | | -+// | | Frame slots -+// | | | -+// | | v -+// | optional padding slot to keep the stack 16 byte aligned. -+// -----+--------------------+ <-- stack ptr (sp) -+// -+ -+// fp-8 holds the stack marker, fp-16 is the instance parameter. -+constexpr int kInstanceOffset = 16; -+ -+inline MemOperand GetStackSlot(int offset) { -+ return MemOperand(offset > 0 ? 
fp : sp, -offset); -+} -+ -+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } -+ -+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, -+ ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kI32: -+ assm->Ld_w(dst.gp(), src); -+ break; -+ case ValueType::kI64: -+ assm->Ld_d(dst.gp(), src); -+ break; -+ case ValueType::kF32: -+ assm->Fld_s(dst.fp(), src); -+ break; -+ case ValueType::kF64: -+ assm->Fld_d(dst.fp(), src); -+ break; -+ case ValueType::kS128: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, -+ LiftoffRegister src, ValueType type) { -+ MemOperand dst(base, offset); -+ switch (type.kind()) { -+ case ValueType::kI32: -+ assm->St_w(src.gp(), dst); -+ break; -+ case ValueType::kI64: -+ assm->St_d(src.gp(), dst); -+ break; -+ case ValueType::kF32: -+ assm->Fst_s(src.fp(), dst); -+ break; -+ case ValueType::kF64: -+ assm->Fst_d(src.fp(), dst); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kI32: -+ assm->addi_d(sp, sp, -kSystemPointerSize); -+ assm->St_w(reg.gp(), MemOperand(sp, 0)); -+ break; -+ case ValueType::kI64: -+ assm->push(reg.gp()); -+ break; -+ case ValueType::kF32: -+ assm->addi_d(sp, sp, -kSystemPointerSize); -+ assm->Fst_s(reg.fp(), MemOperand(sp, 0)); -+ break; -+ case ValueType::kF64: -+ assm->addi_d(sp, sp, -kSystemPointerSize); -+ assm->Fst_d(reg.fp(), MemOperand(sp, 0)); -+ break; -+ case ValueType::kS128: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+} // namespace liftoff -+ -+int LiftoffAssembler::PrepareStackFrame() { -+ int offset = pc_offset(); -+ // When constant that represents size of stack frame can't be represented -+ // as 16bit we need three instructions to add it to sp, so we reserve space -+ // for this case. -+ addi_d(sp, sp, 0); -+ nop(); -+ nop(); -+ return offset; -+} -+ -+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) { -+ // We can't run out of space, just pass anything big enough to not cause the -+ // assembler to try to grow the buffer. -+ constexpr int kAvailableSpace = 256; -+ TurboAssembler patching_assembler( -+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, -+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); -+ // If bytes can be represented as 16bit, daddiu will be generated and two -+ // nops will stay untouched. Otherwise, lui-ori sequence will load it to -+ // register and, as third instruction, daddu will be generated. -+ patching_assembler.Add_d(sp, sp, Operand(-frame_size)); -+} -+ -+void LiftoffAssembler::FinishCode() {} -+ -+void LiftoffAssembler::AbortCompilation() {} -+ -+// static -+constexpr int LiftoffAssembler::StaticStackFrameSize() { -+ return liftoff::kInstanceOffset; -+} -+ -+int LiftoffAssembler::SlotSizeForType(ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kS128: -+ return type.element_size_bytes(); -+ default: -+ return kStackSlotSize; -+ } -+} -+ -+bool LiftoffAssembler::NeedsAlignment(ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kS128: -+ return true; -+ default: -+ // No alignment because all other types are kStackSlotSize. 
-+ return false; -+ } -+} -+ -+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, -+ RelocInfo::Mode rmode) { -+ switch (value.type().kind()) { -+ case ValueType::kI32: -+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); -+ break; -+ case ValueType::kI64: -+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); -+ break; -+ case ValueType::kF32: -+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); -+ break; -+ case ValueType::kF64: -+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, -+ int size) { -+ DCHECK_LE(offset, kMaxInt); -+ Ld_d(dst, liftoff::GetInstanceOperand()); -+ DCHECK(size == 4 || size == 8); -+ if (size == 4) { -+ Ld_w(dst, MemOperand(dst, offset)); -+ } else { -+ Ld_d(dst, MemOperand(dst, offset)); -+ } -+} -+ -+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, -+ uint32_t offset) { -+ LoadFromInstance(dst, offset, kTaggedSize); -+} -+ -+void LiftoffAssembler::SpillInstance(Register instance) { -+ St_d(instance, liftoff::GetInstanceOperand()); -+} -+ -+void LiftoffAssembler::FillInstanceInto(Register dst) { -+ Ld_d(dst, liftoff::GetInstanceOperand()); -+} -+ -+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, -+ Register offset_reg, -+ uint32_t offset_imm, -+ LiftoffRegList pinned) { -+ STATIC_ASSERT(kTaggedSize == kInt64Size); -+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm, -+ LoadType::kI64Load, pinned); -+} -+ -+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, -+ Register offset_reg, uint32_t offset_imm, -+ LoadType type, LiftoffRegList pinned, -+ uint32_t* protected_load_pc, bool is_load_mem) { -+ Register src = no_reg; -+ if (offset_reg != no_reg) { -+ src = GetUnusedRegister(kGpReg, pinned).gp(); -+ emit_ptrsize_add(src, src_addr, offset_reg); -+ } -+ MemOperand src_op = (offset_reg != no_reg) ? 
MemOperand(src, offset_imm) -+ : MemOperand(src_addr, offset_imm); -+ -+ if (protected_load_pc) *protected_load_pc = pc_offset(); -+ switch (type.value()) { -+ case LoadType::kI32Load8U: -+ case LoadType::kI64Load8U: -+ Ld_bu(dst.gp(), src_op); -+ break; -+ case LoadType::kI32Load8S: -+ case LoadType::kI64Load8S: -+ Ld_b(dst.gp(), src_op); -+ break; -+ case LoadType::kI32Load16U: -+ case LoadType::kI64Load16U: -+ TurboAssembler::Ld_hu(dst.gp(), src_op); -+ break; -+ case LoadType::kI32Load16S: -+ case LoadType::kI64Load16S: -+ TurboAssembler::Ld_h(dst.gp(), src_op); -+ break; -+ case LoadType::kI64Load32U: -+ TurboAssembler::Ld_wu(dst.gp(), src_op); -+ break; -+ case LoadType::kI32Load: -+ case LoadType::kI64Load32S: -+ TurboAssembler::Ld_w(dst.gp(), src_op); -+ break; -+ case LoadType::kI64Load: -+ TurboAssembler::Ld_d(dst.gp(), src_op); -+ break; -+ case LoadType::kF32Load: -+ TurboAssembler::Fld_s(dst.fp(), src_op); -+ break; -+ case LoadType::kF64Load: -+ TurboAssembler::Fld_d(dst.fp(), src_op); -+ break; -+ case LoadType::kS128Load: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister src, -+ StoreType type, LiftoffRegList pinned, -+ uint32_t* protected_store_pc, bool is_store_mem) { -+ Register dst = no_reg; -+ MemOperand dst_op = MemOperand(dst_addr, offset_imm); -+ if (offset_reg != no_reg) { -+ if (is_store_mem) { -+ pinned.set(src); -+ } -+ dst = GetUnusedRegister(kGpReg, pinned).gp(); -+ emit_ptrsize_add(dst, dst_addr, offset_reg); -+ dst_op = MemOperand(dst, offset_imm); -+ } -+ -+ if (protected_store_pc) *protected_store_pc = pc_offset(); -+ switch (type.value()) { -+ case StoreType::kI32Store8: -+ case StoreType::kI64Store8: -+ St_b(src.gp(), dst_op); -+ break; -+ case StoreType::kI32Store16: -+ case StoreType::kI64Store16: -+ TurboAssembler::St_h(src.gp(), dst_op); -+ break; -+ case StoreType::kI32Store: -+ case StoreType::kI64Store32: -+ TurboAssembler::St_w(src.gp(), dst_op); -+ break; -+ case StoreType::kI64Store: -+ TurboAssembler::St_d(src.gp(), dst_op); -+ break; -+ case StoreType::kF32Store: -+ TurboAssembler::Fst_s(src.fp(), dst_op); -+ break; -+ case StoreType::kF64Store: -+ TurboAssembler::Fst_d(src.fp(), dst_op); -+ break; -+ case StoreType::kS128Store: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, -+ Register offset_reg, uint32_t offset_imm, -+ LoadType type, LiftoffRegList pinned) { -+ bailout(kAtomics, "AtomicLoad"); -+} -+ -+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister src, -+ StoreType type, LiftoffRegList pinned) { -+ bailout(kAtomics, "AtomicStore"); -+} -+ -+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicAdd"); -+} -+ -+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicSub"); -+} -+ -+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicAnd"); -+} -+ -+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, -+ uint32_t 
offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicOr"); -+} -+ -+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicXor"); -+} -+ -+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, -+ uint32_t offset_imm, -+ LiftoffRegister value, -+ LiftoffRegister result, StoreType type) { -+ bailout(kAtomics, "AtomicExchange"); -+} -+ -+void LiftoffAssembler::AtomicCompareExchange( -+ Register dst_addr, Register offset_reg, uint32_t offset_imm, -+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, -+ StoreType type) { -+ bailout(kAtomics, "AtomicCompareExchange"); -+} -+ -+void LiftoffAssembler::AtomicFence() { dbar(0); } -+ -+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, -+ uint32_t caller_slot_idx, -+ ValueType type) { -+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1)); -+ liftoff::Load(this, dst, src, type); -+} -+ -+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, -+ uint32_t caller_slot_idx, -+ ValueType type) { -+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); -+ liftoff::Store(this, fp, offset, src, type); -+} -+ -+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, -+ ValueType type) { -+ DCHECK_NE(dst_offset, src_offset); -+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {}); -+ Fill(reg, src_offset, type); -+ Spill(dst_offset, reg, type); -+} -+ -+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { -+ DCHECK_NE(dst, src); -+ // TODO(ksreten): Handle different sizes here. -+ TurboAssembler::Move(dst, src); -+} -+ -+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, -+ ValueType type) { -+ DCHECK_NE(dst, src); -+ if (type != kWasmS128) { -+ TurboAssembler::Move(dst, src); -+ } else { -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { -+ RecordUsedSpillOffset(offset); -+ MemOperand dst = liftoff::GetStackSlot(offset); -+ switch (type.kind()) { -+ case ValueType::kI32: -+ St_w(reg.gp(), dst); -+ break; -+ case ValueType::kI64: -+ St_d(reg.gp(), dst); -+ break; -+ case ValueType::kF32: -+ Fst_s(reg.fp(), dst); -+ break; -+ case ValueType::kF64: -+ TurboAssembler::Fst_d(reg.fp(), dst); -+ break; -+ case ValueType::kS128: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::Spill(int offset, WasmValue value) { -+ RecordUsedSpillOffset(offset); -+ MemOperand dst = liftoff::GetStackSlot(offset); -+ switch (value.type().kind()) { -+ case ValueType::kI32: { -+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); -+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); -+ St_w(tmp.gp(), dst); -+ break; -+ } -+ case ValueType::kI64: { -+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); -+ TurboAssembler::li(tmp.gp(), value.to_i64()); -+ St_d(tmp.gp(), dst); -+ break; -+ } -+ default: -+ // kWasmF32 and kWasmF64 are unreachable, since those -+ // constants are not tracked. 
-+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) { -+ MemOperand src = liftoff::GetStackSlot(offset); -+ switch (type.kind()) { -+ case ValueType::kI32: -+ Ld_w(reg.gp(), src); -+ break; -+ case ValueType::kI64: -+ Ld_d(reg.gp(), src); -+ break; -+ case ValueType::kF32: -+ Fld_s(reg.fp(), src); -+ break; -+ case ValueType::kF64: -+ TurboAssembler::Fld_d(reg.fp(), src); -+ break; -+ case ValueType::kS128: -+ UNREACHABLE(); -+ break; -+ default: -+ UNREACHABLE(); -+ } -+} -+ -+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) { -+ UNREACHABLE(); -+} -+ -+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { -+ DCHECK_LT(0, size); -+ RecordUsedSpillOffset(start + size); -+ -+ if (size <= 12 * kStackSlotSize) { -+ // Special straight-line code for up to 12 slots. Generates one -+ // instruction per slot (<= 12 instructions total). -+ uint32_t remainder = size; -+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) { -+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder)); -+ } -+ DCHECK(remainder == 4 || remainder == 0); -+ if (remainder) { -+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder)); -+ } -+ } else { -+ // General case for bigger counts (12 instructions). -+ // Use a0 for start address (inclusive), a1 for end address (exclusive). -+ Push(a1, a0); -+ Add_d(a0, fp, Operand(-start - size)); -+ Add_d(a1, fp, Operand(-start)); -+ -+ Label loop; -+ bind(&loop); -+ St_d(zero_reg, MemOperand(a0, kSystemPointerSize)); -+ addi_d(a0, a0, kSystemPointerSize); -+ BranchShort(&loop, ne, a0, Operand(a1)); -+ -+ Pop(a1, a0); -+ } -+} -+ -+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { -+ TurboAssembler::Clz_d(dst.gp(), src.gp()); -+} -+ -+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { -+ TurboAssembler::Ctz_d(dst.gp(), src.gp()); -+} -+ -+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, -+ LiftoffRegister src) { -+ TurboAssembler::Popcnt_d(dst.gp(), src.gp()); -+ return true; -+} -+ -+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { -+ TurboAssembler::Mul_w(dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero, -+ Label* trap_div_unrepresentable) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); -+ -+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
-+ TurboAssembler::li(kScratchReg, 1); -+ TurboAssembler::li(kScratchReg2, 1); -+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); -+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); -+ add_d(kScratchReg, kScratchReg, kScratchReg2); -+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, -+ Operand(zero_reg)); -+ -+ TurboAssembler::Div_w(dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); -+ TurboAssembler::Div_wu(dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); -+ TurboAssembler::Mod_w(dst, lhs, rhs); -+} -+ -+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); -+ TurboAssembler::Mod_wu(dst, lhs, rhs); -+} -+ -+#define I32_BINOP(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ -+ Register rhs) { \ -+ instruction(dst, lhs, rhs); \ -+ } -+ -+// clang-format off -+I32_BINOP(add, add_w) -+I32_BINOP(sub, sub_w) -+I32_BINOP(and, and_) -+I32_BINOP(or, or_) -+I32_BINOP(xor, xor_) -+// clang-format on -+ -+#undef I32_BINOP -+ -+#define I32_BINOP_I(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \ -+ int32_t imm) { \ -+ instruction(dst, lhs, imm); \ -+ } -+ -+// clang-format off -+I32_BINOP_I(add, Add_w) -+I32_BINOP_I(and, And) -+I32_BINOP_I(or, Or) -+I32_BINOP_I(xor, Xor) -+// clang-format on -+ -+#undef I32_BINOP_I -+ -+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { -+ TurboAssembler::Clz_w(dst, src); -+} -+ -+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { -+ TurboAssembler::Ctz_w(dst, src); -+} -+ -+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { -+ TurboAssembler::Popcnt_w(dst, src); -+ return true; -+} -+ -+#define I32_SHIFTOP(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ -+ Register amount) { \ -+ instruction(dst, src, amount); \ -+ } -+#define I32_SHIFTOP_I(name, instruction, instruction1) \ -+ I32_SHIFTOP(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \ -+ int amount) { \ -+ instruction1(dst, src, amount & 0x1f); \ -+ } -+ -+I32_SHIFTOP_I(shl, sll_w, slli_w) -+I32_SHIFTOP_I(sar, sra_w, srai_w) -+I32_SHIFTOP_I(shr, srl_w, srli_w) -+ -+#undef I32_SHIFTOP -+#undef I32_SHIFTOP_I -+ -+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) { -+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); -+} -+ -+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, -+ Label* trap_div_by_zero, -+ Label* trap_div_unrepresentable) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); -+ -+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
-+ TurboAssembler::li(kScratchReg, 1); -+ TurboAssembler::li(kScratchReg2, 1); -+ TurboAssembler::LoadZeroOnCondition( -+ kScratchReg, lhs.gp(), Operand(std::numeric_limits::min()), eq); -+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); -+ add_d(kScratchReg, kScratchReg, kScratchReg2); -+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, -+ Operand(zero_reg)); -+ -+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); -+ return true; -+} -+ -+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); -+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); -+ return true; -+} -+ -+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); -+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); -+ return true; -+} -+ -+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs, -+ Label* trap_div_by_zero) { -+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); -+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); -+ return true; -+} -+ -+#define I64_BINOP(name, instruction) \ -+ void LiftoffAssembler::emit_i64_##name( \ -+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \ -+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \ -+ } -+ -+// clang-format off -+I64_BINOP(add, Add_d) -+I64_BINOP(sub, Sub_d) -+I64_BINOP(and, and_) -+I64_BINOP(or, or_) -+I64_BINOP(xor, xor_) -+// clang-format on -+ -+#undef I64_BINOP -+ -+#define I64_BINOP_I(name, instruction) \ -+ void LiftoffAssembler::emit_i64_##name##i( \ -+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \ -+ instruction(dst.gp(), lhs.gp(), imm); \ -+ } -+ -+// clang-format off -+I64_BINOP_I(add, addi_d) -+I64_BINOP_I(and, And) -+I64_BINOP_I(or, Or) -+I64_BINOP_I(xor, Xor) -+// clang-format on -+ -+#undef I64_BINOP_I -+ -+#define I64_SHIFTOP(name, instruction) \ -+ void LiftoffAssembler::emit_i64_##name( \ -+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \ -+ instruction(dst.gp(), src.gp(), amount); \ -+ } -+#define I64_SHIFTOP_I(name, instruction, instructioni) \ -+ I64_SHIFTOP(name, instruction) \ -+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \ -+ LiftoffRegister src, int amount) { \ -+ DCHECK(is_uint6(amount)); \ -+ instructioni(dst.gp(), src.gp(), amount); \ -+ } -+ -+I64_SHIFTOP_I(shl, sll_d, slli_d) -+I64_SHIFTOP_I(sar, sra_d, srai_d) -+I64_SHIFTOP_I(shr, srl_d, srli_d) -+ -+#undef I64_SHIFTOP -+#undef I64_SHIFTOP_I -+ -+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) { -+ add_w(dst, src, zero_reg); -+} -+ -+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { -+ TurboAssembler::Neg_s(dst, src); -+} -+ -+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { -+ TurboAssembler::Neg_d(dst, src); -+} -+ -+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ Label ool, done; -+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool); -+ Branch(&done); -+ -+ bind(&ool); -+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); -+ bind(&done); -+} -+ -+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ Label ool, 
done; -+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool); -+ Branch(&done); -+ -+ bind(&ool); -+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); -+ bind(&done); -+} -+ -+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ bailout(kComplexOperation, "f32_copysign"); -+} -+ -+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ Label ool, done; -+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool); -+ Branch(&done); -+ -+ bind(&ool); -+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); -+ bind(&done); -+} -+ -+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ Label ool, done; -+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool); -+ Branch(&done); -+ -+ bind(&ool); -+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); -+ bind(&done); -+} -+ -+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, -+ DoubleRegister rhs) { -+ bailout(kComplexOperation, "f64_copysign"); -+} -+ -+#define FP_BINOP(name, instruction) \ -+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \ -+ DoubleRegister rhs) { \ -+ instruction(dst, lhs, rhs); \ -+ } -+#define FP_UNOP(name, instruction) \ -+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ -+ instruction(dst, src); \ -+ } -+#define FP_UNOP_RETURN_TRUE(name, instruction) \ -+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ -+ instruction(dst, src); \ -+ return true; \ -+ } -+ -+FP_BINOP(f32_add, fadd_s) -+FP_BINOP(f32_sub, fsub_s) -+FP_BINOP(f32_mul, fmul_s) -+FP_BINOP(f32_div, fdiv_s) -+FP_UNOP(f32_abs, fabs_s) -+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s) -+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s) -+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s) -+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s) -+FP_UNOP(f32_sqrt, fsqrt_s) -+FP_BINOP(f64_add, fadd_d) -+FP_BINOP(f64_sub, fsub_d) -+FP_BINOP(f64_mul, fmul_d) -+FP_BINOP(f64_div, fdiv_d) -+FP_UNOP(f64_abs, fabs_d) -+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d) -+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d) -+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d) -+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d) -+FP_UNOP(f64_sqrt, fsqrt_d) -+ -+#undef FP_BINOP -+#undef FP_UNOP -+#undef FP_UNOP_RETURN_TRUE -+ -+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, -+ LiftoffRegister dst, -+ LiftoffRegister src, Label* trap) { -+ switch (opcode) { -+ case kExprI32ConvertI64: -+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); -+ return true; -+ case kExprI32SConvertF32: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); -+ ftintrz_w_s(kScratchDoubleReg, rounded.fp()); -+ movfr2gr_s(dst.gp(), kScratchDoubleReg); -+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, -+ // because INT32_MIN allows easier out-of-bounds detection. -+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); -+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); -+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); -+ -+ // Checking if trap. 
-+ movgr2fr_w(kScratchDoubleReg, dst.gp()); -+ ffint_s_w(converted_back.fp(), kScratchDoubleReg); -+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI32UConvertF32: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); -+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); -+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, -+ // because 0 allows easier out-of-bounds detection. -+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); -+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); -+ -+ // Checking if trap. -+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); -+ fcvt_s_d(converted_back.fp(), converted_back.fp()); -+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI32SConvertF64: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); -+ ftintrz_w_d(kScratchDoubleReg, rounded.fp()); -+ movfr2gr_s(dst.gp(), kScratchDoubleReg); -+ -+ // Checking if trap. -+ ffint_d_w(converted_back.fp(), kScratchDoubleReg); -+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI32UConvertF64: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); -+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); -+ -+ // Checking if trap. -+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); -+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI32ReinterpretF32: -+ TurboAssembler::FmoveLow(dst.gp(), src.fp()); -+ return true; -+ case kExprI64SConvertI32: -+ slli_w(dst.gp(), src.gp(), 0); -+ return true; -+ case kExprI64UConvertI32: -+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); -+ return true; -+ case kExprI64SConvertF32: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); -+ ftintrz_l_s(kScratchDoubleReg, rounded.fp()); -+ movfr2gr_d(dst.gp(), kScratchDoubleReg); -+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, -+ // because INT64_MIN allows easier out-of-bounds detection. -+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); -+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); -+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); -+ -+ // Checking if trap. 
-+ movgr2fr_d(kScratchDoubleReg, dst.gp()); -+ ffint_s_l(converted_back.fp(), kScratchDoubleReg); -+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI64UConvertF32: { -+ // Real conversion. -+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, -+ kScratchReg); -+ -+ // Checking if trap. -+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); -+ return true; -+ } -+ case kExprI64SConvertF64: { -+ LiftoffRegister rounded = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); -+ LiftoffRegister converted_back = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); -+ -+ // Real conversion. -+ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); -+ ftintrz_l_d(kScratchDoubleReg, rounded.fp()); -+ movfr2gr_d(dst.gp(), kScratchDoubleReg); -+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, -+ // because INT64_MIN allows easier out-of-bounds detection. -+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); -+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); -+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); -+ -+ // Checking if trap. -+ movgr2fr_d(kScratchDoubleReg, dst.gp()); -+ ffint_d_l(converted_back.fp(), kScratchDoubleReg); -+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); -+ TurboAssembler::BranchFalseF(trap); -+ return true; -+ } -+ case kExprI64UConvertF64: { -+ // Real conversion. -+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, -+ kScratchReg); -+ -+ // Checking if trap. -+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); -+ return true; -+ } -+ case kExprI64ReinterpretF64: -+ movfr2gr_d(dst.gp(), src.fp()); -+ return true; -+ case kExprF32SConvertI32: { -+ LiftoffRegister scratch = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); -+ movgr2fr_w(scratch.fp(), src.gp()); -+ ffint_s_w(dst.fp(), scratch.fp()); -+ return true; -+ } -+ case kExprF32UConvertI32: -+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp()); -+ return true; -+ case kExprF32ConvertF64: -+ fcvt_s_d(dst.fp(), src.fp()); -+ return true; -+ case kExprF32ReinterpretI32: -+ TurboAssembler::FmoveLow(dst.fp(), src.gp()); -+ return true; -+ case kExprF64SConvertI32: { -+ LiftoffRegister scratch = -+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); -+ movgr2fr_w(scratch.fp(), src.gp()); -+ ffint_d_w(dst.fp(), scratch.fp()); -+ return true; -+ } -+ case kExprF64UConvertI32: -+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp()); -+ return true; -+ case kExprF64ConvertF32: -+ fcvt_d_s(dst.fp(), src.fp()); -+ return true; -+ case kExprF64ReinterpretI64: -+ movgr2fr_d(dst.fp(), src.gp()); -+ return true; -+ default: -+ return false; -+ } -+} -+ -+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { -+ bailout(kComplexOperation, "i32_signextend_i8"); -+} -+ -+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { -+ bailout(kComplexOperation, "i32_signextend_i16"); -+} -+ -+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, -+ LiftoffRegister src) { -+ bailout(kComplexOperation, "i64_signextend_i8"); -+} -+ -+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, -+ LiftoffRegister src) { -+ bailout(kComplexOperation, "i64_signextend_i16"); -+} -+ -+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, -+ LiftoffRegister src) { -+ bailout(kComplexOperation, "i64_signextend_i32"); -+} -+ -+void 
LiftoffAssembler::emit_jump(Label* label) {
-+  TurboAssembler::Branch(label);
-+}
-+
-+void LiftoffAssembler::emit_jump(Register target) {
-+  TurboAssembler::Jump(target);
-+}
-+
-+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
-+                                      ValueType type, Register lhs,
-+                                      Register rhs) {
-+  if (rhs != no_reg) {
-+    TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
-+  } else {
-+    TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
-+  }
-+}
-+
-+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
-+  sltui(dst, src, 1);
-+}
-+
-+void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
-+                                         Register lhs, Register rhs) {
-+  Register tmp = dst;
-+  if (dst == lhs || dst == rhs) {
-+    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
-+  }
-+  // Write 1 as result.
-+  TurboAssembler::li(tmp, 1);
-+
-+  // If negative condition is true, write 0 as result.
-+  Condition neg_cond = NegateCondition(cond);
-+  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
-+
-+  // If tmp != dst, result will be moved.
-+  TurboAssembler::Move(dst, tmp);
-+}
-+
-+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
-+  sltui(dst, src.gp(), 1);
-+}
-+
-+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
-+                                         LiftoffRegister lhs,
-+                                         LiftoffRegister rhs) {
-+  Register tmp = dst;
-+  if (dst == lhs.gp() || dst == rhs.gp()) {
-+    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
-+  }
-+  // Write 1 as result.
-+  TurboAssembler::li(tmp, 1);
-+
-+  // If negative condition is true, write 0 as result.
-+  Condition neg_cond = NegateCondition(cond);
-+  TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
-+                                      neg_cond);
-+
-+  // If tmp != dst, result will be moved.
-+  TurboAssembler::Move(dst, tmp);
-+}
-+
-+namespace liftoff {
-+
-+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
-+                                               bool* predicate) {
-+  switch (condition) {
-+    case kEqual:
-+      *predicate = true;
-+      return CEQ;
-+    case kUnequal:
-+      *predicate = false;
-+      return CEQ;
-+    case kUnsignedLessThan:
-+      *predicate = true;
-+      return CLT;
-+    case kUnsignedGreaterEqual:
-+      *predicate = false;
-+      return CLT;
-+    case kUnsignedLessEqual:
-+      *predicate = true;
-+      return CLE;
-+    case kUnsignedGreaterThan:
-+      *predicate = false;
-+      return CLE;
-+    default:
-+      *predicate = true;
-+      break;
-+  }
-+  UNREACHABLE();
-+}
-+
-+}  // namespace liftoff
-+
-+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
-+                                         DoubleRegister lhs,
-+                                         DoubleRegister rhs) {
-+  Label not_nan, cont;
-+  TurboAssembler::CompareIsNanF32(lhs, rhs);
-+  TurboAssembler::BranchFalseF(&not_nan);
-+  // If one of the operands is NaN, return 1 for f32.ne, else 0.
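-+  // Wasm floating-point comparisons are "unordered false": every comparison
-+  // with a NaN operand yields 0, except ne, which yields 1.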
-+  if (cond == ne) {
-+    TurboAssembler::li(dst, 1);
-+  } else {
-+    TurboAssembler::Move(dst, zero_reg);
-+  }
-+  TurboAssembler::Branch(&cont);
-+
-+  bind(&not_nan);
-+
-+  TurboAssembler::li(dst, 1);
-+  bool predicate;
-+  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
-+  TurboAssembler::CompareF32(lhs, rhs, fcond);
-+  if (predicate) {
-+    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
-+  } else {
-+    TurboAssembler::LoadZeroIfFPUCondition(dst);
-+  }
-+
-+  bind(&cont);
-+}
-+
-+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
-+                                         DoubleRegister lhs,
-+                                         DoubleRegister rhs) {
-+  Label not_nan, cont;
-+  TurboAssembler::CompareIsNanF64(lhs, rhs);
-+  TurboAssembler::BranchFalseF(&not_nan);
-+  // If one of the operands is NaN, return 1 for f64.ne, else 0.
-+  if (cond == ne) {
-+    TurboAssembler::li(dst, 1);
-+  } else {
-+    TurboAssembler::Move(dst, zero_reg);
-+  }
-+  TurboAssembler::Branch(&cont);
-+
-+  bind(&not_nan);
-+
-+  TurboAssembler::li(dst, 1);
-+  bool predicate;
-+  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
-+  TurboAssembler::CompareF64(lhs, rhs, fcond);
-+  if (predicate) {
-+    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
-+  } else {
-+    TurboAssembler::LoadZeroIfFPUCondition(dst);
-+  }
-+
-+  bind(&cont);
-+}
-+
-+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
-+                                        LiftoffRegister src) {}
-+
-+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
-+                                     LiftoffRegister rhs) {}
-+
-+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
-+                                       LiftoffRegister rhs) {}
-+
-+void 
LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { -+} -+ -+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ LiftoffRegister mask) {} -+ -+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void 
LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, -+ int32_t rhs) {} -+ -+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void 
LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ LiftoffRegister rhs) {} -+ -+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void 
LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst, -+ LiftoffRegister src) {} -+ -+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, -+ LiftoffRegister lhs, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, -+ LiftoffRegister src1, -+ LiftoffRegister src2, -+ uint8_t imm_lane_idx) {} -+ -+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { -+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); -+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); -+} -+ -+void LiftoffAssembler::CallTrapCallbackForTesting() { -+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); -+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); -+} -+ -+void LiftoffAssembler::AssertUnreachable(AbortReason reason) { -+ if (emit_debug_code()) Abort(reason); -+} -+ -+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { -+ LiftoffRegList gp_regs = regs & kGpCacheRegList; -+ unsigned num_gp_regs = gp_regs.GetNumRegsSet(); -+ if (num_gp_regs) { -+ unsigned offset = num_gp_regs * kSystemPointerSize; -+ addi_d(sp, sp, -offset); -+ while (!gp_regs.is_empty()) { -+ LiftoffRegister reg = gp_regs.GetFirstRegSet(); -+ offset -= kSystemPointerSize; -+ St_d(reg.gp(), MemOperand(sp, offset)); -+ gp_regs.clear(reg); -+ } -+ DCHECK_EQ(offset, 0); -+ } -+ LiftoffRegList fp_regs = regs & kFpCacheRegList; -+ unsigned num_fp_regs = fp_regs.GetNumRegsSet(); -+ if (num_fp_regs) { -+ unsigned slot_size = /*IsEnabled(MIPS_SIMD) ? 
16 :*/ 8; -+ addi_d(sp, sp, -(num_fp_regs * slot_size)); -+ unsigned offset = 0; -+ while (!fp_regs.is_empty()) { -+ LiftoffRegister reg = fp_regs.GetFirstRegSet(); -+ if (0 /*IsEnabled(MIPS_SIMD)*/) { -+ // TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); -+ } else { -+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); -+ } -+ fp_regs.clear(reg); -+ offset += slot_size; -+ } -+ DCHECK_EQ(offset, num_fp_regs * slot_size); -+ } -+} -+ -+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { -+ LiftoffRegList fp_regs = regs & kFpCacheRegList; -+ unsigned fp_offset = 0; -+ while (!fp_regs.is_empty()) { -+ LiftoffRegister reg = fp_regs.GetFirstRegSet(); -+ if (0 /*IsEnabled(MIPS_SIMD)*/) { -+ // TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); -+ } else { -+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); -+ } -+ fp_regs.clear(reg); -+ fp_offset += (/*IsEnabled(MIPS_SIMD) ? 16 :*/ 8); -+ } -+ if (fp_offset) addi_d(sp, sp, fp_offset); -+ LiftoffRegList gp_regs = regs & kGpCacheRegList; -+ unsigned gp_offset = 0; -+ while (!gp_regs.is_empty()) { -+ LiftoffRegister reg = gp_regs.GetLastRegSet(); -+ Ld_d(reg.gp(), MemOperand(sp, gp_offset)); -+ gp_regs.clear(reg); -+ gp_offset += kSystemPointerSize; -+ } -+ addi_d(sp, sp, gp_offset); -+} -+ -+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { -+ DCHECK_LT(num_stack_slots, -+ (1 << 16) / kSystemPointerSize); // 16 bit immediate -+ TurboAssembler::DropAndRet(static_cast(num_stack_slots)); -+} -+ -+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, -+ const LiftoffRegister* args, -+ const LiftoffRegister* rets, -+ ValueType out_argument_type, int stack_bytes, -+ ExternalReference ext_ref) { -+ addi_d(sp, sp, -stack_bytes); -+ -+ int arg_bytes = 0; -+ for (ValueType param_type : sig->parameters()) { -+ liftoff::Store(this, sp, arg_bytes, *args++, param_type); -+ arg_bytes += param_type.element_size_bytes(); -+ } -+ DCHECK_LE(arg_bytes, stack_bytes); -+ -+ // Pass a pointer to the buffer with the arguments to the C function. -+ // On mips, the first argument is passed in {a0}. -+ constexpr Register kFirstArgReg = a0; -+ mov(kFirstArgReg, sp); -+ -+ // Now call the C function. -+ constexpr int kNumCCallArgs = 1; -+ PrepareCallCFunction(kNumCCallArgs, kScratchReg); -+ CallCFunction(ext_ref, kNumCCallArgs); -+ -+ // Move return value to the right register. -+ const LiftoffRegister* next_result_reg = rets; -+ if (sig->return_count() > 0) { -+ DCHECK_EQ(1, sig->return_count()); -+ constexpr Register kReturnReg = a0; -+ if (kReturnReg != next_result_reg->gp()) { -+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0)); -+ } -+ ++next_result_reg; -+ } -+ -+ // Load potential output value from the buffer on the stack. -+ if (out_argument_type != kWasmStmt) { -+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type); -+ } -+ -+ addi_d(sp, sp, stack_bytes); -+} -+ -+void LiftoffAssembler::CallNativeWasmCode(Address addr) { -+ Call(addr, RelocInfo::WASM_CALL); -+} -+ -+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, -+ compiler::CallDescriptor* call_descriptor, -+ Register target) { -+ if (target == no_reg) { -+ pop(kScratchReg); -+ Call(kScratchReg); -+ } else { -+ Call(target); -+ } -+} -+ -+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { -+ // A direct call to a wasm runtime stub defined in this module. -+ // Just encode the stub index. This will be patched at relocation. -+ Call(static_cast
(sid), RelocInfo::WASM_STUB_CALL); -+} -+ -+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { -+ addi_d(sp, sp, -size); -+ TurboAssembler::Move(addr, sp); -+} -+ -+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { -+ addi_d(sp, sp, size); -+} -+ -+void LiftoffStackSlots::Construct() { -+ for (auto& slot : slots_) { -+ const LiftoffAssembler::VarState& src = slot.src_; -+ switch (src.loc()) { -+ case LiftoffAssembler::VarState::kStack: -+ if (src.type() != kWasmS128) { -+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); -+ asm_->push(kScratchReg); -+ } else { -+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8)); -+ asm_->push(kScratchReg); -+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); -+ asm_->push(kScratchReg); -+ } -+ break; -+ case LiftoffAssembler::VarState::kRegister: -+ liftoff::push(asm_, src.reg(), src.type()); -+ break; -+ case LiftoffAssembler::VarState::kIntConst: { -+ asm_->li(kScratchReg, Operand(src.i32_const())); -+ asm_->push(kScratchReg); -+ break; -+ } -+ } -+ } -+} -+ -+} // namespace wasm -+} // namespace internal -+} // namespace v8 -+ -+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ -diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc -index 90cdad46..5077edd4 100644 ---- a/deps/v8/src/wasm/jump-table-assembler.cc -+++ b/deps/v8/src/wasm/jump-table-assembler.cc -@@ -268,6 +268,37 @@ void JumpTableAssembler::NopBytes(int bytes) { - } - } - -+#elif V8_TARGET_ARCH_LOONG64 -+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, -+ Address lazy_compile_target) { -+ DCHECK(is_int32(func_index)); -+ int start = pc_offset(); -+ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr -+ // Jump produces max. 3 instructions for 32-bit platform -+ // and max. 4 instructions for 64-bit platform. 
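-+  // The slot is padded with nops below so that every lazy-compile entry
-+  // occupies exactly kLazyCompileTableSlotSize bytes.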
-+ Jump(lazy_compile_target, RelocInfo::NONE); -+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset(); -+ DCHECK_EQ(nop_bytes % kInstrSize, 0); -+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); -+} -+bool JumpTableAssembler::EmitJumpSlot(Address target) { -+ PatchAndJump(target); -+ return true; -+} -+void JumpTableAssembler::EmitFarJumpSlot(Address target) { -+ JumpToInstructionStream(target); -+} -+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { -+ UNREACHABLE(); -+} -+void JumpTableAssembler::NopBytes(int bytes) { -+ DCHECK_LE(0, bytes); -+ DCHECK_EQ(0, bytes % kInstrSize); -+ for (; bytes > 0; bytes -= kInstrSize) { -+ nop(); -+ } -+} -+ - #elif V8_TARGET_ARCH_PPC64 - void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, - Address lazy_compile_target) { -diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h -index 253f0bc0..2137afcd 100644 ---- a/deps/v8/src/wasm/jump-table-assembler.h -+++ b/deps/v8/src/wasm/jump-table-assembler.h -@@ -215,6 +215,12 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { - static constexpr int kJumpTableSlotSize = 8 * kInstrSize; - static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize; - static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; -+#elif V8_TARGET_ARCH_LOONG64 -+ // TODO -+ static constexpr int kJumpTableLineSize = 8 * kInstrSize; -+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize; -+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize; -+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; - #else - #error Unknown architecture. - #endif -diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h -index 7e56ea6e..4e8d0c65 100644 ---- a/deps/v8/src/wasm/wasm-linkage.h -+++ b/deps/v8/src/wasm/wasm-linkage.h -@@ -75,6 +75,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1}; - constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; - constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; - -+#elif V8_TARGET_ARCH_LOONG64 -+// =========================================================================== -+// == LOONG64 TODO ============================================================= -+// =========================================================================== -+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7}; -+constexpr Register kGpReturnRegisters[] = {a0, a1}; -+constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; -+constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; -+ - #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 - // =========================================================================== - // == ppc & ppc64 ============================================================ -diff --git a/deps/v8/test/cctest/test-assembler-loong64.cc b/deps/v8/test/cctest/test-assembler-loong64.cc -new file mode 100644 -index 00000000..366bcb7c ---- /dev/null -+++ b/deps/v8/test/cctest/test-assembler-loong64.cc -@@ -0,0 +1,5127 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. 
-+//     * Redistributions in binary form must reproduce the above
-+// copyright notice, this list of conditions and the following
-+// disclaimer in the documentation and/or other materials provided
-+// with the distribution.
-+//     * Neither the name of Google Inc. nor the names of its
-+// contributors may be used to endorse or promote products derived
-+// from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+#include <iostream>  // NOLINT(readability/streams)
-+
-+#include "src/base/utils/random-number-generator.h"
-+#include "src/codegen/assembler-inl.h"
-+#include "src/codegen/macro-assembler.h"
-+#include "src/diagnostics/disassembler.h"
-+#include "src/execution/simulator.h"
-+#include "src/heap/factory.h"
-+#include "src/init/v8.h"
-+#include "test/cctest/cctest.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// Define these function prototypes to match JSEntryFunction in execution.cc.
-+// TODO(mips64): Refine these signatures per test case.
-+using F1 = void*(int x, int p1, int p2, int p3, int p4);
-+using F2 = void*(int x, int y, int p2, int p3, int p4);
-+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
-+using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
-+using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
-+
-+#define __ assm.
-+// v0->a2, v1->a3
-+TEST(LA0) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  // Addition.
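-+  // Adds 12 to the incoming argument and returns it through a0:
-+  // 0xAB0 + 0xC == 0xABC, matching the CHECK_EQ below.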
-+  __ addi_d(a2, a0, 0xC);
-+
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0, 0, 0, 0));
-+  CHECK_EQ(0xABCL, res);
-+}
-+
-+TEST(LA1) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+  Label L, C;
-+
-+  __ ori(a1, a0, 0);
-+  __ ori(a2, zero_reg, 0);
-+  __ b(&C);
-+
-+  __ bind(&L);
-+  __ add_d(a2, a2, a1);
-+  __ addi_d(a1, a1, -1);
-+
-+  __ bind(&C);
-+  __ ori(a3, a1, 0);
-+
-+  __ Branch(&L, ne, a3, Operand((int64_t)0));
-+
-+  __ or_(a0, a2, zero_reg);
-+  __ or_(a1, a3, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
-+  CHECK_EQ(1275L, res);
-+}
-+
-+TEST(LA2) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  Label exit, error;
-+
-+  __ ori(a4, zero_reg, 0);  // 00000000
-+  __ lu12i_w(a4, 0x12345);  // 12345000
-+  __ ori(a4, a4, 0);        // 12345000
-+  __ ori(a2, a4, 0xF0F);    // 12345F0F
-+  __ Branch(&error, ne, a2, Operand(0x12345F0F));
-+
-+  __ ori(a4, zero_reg, 0);
-+  __ lu32i_d(a4, 0x12345);  // 1 2345 0000 0000
-+  __ ori(a4, a4, 0xFFF);    // 1 2345 0000 0FFF
-+  __ addi_d(a2, a4, 1);
-+  __ Branch(&error, ne, a2, Operand(0x1234500001000));
-+
-+  __ ori(a4, zero_reg, 0);
-+  __ lu52i_d(a4, zero_reg, 0x123);  // 1230 0000 0000 0000
-+  __ ori(a4, a4, 0xFFF);            // 1230 0000 0000 0FFF
-+  __ addi_d(a2, a4, 1);             // 1230 0000 0000 1000
-+  __ Branch(&error, ne, a2, Operand(0x1230000000001000));
-+
-+  __ li(a2, 0x31415926);
-+  __ b(&exit);
-+
-+  __ bind(&error);
-+  __ li(a2, 0x666);
-+
-+  __ bind(&exit);
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
-+
-+  CHECK_EQ(0x31415926L, res);
-+}
-+
-+TEST(LA3) {
-+  // Test 32bit calculate instructions.
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  Label exit, error;
-+
-+  __ li(a4, 0x00000004);
-+  __ li(a5, 0x00001234);
-+  __ li(a6, 0x12345678);
-+  __ li(a7, 0x7FFFFFFF);
-+  __ li(t0, static_cast<int32_t>(0xFFFFFFFC));
-+  __ li(t1, static_cast<int32_t>(0xFFFFEDCC));
-+  __ li(t2, static_cast<int32_t>(0xEDCBA988));
-+  __ li(t3, static_cast<int32_t>(0x80000000));
-+
-+  __ ori(a2, zero_reg, 0);  // 0x00000000
-+  __ add_w(a2, a4, a5);     // 0x00001238
-+  __ sub_w(a2, a2, a4);     // 0x00001234
-+  __ Branch(&error, ne, a2, Operand(0x00001234));
-+  __ ori(a3, zero_reg, 0);  // 0x00000000
-+  __ add_w(a3, a7, a4);  // 32bit addu result is sign-extended into 64bit reg.
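-+  // 0x7FFFFFFF + 4 wraps to 0x80000003 in 32 bits, which add_w
-+  // sign-extends to 0xFFFFFFFF80000003.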
-+  __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000003));
-+
-+  __ sub_w(a3, t3, a4);  // 0x7FFFFFFC
-+  __ Branch(&error, ne, a3, Operand(0x7FFFFFFC));
-+
-+  __ ori(a2, zero_reg, 0);         // 0x00000000
-+  __ ori(a3, zero_reg, 0);         // 0x00000000
-+  __ addi_w(a2, zero_reg, 0x421);  // 0x00000421
-+  __ addi_w(a2, a2, -0x1);         // 0x00000420
-+  __ addi_w(a2, a2, -0x20);        // 0x00000400
-+  __ Branch(&error, ne, a2, Operand(0x0000400));
-+  __ addi_w(a3, a7, 0x1);  // 0x80000000 - result is sign-extended.
-+  __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000000));
-+
-+  __ ori(a2, zero_reg, 0);    // 0x00000000
-+  __ ori(a3, zero_reg, 0);    // 0x00000000
-+  __ alsl_w(a2, a6, a4, 3);   // 0xFFFFFFFF91A2B3C4
-+  __ alsl_w(a2, a2, a4, 2);   // 0x468ACF14
-+  __ Branch(&error, ne, a2, Operand(0x468acf14));
-+  __ ori(a0, zero_reg, 31);
-+  __ alsl_wu(a3, a6, a4, 3);  // 0x91A2B3C4
-+  __ alsl_wu(a3, a3, a7, 1);  // 0xFFFFFFFFA3456787
-+  __ Branch(&error, ne, a3, Operand(0xA3456787));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ ori(a3, zero_reg, 0);
-+  __ mul_w(a2, a5, a7);
-+  __ div_w(a2, a2, a4);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFB73));
-+  __ mul_w(a3, a4, t1);
-+  __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFB730));
-+  __ div_w(a3, t3, a4);
-+  __ Branch(&error, ne, a3, Operand(0xFFFFFFFFE0000000));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulh_w(a2, a4, t1);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFFFF));
-+  __ mulh_w(a2, a4, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int32_t>(0)));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulh_wu(a2, a4, t1);
-+  __ Branch(&error, ne, a2, Operand(0x3));
-+  __ mulh_wu(a2, a4, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int32_t>(0)));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulw_d_w(a2, a4, t1);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFB730));
-+  __ mulw_d_w(a2, a4, a6);
-+  __ Branch(&error, ne, a2, Operand(0x48D159E0));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulw_d_wu(a2, a4, t1);
-+  __ Branch(&error, ne, a2, Operand(0x3FFFFB730));  //========0xFFFFB730
-+  __ ori(a2, zero_reg, 81);
-+  __ mulw_d_wu(a2, a4, a6);
-+  __ Branch(&error, ne, a2, Operand(0x48D159E0));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ div_wu(a2, a7, a5);
-+  __ Branch(&error, ne, a2, Operand(0x70821));
-+  __ div_wu(a2, t0, a5);
-+  __ Branch(&error, ne, a2, Operand(0xE1042));
-+  __ div_wu(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0x1));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_w(a2, a6, a5);
-+  __ Branch(&error, ne, a2, Operand(0xDA8));
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_w(a2, t2, a5);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_w(a2, t2, t1);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_wu(a2, a6, a5);
-+  __ Branch(&error, ne, a2, Operand(0xDA8));
-+  __ mod_wu(a2, t2, a5);
-+  __ Branch(&error, ne, a2, Operand(0xF0));
-+  __ mod_wu(a2, t2, t1);
-+  __ Branch(&error, ne, a2, Operand(0xFFFFFFFFEDCBA988));
-+
-+  __ li(a2, 0x31415926);
-+  __ b(&exit);
-+
-+  __ bind(&error);
-+  __ li(a2, 0x666);
-+
-+  __ bind(&exit);
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
-+
-+  CHECK_EQ(0x31415926L, res);
-+}
-+
-+TEST(LA4) {
-+  // Test 64bit calculate instructions.
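-+  // Covers add_d/sub_d/addi_d/addu16i_d/alsl_d and the 64-bit
-+  // mul/mulh/div/mod variants, including signed and unsigned overflow cases.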
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  Label exit, error;
-+
-+  __ li(a4, 0x17312);
-+  __ li(a5, 0x1012131415161718);
-+  __ li(a6, 0x51F4B764A26E7412);
-+  __ li(a7, 0x7FFFFFFFFFFFFFFF);
-+  __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
-+  __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
-+  __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
-+  __ li(t3, static_cast<int64_t>(0x8000000000000000));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ add_d(a2, a4, a5);
-+  __ sub_d(a2, a2, a4);
-+  __ Branch(&error, ne, a2, Operand(0x1012131415161718));
-+  __ ori(a3, zero_reg, 0);
-+  __ add_d(a3, a6, a7);  // overflow
-+  __ Branch(&error, ne, a3, Operand(0xd1f4b764a26e7411));
-+  __ sub_d(a3, t3, a4);  // overflow
-+  __ Branch(&error, ne, a3, Operand(0x7ffffffffffe8cee));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ addi_d(a2, a5, 0x412);  // positive value
-+  __ Branch(&error, ne, a2, Operand(0x1012131415161b2a));
-+  __ addi_d(a2, a7, 0x547);  // negative value
-+  __ Branch(&error, ne, a2, Operand(0x8000000000000546));
-+
-+  __ ori(t4, zero_reg, 0);
-+  __ addu16i_d(a2, t4, 0x1234);
-+  __ Branch(&error, ne, a2, Operand(0x12340000));
-+  __ addu16i_d(a2, a2, 0x9876);
-+  __ Branch(&error, ne, a2, Operand(0xffffffffaaaa0000));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ alsl_d(a2, t2, t0, 3);
-+  __ Branch(&error, ne, a2, Operand(0xf92d43e211b374f));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mul_d(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0));
-+  __ mul_d(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0x57ad69f40f870584));
-+  __ mul_d(a2, a4, t0);
-+  __ Branch(&error, ne, a2, Operand(0xfffffffff07523fe));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulh_d(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
-+  __ mulh_d(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0x15d));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mulh_du(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
-+  __ mulh_du(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0xdf6b8f35a10e1700));
-+  __ mulh_du(a2, a4, t0);
-+  __ Branch(&error, ne, a2, Operand(0x17311));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ div_d(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ div_d(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ div_d(a2, t1, a4);
-+  __ Branch(&error, ne, a2, Operand(0xffffe985f631e6d9));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ div_du(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ div_du(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0x1));
-+  __ div_du(a2, t1, a4);
-+  __ Branch(&error, ne, a2, Operand(0x9a22ffd3973d));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_d(a2, a6, a4);
-+  __ Branch(&error, ne, a2, Operand(0x13558));
-+  __ mod_d(a2, t2, t0);
-+  __ Branch(&error, ne, a2, Operand(0xfffffffffffffb0a));
-+  __ mod_d(a2, t1, a4);
-+  __ Branch(&error, ne, a2, Operand(0xffffffffffff6a1a));
-+
-+  __ ori(a2, zero_reg, 0);
-+  __ mod_du(a2, a6, a4);
-+  __ Branch(&error, ne, a2, Operand(0x13558));
-+  __ mod_du(a2, t2, t0);
-+  __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841));
-+  __ mod_du(a2, t1, a4);
-+  __ Branch(&error, ne, a2, Operand(0x1712));
-+
-+  // Everything was correctly executed. Load the expected result.
-+  __ li(a2, 0x31415926);
-+  __ b(&exit);
-+
-+  __ bind(&error);
-+  __ li(a2, 0x666);
-+  // Got an error. Return a wrong result.
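-+  // 0x666 is the failure sentinel; 0x31415926 marks success, as in the
-+  // other tests in this file.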
-+ -+ __ bind(&exit); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ CHECK_EQ(0x31415926L, res); -+} -+ -+TEST(LA5) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label exit, error; -+ -+ __ li(a4, 0x17312); -+ __ li(a5, 0x1012131415161718); -+ __ li(a6, 0x51F4B764A26E7412); -+ __ li(a7, 0x7FFFFFFFFFFFFFFF); -+ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); -+ __ li(t1, static_cast(0xDF6B8F35A10E205C)); -+ __ li(t2, static_cast(0x81F25A87C4236841)); -+ __ li(t3, static_cast(0x8000000000000000)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ slt(a2, a5, a6); -+ __ Branch(&error, ne, a2, Operand(0x1)); -+ __ slt(a2, a7, t0); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ __ slt(a2, t1, t1); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ -+ __ ori(a2, zero_reg, 0); -+ __ sltu(a2, a5, a6); -+ __ Branch(&error, ne, a2, Operand(0x1)); -+ __ sltu(a2, a7, t0); -+ __ Branch(&error, ne, a2, Operand(0x1)); -+ __ sltu(a2, t1, t1); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ -+ __ ori(a2, zero_reg, 0); -+ __ slti(a2, a5, 0x123); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ __ slti(a2, t0, 0x123); -+ __ Branch(&error, ne, a2, Operand(0x1)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ sltui(a2, a5, 0x123); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ __ sltui(a2, t0, 0x123); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ -+ __ ori(a2, zero_reg, 0); -+ __ and_(a2, a4, a5); -+ __ Branch(&error, ne, a2, Operand(0x1310)); -+ __ and_(a2, a6, a7); -+ __ Branch(&error, ne, a2, Operand(0x51F4B764A26E7412)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ or_(a2, t0, t1); -+ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f)); -+ __ or_(a2, t2, t3); -+ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ nor(a2, a4, a5); -+ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5)); -+ __ nor(a2, a6, a7); -+ __ Branch(&error, ne, a2, Operand(0x8000000000000000)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ xor_(a2, t0, t1); -+ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b)); -+ __ xor_(a2, t2, t3); -+ __ Branch(&error, ne, a2, Operand(0x1f25a87c4236841)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ andn(a2, a4, a5); -+ __ Branch(&error, ne, a2, Operand(0x16002)); -+ __ andn(a2, a6, a7); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ -+ __ ori(a2, zero_reg, 0); -+ __ orn(a2, t0, t1); -+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); -+ __ orn(a2, t2, t3); -+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffff)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ andi(a2, a4, 0x123); -+ __ Branch(&error, ne, a2, Operand(0x102)); -+ __ andi(a2, a6, 0xDCB); -+ __ Branch(&error, ne, a2, Operand(0x402)); -+ -+ __ ori(a2, zero_reg, 0); -+ __ xori(a2, t0, 0x123); -+ __ Branch(&error, ne, a2, Operand(0xfffffffffffff464)); -+ __ xori(a2, t2, 0xDCB); -+ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a)); -+ -+ // Everything was correctly executed. Load the expected result. -+ __ li(a2, 0x31415926); -+ __ b(&exit); -+ -+ __ bind(&error); -+ // Got an error. Return a wrong result. 
-+ __ li(a2, 0x666); -+ -+ __ bind(&exit); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ CHECK_EQ(0x31415926L, res); -+} -+ -+TEST(LA6) { -+ // Test loads and stores instruction. -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct T { -+ int64_t si1; -+ int64_t si2; -+ int64_t si3; -+ int64_t result_ld_b_si1; -+ int64_t result_ld_b_si2; -+ int64_t result_ld_h_si1; -+ int64_t result_ld_h_si2; -+ int64_t result_ld_w_si1; -+ int64_t result_ld_w_si2; -+ int64_t result_ld_d_si1; -+ int64_t result_ld_d_si3; -+ int64_t result_ld_bu_si2; -+ int64_t result_ld_hu_si2; -+ int64_t result_ld_wu_si2; -+ int64_t result_st_b; -+ int64_t result_st_h; -+ int64_t result_st_w; -+ }; -+ T t; -+ -+ // Ld_b -+ __ Ld_b(a4, MemOperand(a0, offsetof(T, si1))); -+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si1))); -+ -+ __ Ld_b(a4, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si2))); -+ -+ // Ld_h -+ __ Ld_h(a5, MemOperand(a0, offsetof(T, si1))); -+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si1))); -+ -+ __ Ld_h(a5, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si2))); -+ -+ // Ld_w -+ __ Ld_w(a6, MemOperand(a0, offsetof(T, si1))); -+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si1))); -+ -+ __ Ld_w(a6, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si2))); -+ -+ // Ld_d -+ __ Ld_d(a7, MemOperand(a0, offsetof(T, si1))); -+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si1))); -+ -+ __ Ld_d(a7, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si3))); -+ -+ // Ld_bu -+ __ Ld_bu(t0, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ld_bu_si2))); -+ -+ // Ld_hu -+ __ Ld_hu(t1, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ld_hu_si2))); -+ -+ // Ld_wu -+ __ Ld_wu(t2, MemOperand(a0, offsetof(T, si2))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_ld_wu_si2))); -+ -+ // St -+ __ li(t4, 0x11111111); -+ -+ // St_b -+ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(t5, MemOperand(a0, offsetof(T, result_st_b))); -+ __ St_b(t4, MemOperand(a0, offsetof(T, result_st_b))); -+ -+ // St_h -+ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(t6, MemOperand(a0, offsetof(T, result_st_h))); -+ __ St_h(t4, MemOperand(a0, offsetof(T, result_st_h))); -+ -+ // St_w -+ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(t7, MemOperand(a0, offsetof(T, result_st_w))); -+ __ St_w(t4, MemOperand(a0, offsetof(T, result_st_w))); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.si1 = 0x11223344; -+ t.si2 = 0x99AABBCC; -+ t.si3 = 0x1122334455667788; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(0x44), t.result_ld_b_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ld_b_si2); -+ -+ CHECK_EQ(static_cast(0x3344), t.result_ld_h_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ld_h_si2); -+ -+ 
CHECK_EQ(static_cast(0x11223344), t.result_ld_w_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ld_w_si2); -+ -+ CHECK_EQ(static_cast(0x11223344), t.result_ld_d_si1); -+ CHECK_EQ(static_cast(0x1122334455667788), t.result_ld_d_si3); -+ -+ CHECK_EQ(static_cast(0xCC), t.result_ld_bu_si2); -+ CHECK_EQ(static_cast(0xBBCC), t.result_ld_hu_si2); -+ CHECK_EQ(static_cast(0x99AABBCC), t.result_ld_wu_si2); -+ -+ CHECK_EQ(static_cast(0x1122334455667711), t.result_st_b); -+ CHECK_EQ(static_cast(0x1122334455661111), t.result_st_h); -+ CHECK_EQ(static_cast(0x1122334411111111), t.result_st_w); -+} -+ -+TEST(LA7) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct T { -+ int64_t si1; -+ int64_t si2; -+ int64_t si3; -+ int64_t result_ldx_b_si1; -+ int64_t result_ldx_b_si2; -+ int64_t result_ldx_h_si1; -+ int64_t result_ldx_h_si2; -+ int64_t result_ldx_w_si1; -+ int64_t result_ldx_w_si2; -+ int64_t result_ldx_d_si1; -+ int64_t result_ldx_d_si3; -+ int64_t result_ldx_bu_si2; -+ int64_t result_ldx_hu_si2; -+ int64_t result_ldx_wu_si2; -+ int64_t result_stx_b; -+ int64_t result_stx_h; -+ int64_t result_stx_w; -+ }; -+ T t; -+ -+ // ldx_b -+ __ li(a2, static_cast(offsetof(T, si1))); -+ __ Ld_b(a4, MemOperand(a0, a2)); -+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si1))); -+ -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_b(a4, MemOperand(a0, a2)); -+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si2))); -+ -+ // ldx_h -+ __ li(a2, static_cast(offsetof(T, si1))); -+ __ Ld_h(a5, MemOperand(a0, a2)); -+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si1))); -+ -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_h(a5, MemOperand(a0, a2)); -+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si2))); -+ -+ // ldx_w -+ __ li(a2, static_cast(offsetof(T, si1))); -+ __ Ld_w(a6, MemOperand(a0, a2)); -+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si1))); -+ -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_w(a6, MemOperand(a0, a2)); -+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si2))); -+ -+ // Ld_d -+ __ li(a2, static_cast(offsetof(T, si1))); -+ __ Ld_d(a7, MemOperand(a0, a2)); -+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si1))); -+ -+ __ li(a2, static_cast(offsetof(T, si3))); -+ __ Ld_d(a7, MemOperand(a0, a2)); -+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si3))); -+ -+ // Ld_bu -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_bu(t0, MemOperand(a0, a2)); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ldx_bu_si2))); -+ -+ // Ld_hu -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_hu(t1, MemOperand(a0, a2)); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ldx_hu_si2))); -+ -+ // Ld_wu -+ __ li(a2, static_cast(offsetof(T, si2))); -+ __ Ld_wu(t2, MemOperand(a0, a2)); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_ldx_wu_si2))); -+ -+ // St -+ __ li(t4, 0x11111111); -+ -+ // St_b -+ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(t5, MemOperand(a0, offsetof(T, result_stx_b))); -+ __ li(a2, static_cast(offsetof(T, result_stx_b))); -+ __ St_b(t4, MemOperand(a0, a2)); -+ -+ // St_h -+ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); -+ __ St_d(t6, MemOperand(a0, offsetof(T, result_stx_h))); -+ __ li(a2, static_cast(offsetof(T, result_stx_h))); -+ __ St_h(t4, MemOperand(a0, a2)); -+ -+ // St_w -+ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); -+ __ li(a2, static_cast(offsetof(T, 
result_stx_w))); -+ __ St_d(t7, MemOperand(a0, a2)); -+ __ li(a3, static_cast(offsetof(T, result_stx_w))); -+ __ St_w(t4, MemOperand(a0, a3)); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.si1 = 0x11223344; -+ t.si2 = 0x99AABBCC; -+ t.si3 = 0x1122334455667788; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(0x44), t.result_ldx_b_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ldx_b_si2); -+ -+ CHECK_EQ(static_cast(0x3344), t.result_ldx_h_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ldx_h_si2); -+ -+ CHECK_EQ(static_cast(0x11223344), t.result_ldx_w_si1); -+ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ldx_w_si2); -+ -+ CHECK_EQ(static_cast(0x11223344), t.result_ldx_d_si1); -+ CHECK_EQ(static_cast(0x1122334455667788), t.result_ldx_d_si3); -+ -+ CHECK_EQ(static_cast(0xCC), t.result_ldx_bu_si2); -+ CHECK_EQ(static_cast(0xBBCC), t.result_ldx_hu_si2); -+ CHECK_EQ(static_cast(0x99AABBCC), t.result_ldx_wu_si2); -+ -+ CHECK_EQ(static_cast(0x1122334455667711), t.result_stx_b); -+ CHECK_EQ(static_cast(0x1122334455661111), t.result_stx_h); -+ CHECK_EQ(static_cast(0x1122334411111111), t.result_stx_w); -+} -+ -+TEST(LDPTR_STPTR) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ int64_t test[10]; -+ -+ __ ldptr_w(a4, a0, 0); -+ __ stptr_d(a4, a0, 24); // test[3] -+ -+ __ ldptr_w(a5, a0, 8); // test[1] -+ __ stptr_d(a5, a0, 32); // test[4] -+ -+ __ ldptr_d(a6, a0, 16); // test[2] -+ __ stptr_d(a6, a0, 40); // test[5] -+ -+ __ li(t0, 0x11111111); -+ -+ __ stptr_d(a6, a0, 48); // test[6] -+ __ stptr_w(t0, a0, 48); // test[6] -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ test[0] = 0x11223344; -+ test[1] = 0x99AABBCC; -+ test[2] = 0x1122334455667788; -+ f.Call(&test, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(0x11223344), test[3]); -+ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), test[4]); -+ CHECK_EQ(static_cast(0x1122334455667788), test[5]); -+ CHECK_EQ(static_cast(0x1122334411111111), test[6]); -+} -+ -+TEST(LA8) { -+ // Test 32bit shift instructions. 
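-+  // Exercises register-amount shifts (sll_w/srl_w/sra_w/rotr_w) and their
-+  // immediate forms (slli_w/srli_w/srai_w/rotri_w) at amounts 0, 8, 10, 31.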
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ struct T { -+ int32_t input; -+ int32_t result_sll_w_0; -+ int32_t result_sll_w_8; -+ int32_t result_sll_w_10; -+ int32_t result_sll_w_31; -+ int32_t result_srl_w_0; -+ int32_t result_srl_w_8; -+ int32_t result_srl_w_10; -+ int32_t result_srl_w_31; -+ int32_t result_sra_w_0; -+ int32_t result_sra_w_8; -+ int32_t result_sra_w_10; -+ int32_t result_sra_w_31; -+ int32_t result_rotr_w_0; -+ int32_t result_rotr_w_8; -+ int32_t result_slli_w_0; -+ int32_t result_slli_w_8; -+ int32_t result_slli_w_10; -+ int32_t result_slli_w_31; -+ int32_t result_srli_w_0; -+ int32_t result_srli_w_8; -+ int32_t result_srli_w_10; -+ int32_t result_srli_w_31; -+ int32_t result_srai_w_0; -+ int32_t result_srai_w_8; -+ int32_t result_srai_w_10; -+ int32_t result_srai_w_31; -+ int32_t result_rotri_w_0; -+ int32_t result_rotri_w_8; -+ int32_t result_rotri_w_10; -+ int32_t result_rotri_w_31; -+ }; -+ T t; -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ __ Ld_w(a4, MemOperand(a0, offsetof(T, input))); -+ -+ // sll_w -+ __ li(a5, 0); -+ __ sll_w(t0, a4, a5); -+ __ li(a5, 0x8); -+ __ sll_w(t1, a4, a5); -+ __ li(a5, 0xA); -+ __ sll_w(t2, a4, a5); -+ __ li(a5, 0x1F); -+ __ sll_w(t3, a4, a5); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_sll_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_sll_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_sll_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_sll_w_31))); -+ -+ // srl_w -+ __ li(a5, 0x0); -+ __ srl_w(t0, a4, a5); -+ __ li(a5, 0x8); -+ __ srl_w(t1, a4, a5); -+ __ li(a5, 0xA); -+ __ srl_w(t2, a4, a5); -+ __ li(a5, 0x1F); -+ __ srl_w(t3, a4, a5); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srl_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srl_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srl_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srl_w_31))); -+ -+ // sra_w -+ __ li(a5, 0x0); -+ __ sra_w(t0, a4, a5); -+ __ li(a5, 0x8); -+ __ sra_w(t1, a4, a5); -+ -+ __ li(a6, static_cast(0x80000000)); -+ __ add_w(a6, a6, a4); -+ __ li(a5, 0xA); -+ __ sra_w(t2, a6, a5); -+ __ li(a5, 0x1F); -+ __ sra_w(t3, a6, a5); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_sra_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_sra_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_sra_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_sra_w_31))); -+ -+ // rotr -+ __ li(a5, 0x0); -+ __ rotr_w(t0, a4, a5); -+ __ li(a6, 0x8); -+ __ rotr_w(t1, a4, a6); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_w_8))); -+ -+ // slli_w -+ __ slli_w(t0, a4, 0); -+ __ slli_w(t1, a4, 0x8); -+ __ slli_w(t2, a4, 0xA); -+ __ slli_w(t3, a4, 0x1F); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_slli_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_slli_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_slli_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_slli_w_31))); -+ -+ // srli_w -+ __ srli_w(t0, a4, 0); -+ __ srli_w(t1, a4, 0x8); -+ __ srli_w(t2, a4, 0xA); -+ __ srli_w(t3, a4, 0x1F); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srli_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srli_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srli_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srli_w_31))); -+ -+ // srai_w -+ __ srai_w(t0, a4, 0); -+ __ srai_w(t1, 
a4, 0x8); -+ -+ __ li(a6, static_cast(0x80000000)); -+ __ add_w(a6, a6, a4); -+ __ srai_w(t2, a6, 0xA); -+ __ srai_w(t3, a6, 0x1F); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srai_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srai_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srai_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srai_w_31))); -+ -+ // rotri_w -+ __ rotri_w(t0, a4, 0); -+ __ rotri_w(t1, a4, 0x8); -+ __ rotri_w(t2, a4, 0xA); -+ __ rotri_w(t3, a4, 0x1F); -+ -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_w_0))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_w_8))); -+ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_w_10))); -+ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_w_31))); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.input = 0x12345678; -+ f.Call(&t, 0x0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_sll_w_0); -+ CHECK_EQ(static_cast(0x34567800), t.result_sll_w_8); -+ CHECK_EQ(static_cast(0xD159E000), t.result_sll_w_10); -+ CHECK_EQ(static_cast(0x0), t.result_sll_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_srl_w_0); -+ CHECK_EQ(static_cast(0x123456), t.result_srl_w_8); -+ CHECK_EQ(static_cast(0x48D15), t.result_srl_w_10); -+ CHECK_EQ(static_cast(0x0), t.result_srl_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_sra_w_0); -+ CHECK_EQ(static_cast(0x123456), t.result_sra_w_8); -+ CHECK_EQ(static_cast(0xFFE48D15), t.result_sra_w_10); -+ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_sra_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_rotr_w_0); -+ CHECK_EQ(static_cast(0x78123456), t.result_rotr_w_8); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_slli_w_0); -+ CHECK_EQ(static_cast(0x34567800), t.result_slli_w_8); -+ CHECK_EQ(static_cast(0xD159E000), t.result_slli_w_10); -+ CHECK_EQ(static_cast(0x0), t.result_slli_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_srli_w_0); -+ CHECK_EQ(static_cast(0x123456), t.result_srli_w_8); -+ CHECK_EQ(static_cast(0x48D15), t.result_srli_w_10); -+ CHECK_EQ(static_cast(0x0), t.result_srli_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_srai_w_0); -+ CHECK_EQ(static_cast(0x123456), t.result_srai_w_8); -+ CHECK_EQ(static_cast(0xFFE48D15), t.result_srai_w_10); -+ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_srai_w_31); -+ -+ CHECK_EQ(static_cast(0x12345678), t.result_rotri_w_0); -+ CHECK_EQ(static_cast(0x78123456), t.result_rotri_w_8); -+ CHECK_EQ(static_cast(0x9E048D15), t.result_rotri_w_10); -+ CHECK_EQ(static_cast(0x2468ACF0), t.result_rotri_w_31); -+} -+ -+TEST(LA9) { -+ // Test 64bit shift instructions. 
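  // Editorial sketch: as in LA8, the 64-bit expectations below follow from
  // ordinary C++ shifts on uint64_t; a 13-bit right rotate is the usual
  // (x >> 13) | (x << 51) shift pair:
  {
    uint64_t x = 0x51F4B764A26E7412;
    CHECK_EQ(0x96EC944DCE824000ULL, x << 13);                // sll_d / slli_d
    CHECK_EQ(0x00028FA5BB251373ULL, x >> 13);                // srl_d / srli_d
    CHECK_EQ(0xA0928FA5BB251373ULL, (x >> 13) | (x << 51));  // rotr_d / rotri_d
  }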
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ struct T { -+ int64_t input; -+ int64_t result_sll_d_0; -+ int64_t result_sll_d_13; -+ int64_t result_sll_d_30; -+ int64_t result_sll_d_63; -+ int64_t result_srl_d_0; -+ int64_t result_srl_d_13; -+ int64_t result_srl_d_30; -+ int64_t result_srl_d_63; -+ int64_t result_sra_d_0; -+ int64_t result_sra_d_13; -+ int64_t result_sra_d_30; -+ int64_t result_sra_d_63; -+ int64_t result_rotr_d_0; -+ int64_t result_rotr_d_13; -+ int64_t result_slli_d_0; -+ int64_t result_slli_d_13; -+ int64_t result_slli_d_30; -+ int64_t result_slli_d_63; -+ int64_t result_srli_d_0; -+ int64_t result_srli_d_13; -+ int64_t result_srli_d_30; -+ int64_t result_srli_d_63; -+ int64_t result_srai_d_0; -+ int64_t result_srai_d_13; -+ int64_t result_srai_d_30; -+ int64_t result_srai_d_63; -+ int64_t result_rotri_d_0; -+ int64_t result_rotri_d_13; -+ int64_t result_rotri_d_30; -+ int64_t result_rotri_d_63; -+ }; -+ -+ T t; -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ __ Ld_d(a4, MemOperand(a0, offsetof(T, input))); -+ -+ // sll_d -+ __ li(a5, 0); -+ __ sll_d(t0, a4, a5); -+ __ li(a5, 0xD); -+ __ sll_d(t1, a4, a5); -+ __ li(a5, 0x1E); -+ __ sll_d(t2, a4, a5); -+ __ li(a5, 0x3F); -+ __ sll_d(t3, a4, a5); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_sll_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_sll_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_sll_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_sll_d_63))); -+ -+ // srl_d -+ __ li(a5, 0x0); -+ __ srl_d(t0, a4, a5); -+ __ li(a5, 0xD); -+ __ srl_d(t1, a4, a5); -+ __ li(a5, 0x1E); -+ __ srl_d(t2, a4, a5); -+ __ li(a5, 0x3F); -+ __ srl_d(t3, a4, a5); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srl_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srl_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srl_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srl_d_63))); -+ -+ // sra_d -+ __ li(a5, 0x0); -+ __ sra_d(t0, a4, a5); -+ __ li(a5, 0xD); -+ __ sra_d(t1, a4, a5); -+ -+ __ li(a6, static_cast(0x8000000000000000)); -+ __ add_d(a6, a6, a4); -+ __ li(a5, 0x1E); -+ __ sra_d(t2, a6, a5); -+ __ li(a5, 0x3F); -+ __ sra_d(t3, a6, a5); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_sra_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_sra_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_sra_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_sra_d_63))); -+ -+ // rotr -+ __ li(a5, 0x0); -+ __ rotr_d(t0, a4, a5); -+ __ li(a6, 0xD); -+ __ rotr_d(t1, a4, a6); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_d_13))); -+ -+ // slli_d -+ __ slli_d(t0, a4, 0); -+ __ slli_d(t1, a4, 0xD); -+ __ slli_d(t2, a4, 0x1E); -+ __ slli_d(t3, a4, 0x3F); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_slli_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_slli_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_slli_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_slli_d_63))); -+ -+ // srli_d -+ __ srli_d(t0, a4, 0); -+ __ srli_d(t1, a4, 0xD); -+ __ srli_d(t2, a4, 0x1E); -+ __ srli_d(t3, a4, 0x3F); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srli_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srli_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srli_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srli_d_63))); -+ -+ // srai_d -+ __ 
srai_d(t0, a4, 0); -+ __ srai_d(t1, a4, 0xD); -+ -+ __ li(a6, static_cast(0x8000000000000000)); -+ __ add_d(a6, a6, a4); -+ __ srai_d(t2, a6, 0x1E); -+ __ srai_d(t3, a6, 0x3F); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srai_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srai_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srai_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srai_d_63))); -+ -+ // rotri_d -+ __ rotri_d(t0, a4, 0); -+ __ rotri_d(t1, a4, 0xD); -+ __ rotri_d(t2, a4, 0x1E); -+ __ rotri_d(t3, a4, 0x3F); -+ -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_d_0))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_d_13))); -+ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_d_30))); -+ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_d_63))); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.input = 0x51F4B764A26E7412; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sll_d_0); -+ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_sll_d_13); -+ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_sll_d_30); -+ CHECK_EQ(static_cast(0x0), t.result_sll_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srl_d_0); -+ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srl_d_13); -+ CHECK_EQ(static_cast(0x147d2dd92), t.result_srl_d_30); -+ CHECK_EQ(static_cast(0x0), t.result_srl_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sra_d_0); -+ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_sra_d_13); -+ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_sra_d_30); -+ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_sra_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotr_d_0); -+ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotr_d_13); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_slli_d_0); -+ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_slli_d_13); -+ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_slli_d_30); -+ CHECK_EQ(static_cast(0x0), t.result_slli_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srli_d_0); -+ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srli_d_13); -+ CHECK_EQ(static_cast(0x147d2dd92), t.result_srli_d_30); -+ CHECK_EQ(static_cast(0x0), t.result_srli_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srai_d_0); -+ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srai_d_13); -+ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_srai_d_30); -+ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_srai_d_63); -+ -+ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotri_d_0); -+ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotri_d_13); -+ CHECK_EQ(static_cast(0x89b9d04947d2dd92), t.result_rotri_d_30); -+ CHECK_EQ(static_cast(0xa3e96ec944dce824), t.result_rotri_d_63); -+} -+ -+TEST(LA10) { -+ // Test 32bit bit operation instructions. 
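  // Editorial sketch: bstrpick_w rd, rj, msbw, lsbw extracts bit field
  // [msbw:lsbw] from the low word of rj, and bstrins_w deposits the low
  // (msbw - lsbw + 1) bits of rj into that field of rd; with rd zeroed first
  // (as this test does), the expectations reduce to mask-and-shift arithmetic:
  {
    uint32_t w = static_cast<uint32_t>(0x51F4B764A26E7412);  // low word of si1
    CHECK_EQ(0x341u, (w >> 4) & ((1u << 10) - 1));  // bstrpick_w(.., 0xD, 0x4)
    CHECK_EQ(0x120u, (w & ((1u << 10) - 1)) << 4);  // bstrins_w(.., 0xD, 0x4)
  }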
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct T { -+ int64_t si1; -+ int64_t si2; -+ int32_t result_ext_w_b_si1; -+ int32_t result_ext_w_b_si2; -+ int32_t result_ext_w_h_si1; -+ int32_t result_ext_w_h_si2; -+ int32_t result_clo_w_si1; -+ int32_t result_clo_w_si2; -+ int32_t result_clz_w_si1; -+ int32_t result_clz_w_si2; -+ int32_t result_cto_w_si1; -+ int32_t result_cto_w_si2; -+ int32_t result_ctz_w_si1; -+ int32_t result_ctz_w_si2; -+ int32_t result_bytepick_w_si1; -+ int32_t result_bytepick_w_si2; -+ int32_t result_revb_2h_si1; -+ int32_t result_revb_2h_si2; -+ int32_t result_bitrev_4b_si1; -+ int32_t result_bitrev_4b_si2; -+ int32_t result_bitrev_w_si1; -+ int32_t result_bitrev_w_si2; -+ int32_t result_bstrins_w_si1; -+ int32_t result_bstrins_w_si2; -+ int32_t result_bstrpick_w_si1; -+ int32_t result_bstrpick_w_si2; -+ }; -+ T t; -+ -+ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1))); -+ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2))); -+ -+ // ext_w_b -+ __ ext_w_b(t0, a4); -+ __ ext_w_b(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_b_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_b_si2))); -+ -+ // ext_w_h -+ __ ext_w_h(t0, a4); -+ __ ext_w_h(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_h_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_h_si2))); -+ -+ /* //clo_w -+ __ clo_w(t0, a4); -+ __ clo_w(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_w_si2)));*/ -+ -+ // clz_w -+ __ clz_w(t0, a4); -+ __ clz_w(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clz_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clz_w_si2))); -+ -+ /* //cto_w -+ __ cto_w(t0, a4); -+ __ cto_w(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_w_si2)));*/ -+ -+ // ctz_w -+ __ ctz_w(t0, a4); -+ __ ctz_w(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ctz_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ctz_w_si2))); -+ -+ // bytepick_w -+ __ bytepick_w(t0, a4, a5, 0); -+ __ bytepick_w(t1, a5, a4, 2); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bytepick_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bytepick_w_si2))); -+ -+ // revb_2h -+ __ revb_2h(t0, a4); -+ __ revb_2h(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_revb_2h_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_revb_2h_si2))); -+ -+ // bitrev -+ __ bitrev_4b(t0, a4); -+ __ bitrev_4b(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_4b_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_4b_si2))); -+ -+ // bitrev_w -+ __ bitrev_w(t0, a4); -+ __ bitrev_w(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_w_si2))); -+ -+ // bstrins -+ __ or_(t0, zero_reg, zero_reg); -+ __ or_(t1, zero_reg, zero_reg); -+ __ bstrins_w(t0, a4, 0xD, 0x4); -+ __ bstrins_w(t1, a5, 0x16, 0x5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrins_w_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bstrins_w_si2))); -+ -+ // bstrpick -+ __ or_(t0, zero_reg, zero_reg); -+ __ or_(t1, zero_reg, zero_reg); -+ __ bstrpick_w(t0, a4, 0xD, 0x4); -+ __ bstrpick_w(t1, a5, 0x16, 0x5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrpick_w_si1))); -+ 
__ St_w(t1, MemOperand(a0, offsetof(T, result_bstrpick_w_si2)));
-+
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  t.si1 = 0x51F4B764A26E7412;
-+  t.si2 = 0x81F25A87C423B891;
-+  f.Call(&t, 0, 0, 0, 0);
-+
-+  CHECK_EQ(static_cast<int32_t>(0x12), t.result_ext_w_b_si1);
-+  CHECK_EQ(static_cast<int32_t>(0xffffff91), t.result_ext_w_b_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x7412), t.result_ext_w_h_si1);
-+  CHECK_EQ(static_cast<int32_t>(0xffffb891), t.result_ext_w_h_si2);
-+  // CHECK_EQ(static_cast<int32_t>(0x1), t.result_clo_w_si1);
-+  // CHECK_EQ(static_cast<int32_t>(0x2), t.result_clo_w_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x0), t.result_clz_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x0), t.result_clz_w_si2);
-+  // CHECK_EQ(static_cast<int32_t>(0x0), t.result_cto_w_si1);
-+  // CHECK_EQ(static_cast<int32_t>(0x1), t.result_cto_w_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x1), t.result_ctz_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x0), t.result_ctz_w_si2);
-+  CHECK_EQ(static_cast<int32_t>(0xc423b891), t.result_bytepick_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x7412c423),
-+           t.result_bytepick_w_si2);  // 0xffffc423
-+  CHECK_EQ(static_cast<int32_t>(0x6ea21274), t.result_revb_2h_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x23c491b8), t.result_revb_2h_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x45762e48), t.result_bitrev_4b_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x23c41d89), t.result_bitrev_4b_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x482e7645), t.result_bitrev_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x891dc423), t.result_bitrev_w_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x120), t.result_bstrins_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x771220), t.result_bstrins_w_si2);
-+  CHECK_EQ(static_cast<int32_t>(0x341), t.result_bstrpick_w_si1);
-+  CHECK_EQ(static_cast<int32_t>(0x11dc4), t.result_bstrpick_w_si2);
-+}
-+
-+TEST(LA11) {
-+  // Test 64bit bit operation instructions.
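  // Editorial sketch: revb_d is a full 64-bit byte swap and
  // bstrpick_d(rd, rj, 63, 48) extracts the top 16 bits; both expectations
  // can be cross-checked on the host (the byte swap below leans on the
  // GCC/Clang __builtin_bswap64, an assumption about the host toolchain):
  {
    uint64_t si1 = 0x10C021098B710CDE, si2 = 0xFB8017FF781A15C3;
    CHECK_EQ(0xDE0C718B0921C010ULL, __builtin_bswap64(si1));  // revb_d
    CHECK_EQ(0xFB80ULL, si2 >> 48);  // bstrpick_d(.., 63, 48)
  }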
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct T { -+ int64_t si1; -+ int64_t si2; -+ int64_t result_clo_d_si1; -+ int64_t result_clo_d_si2; -+ int64_t result_clz_d_si1; -+ int64_t result_clz_d_si2; -+ int64_t result_cto_d_si1; -+ int64_t result_cto_d_si2; -+ int64_t result_ctz_d_si1; -+ int64_t result_ctz_d_si2; -+ int64_t result_bytepick_d_si1; -+ int64_t result_bytepick_d_si2; -+ int64_t result_revb_4h_si1; -+ int64_t result_revb_4h_si2; -+ int64_t result_revb_2w_si1; -+ int64_t result_revb_2w_si2; -+ int64_t result_revb_d_si1; -+ int64_t result_revb_d_si2; -+ int64_t result_revh_2w_si1; -+ int64_t result_revh_2w_si2; -+ int64_t result_revh_d_si1; -+ int64_t result_revh_d_si2; -+ int64_t result_bitrev_8b_si1; -+ int64_t result_bitrev_8b_si2; -+ int64_t result_bitrev_d_si1; -+ int64_t result_bitrev_d_si2; -+ int64_t result_bstrins_d_si1; -+ int64_t result_bstrins_d_si2; -+ int64_t result_bstrpick_d_si1; -+ int64_t result_bstrpick_d_si2; -+ int64_t result_maskeqz_si1; -+ int64_t result_maskeqz_si2; -+ int64_t result_masknez_si1; -+ int64_t result_masknez_si2; -+ }; -+ -+ T t; -+ -+ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1))); -+ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2))); -+ -+ /* //clo_d -+ __ clo_d(t0, a4); -+ __ clo_d(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_d_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_d_si2)));*/ -+ -+ // clz_d -+ __ or_(t0, zero_reg, zero_reg); -+ __ clz_d(t0, a4); -+ __ clz_d(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_clz_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_clz_d_si2))); -+ -+ /* //cto_d -+ __ cto_d(t0, a4); -+ __ cto_d(t1, a5); -+ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_d_si1))); -+ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_d_si2)));*/ -+ -+ // ctz_d -+ __ ctz_d(t0, a4); -+ __ ctz_d(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ctz_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ctz_d_si2))); -+ -+ // bytepick_d -+ __ bytepick_d(t0, a4, a5, 0); -+ __ bytepick_d(t1, a5, a4, 5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bytepick_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bytepick_d_si2))); -+ -+ // revb_4h -+ __ revb_4h(t0, a4); -+ __ revb_4h(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_4h_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_4h_si2))); -+ -+ // revb_2w -+ __ revb_2w(t0, a4); -+ __ revb_2w(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_2w_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_2w_si2))); -+ -+ // revb_d -+ __ revb_d(t0, a4); -+ __ revb_d(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_d_si2))); -+ -+ // revh_2w -+ __ revh_2w(t0, a4); -+ __ revh_2w(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_2w_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_2w_si2))); -+ -+ // revh_d -+ __ revh_d(t0, a4); -+ __ revh_d(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_d_si2))); -+ -+ // bitrev_8b -+ __ bitrev_8b(t0, a4); -+ __ bitrev_8b(t1, a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_8b_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_8b_si2))); -+ -+ // bitrev_d -+ __ bitrev_d(t0, a4); -+ __ bitrev_d(t1, 
a5); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_d_si2))); -+ -+ // bstrins_d -+ __ or_(t0, zero_reg, zero_reg); -+ __ or_(t1, zero_reg, zero_reg); -+ __ bstrins_d(t0, a4, 5, 0); -+ __ bstrins_d(t1, a5, 39, 12); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrins_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrins_d_si2))); -+ -+ // bstrpick_d -+ __ or_(t0, zero_reg, zero_reg); -+ __ or_(t1, zero_reg, zero_reg); -+ __ bstrpick_d(t0, a4, 5, 0); -+ __ bstrpick_d(t1, a5, 63, 48); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrpick_d_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrpick_d_si2))); -+ -+ // maskeqz -+ __ maskeqz(t0, a4, a4); -+ __ maskeqz(t1, a5, zero_reg); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_maskeqz_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_maskeqz_si2))); -+ -+ // masknez -+ __ masknez(t0, a4, a4); -+ __ masknez(t1, a5, zero_reg); -+ __ St_d(t0, MemOperand(a0, offsetof(T, result_masknez_si1))); -+ __ St_d(t1, MemOperand(a0, offsetof(T, result_masknez_si2))); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.si1 = 0x10C021098B710CDE; -+ t.si2 = 0xFB8017FF781A15C3; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ // CHECK_EQ(static_cast(0x0), t.result_clo_d_si1); -+ // CHECK_EQ(static_cast(0x5), t.result_clo_d_si2); -+ CHECK_EQ(static_cast(0x3), t.result_clz_d_si1); -+ CHECK_EQ(static_cast(0x0), t.result_clz_d_si2); -+ // CHECK_EQ(static_cast(0x0), t.result_cto_d_si1); -+ // CHECK_EQ(static_cast(0x2), t.result_cto_d_si2); -+ CHECK_EQ(static_cast(0x1), t.result_ctz_d_si1); -+ CHECK_EQ(static_cast(0x0), t.result_ctz_d_si2); -+ CHECK_EQ(static_cast(0xfb8017ff781a15c3), t.result_bytepick_d_si1); -+ CHECK_EQ(static_cast(0x710cde0000000000), t.result_bytepick_d_si2); -+ CHECK_EQ(static_cast(0xc0100921718bde0c), t.result_revb_4h_si1); -+ CHECK_EQ(static_cast(0x80fbff171a78c315), t.result_revb_4h_si2); -+ CHECK_EQ(static_cast(0x921c010de0c718b), t.result_revb_2w_si1); -+ CHECK_EQ(static_cast(0xff1780fbc3151a78), t.result_revb_2w_si2); -+ CHECK_EQ(static_cast(0xde0c718b0921c010), t.result_revb_d_si1); -+ CHECK_EQ(static_cast(0xc3151a78ff1780fb), t.result_revb_d_si2); -+ CHECK_EQ(static_cast(0x210910c00cde8b71), t.result_revh_2w_si1); -+ CHECK_EQ(static_cast(0x17fffb8015c3781a), t.result_revh_2w_si2); -+ CHECK_EQ(static_cast(0xcde8b71210910c0), t.result_revh_d_si1); -+ CHECK_EQ(static_cast(0x15c3781a17fffb80), t.result_revh_d_si2); -+ CHECK_EQ(static_cast(0x8038490d18e307b), t.result_bitrev_8b_si1); -+ CHECK_EQ(static_cast(0xdf01e8ff1e58a8c3), t.result_bitrev_8b_si2); -+ CHECK_EQ(static_cast(0x7b308ed190840308), t.result_bitrev_d_si1); -+ CHECK_EQ(static_cast(0xc3a8581effe801df), t.result_bitrev_d_si2); -+ CHECK_EQ(static_cast(0x1e), t.result_bstrins_d_si1); -+ CHECK_EQ(static_cast(0x81a15c3000), t.result_bstrins_d_si2); -+ CHECK_EQ(static_cast(0x1e), t.result_bstrpick_d_si1); -+ CHECK_EQ(static_cast(0xfb80), t.result_bstrpick_d_si2); -+ CHECK_EQ(static_cast(0), t.result_maskeqz_si1); -+ CHECK_EQ(static_cast(0xFB8017FF781A15C3), t.result_maskeqz_si2); -+ CHECK_EQ(static_cast(0x10C021098B710CDE), t.result_masknez_si1); -+ CHECK_EQ(static_cast(0), t.result_masknez_si2); -+} -+ -+uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope 
scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ beq(a0, a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BEQ) { -+ CcTest::InitializeVM(); -+ struct TestCaseBeq { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBeq tc[] = { -+ // value1, value2, offset, expected_res -+ { 0, 0, -6, 0x3 }, -+ { 1, 1, -3, 0x30 }, -+ { -2, -2, 3, 0x300 }, -+ { 3, -3, 6, 0 }, -+ { 4, 4, 6, 0x700 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeq); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_beq(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bne(int64_t value1, int64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bne(a0, a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BNE) { -+ CcTest::InitializeVM(); -+ struct TestCaseBne { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBne tc[] = { -+ // value1, value2, offset, expected_res -+ { 1, -1, -6, 0x3 }, -+ { 2, -2, -3, 0x30 }, -+ { 3, -3, 3, 0x300 }, -+ { 4, -4, 6, 0x700 }, -+ { 0, 0, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBne); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bne(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_blt(int64_t value1, int64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ 
addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ blt(a0, a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BLT) { -+ CcTest::InitializeVM(); -+ struct TestCaseBlt { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBlt tc[] = { -+ // value1, value2, offset, expected_res -+ { -1, 1, -6, 0x3 }, -+ { -2, 2, -3, 0x30 }, -+ { -3, 3, 3, 0x300 }, -+ { -4, 4, 6, 0x700 }, -+ { 5, -5, 6, 0 }, -+ { 0, 0, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBlt); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_blt(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bge(uint64_t value1, uint64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bge(a0, a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BGE) { -+ CcTest::InitializeVM(); -+ struct TestCaseBge { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBge tc[] = { -+ // value1, value2, offset, expected_res -+ { 0, 0, -6, 0x3 }, -+ { 1, 1, -3, 0x30 }, -+ { 2, -2, 3, 0x300 }, -+ { 3, -3, 6, 0x700 }, -+ { -4, 4, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBge); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bge(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bltu(int64_t value1, int64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bltu(a0, 
a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BLTU) { -+ CcTest::InitializeVM(); -+ struct TestCaseBltu { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBltu tc[] = { -+ // value1, value2, offset, expected_res -+ { 0, 1, -6, 0x3 }, -+ { 1, -1, -3, 0x30 }, -+ { 2, -2, 3, 0x300 }, -+ { 3, -3, 6, 0x700 }, -+ { 4, 4, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBltu); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bltu(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bgeu(int64_t value1, int64_t value2, int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bgeu(a0, a1, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value1, value2, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BGEU) { -+ CcTest::InitializeVM(); -+ struct TestCaseBgeu { -+ int64_t value1; -+ int64_t value2; -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBgeu tc[] = { -+ // value1, value2, offset, expected_res -+ { 0, 0, -6, 0x3 }, -+ { -1, 1, -3, 0x30 }, -+ { -2, 2, 3, 0x300 }, -+ { -3, 3, 6, 0x700 }, -+ { 4, -4, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBgeu); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bgeu(tc[i].value1, tc[i].value2, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_beqz(int64_t value, int32_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(&L); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ beqz(a0, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(&L); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 
0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BEQZ) { -+ CcTest::InitializeVM(); -+ struct TestCaseBeqz { -+ int64_t value; -+ int32_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBeqz tc[] = { -+ // value, offset, expected_res -+ { 0, -6, 0x3 }, -+ { 0, -3, 0x30 }, -+ { 0, 3, 0x300 }, -+ { 0, 6, 0x700 }, -+ { 1, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqz); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_beqz(tc[i].value, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bnez_b(int64_t value, int32_t offset) { -+ // bnez, b. -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0l); -+ __ b(&main_block); -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ b(5); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ b(2); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bnez(a0, offset); -+ __ bind(&L); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ b(-4); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ b(-7); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(value, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BNEZ_B) { -+ CcTest::InitializeVM(); -+ struct TestCaseBnez { -+ int64_t value; -+ int32_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBnez tc[] = { -+ // value, offset, expected_res -+ { 1, -6, 0x3 }, -+ { -2, -3, 0x30 }, -+ { 3, 3, 0x300 }, -+ { -4, 6, 0x700 }, -+ { 0, 6, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBnez); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bnez_b(tc[i].value, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bl(int32_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block; -+ __ li(a2, 0l); -+ __ push(ra); // push is implemented by two instructions, addi_d and st_d -+ __ b(&main_block); -+ -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ bl(offset); -+ __ or_(a0, a2, zero_reg); -+ __ pop(ra); // pop is implemented by two instructions, ld_d and addi_d. 
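  // Editorial note: the raw offsets passed to bl here (and to the beq/bne
  // family in the run_* helpers above) count 4-byte instructions relative to
  // the branch itself. Assuming li(a2, 0l) expands to a single instruction
  // (the expected results are only consistent with that), bl sits 10
  // instructions into this generator: li(1) + push(2) + b(1) + two
  // 3-instruction blocks. Offset -6 therefore lands on Block 1's first
  // addi_d, -3 on Block 2's, +5 on Block 4's and +8 on Block 5's, giving the
  // 0x3/0x30/0x300/0x700 results checked in TEST(BL).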
-+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BL) { -+ CcTest::InitializeVM(); -+ struct TestCaseBl { -+ int32_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBl tc[] = { -+ // offset, expected_res -+ { -6, 0x3 }, -+ { -3, 0x30 }, -+ { 5, 0x300 }, -+ { 8, 0x700 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBl); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bl(tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+TEST(PCADD) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label exit, error; -+ __ push(ra); -+ -+ // pcaddi -+ __ li(a4, 0x1FFFFC); -+ __ li(a5, 0); -+ __ li(a6, static_cast(0xFFE00000)); -+ -+ __ bl(1); -+ __ pcaddi(a3, 0x7FFFF); -+ __ add_d(a2, ra, a4); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ bl(1); -+ __ pcaddi(a3, 0); -+ __ add_d(a2, ra, a5); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ bl(1); -+ __ pcaddi(a3, 0x80000); -+ __ add_d(a2, ra, a6); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ // pcaddu12i -+ __ li(a4, 0x7FFFF000); -+ __ li(a5, 0); -+ __ li(a6, static_cast(0x80000000)); -+ -+ __ bl(1); -+ __ pcaddu12i(a2, 0x7FFFF); -+ __ add_d(a3, ra, a4); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ bl(1); -+ __ pcaddu12i(a2, 0); -+ __ add_d(a3, ra, a5); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ bl(1); -+ __ pcaddu12i(a2, 0x80000); -+ __ add_d(a3, ra, a6); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ // pcaddu18i -+ __ li(a4, 0x1FFFFC0000); -+ __ li(a5, 0); -+ __ li(a6, static_cast(0xFFFFFFE000000000)); -+ -+ __ bl(1); -+ __ pcaddu18i(a2, 0x7FFFF); -+ __ add_d(a3, ra, a4); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ bl(1); -+ __ pcaddu18i(a2, 0); -+ __ add_d(a3, ra, a5); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ bl(1); -+ __ pcaddu18i(a2, 0x80000); -+ __ add_d(a3, ra, a6); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ // pcalau12i -+ __ li(a4, 0x7FFFF000); -+ __ li(a5, 0); -+ __ li(a6, static_cast(0x80000000)); -+ __ li(a7, static_cast(0xFFFFFFFFFFFFF000)); -+ -+ __ bl(1); -+ __ pcalau12i(a3, 0x7FFFF); -+ __ add_d(a2, ra, a4); -+ __ and_(t0, a2, a7); -+ __ and_(t1, a3, a7); -+ __ Branch(&error, ne, t0, Operand(t1)); -+ -+ __ bl(1); -+ __ pcalau12i(a3, 0); -+ __ add_d(a2, ra, a5); -+ __ and_(t0, a2, a7); -+ __ and_(t1, a3, a7); -+ __ Branch(&error, ne, t0, Operand(t1)); -+ -+ __ bl(1); -+ __ pcalau12i(a2, 0x80000); -+ __ add_d(a3, ra, a6); -+ __ and_(t0, a2, a7); -+ __ and_(t1, a3, a7); -+ __ Branch(&error, ne, t0, Operand(t1)); -+ -+ __ li(a0, 0x31415926); -+ __ b(&exit); -+ -+ __ bind(&error); -+ __ li(a0, 0x666); -+ -+ __ bind(&exit); -+ __ pop(ra); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ 
CHECK_EQ(0x31415926L, res); -+} -+ -+uint64_t run_jirl(int16_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block; -+ __ li(a2, 0l); -+ __ push(ra); -+ __ b(&main_block); -+ -+ // Block 1 -+ __ addi_d(a2, a2, 0x1); -+ __ addi_d(a2, a2, 0x2); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 2 -+ __ addi_d(a2, a2, 0x10); -+ __ addi_d(a2, a2, 0x20); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ pcaddi(a3, 1); -+ __ jirl(ra, a3, offset); -+ __ or_(a0, a2, zero_reg); -+ __ pop(ra); // pop is implemented by two instructions, ld_d and addi_d. -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ __ addi_d(a2, a2, 0x100); -+ __ addi_d(a2, a2, 0x200); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 5 -+ __ addi_d(a2, a2, 0x300); -+ __ addi_d(a2, a2, 0x400); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(JIRL) { -+ CcTest::InitializeVM(); -+ struct TestCaseJirl { -+ int16_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseJirl tc[] = { -+ // offset, expected_res -+ { -7, 0x3 }, -+ { -4, 0x30 }, -+ { 5, 0x300 }, -+ { 8, 0x700 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJirl); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_jirl(tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+TEST(LA12) { -+ // Test floating point calculate instructions. -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ struct T { -+ double a; -+ double b; -+ double c; -+ double d; -+ double e; -+ double f; -+ double result_fadd_d; -+ double result_fsub_d; -+ double result_fmul_d; -+ double result_fdiv_d; -+ double result_fmadd_d; -+ double result_fmsub_d; -+ double result_fnmadd_d; -+ double result_fnmsub_d; -+ double result_fsqrt_d; -+ double result_frecip_d; -+ double result_frsqrt_d; -+ double result_fscaleb_d; -+ double result_flogb_d; -+ double result_fcopysign_d; -+ double result_fclass_d; -+ }; -+ T t; -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ // Double precision floating point instructions. 
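  // Editorial sketch: fmadd_d/fmsub_d compute fj * fk +/- fa with a single
  // rounding, so the expectations further down can be reproduced on the host
  // with std::fma (assuming <cmath> is visible in this translation unit).
  // f17 is the fmul_d product 120.0 * fmin_d(1.5, -2.75) = -330.0, fmadd_d
  // adds the fmina_d operand 1.5, and fmsub_d subtracts the fmaxa_d
  // operand -2.75:
  {
    double p = 120.0 * -2.75;                           // fmul_d result (f17)
    CHECK_EQ(-3.959850e04, std::fma(p, 120.0, 1.5));    // fmadd_d
    CHECK_EQ(-3.959725e04, std::fma(p, 120.0, 2.75));   // fmsub_d: p*120 - (-2.75)
  }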
-+ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(T, b))); -+ -+ __ fneg_d(f10, f8); -+ __ fadd_d(f11, f9, f10); -+ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_fadd_d))); -+ __ fabs_d(f11, f11); -+ __ fsub_d(f12, f11, f9); -+ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_fsub_d))); -+ -+ __ Fld_d(f13, MemOperand(a0, offsetof(T, c))); -+ __ Fld_d(f14, MemOperand(a0, offsetof(T, d))); -+ __ Fld_d(f15, MemOperand(a0, offsetof(T, e))); -+ -+ __ fmin_d(f16, f13, f14); -+ __ fmul_d(f17, f15, f16); -+ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_fmul_d))); -+ __ fmax_d(f18, f13, f14); -+ __ fdiv_d(f19, f15, f18); -+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fdiv_d))); -+ -+ __ fmina_d(f16, f13, f14); -+ __ fmadd_d(f18, f17, f15, f16); -+ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fmadd_d))); -+ __ fnmadd_d(f19, f17, f15, f16); -+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fnmadd_d))); -+ __ fmaxa_d(f16, f13, f14); -+ __ fmsub_d(f20, f17, f15, f16); -+ __ Fst_d(f20, MemOperand(a0, offsetof(T, result_fmsub_d))); -+ __ fnmsub_d(f21, f17, f15, f16); -+ __ Fst_d(f21, MemOperand(a0, offsetof(T, result_fnmsub_d))); -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(T, f))); -+ __ fsqrt_d(f10, f8); -+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_fsqrt_d))); -+ //__ frecip_d(f11, f10); -+ //__ frsqrt_d(f12, f8); -+ //__ Fst_d(f11, MemOperand(a0, offsetof(T, result_frecip_d))); -+ //__ Fst_d(f12, MemOperand(a0, offsetof(T, result_frsqrt_d))); -+ -+ /*__ fscaleb_d(f16, f13, f15); -+ __ flogb_d(f17, f15); -+ __ fcopysign_d(f18, f8, f9); -+ __ fclass_d(f19, f9); -+ __ Fst_d(f16, MemOperand(a0, offsetof(T, result_fscaleb_d))); -+ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_flogb_d))); -+ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fcopysign_d))); -+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fclass_d)));*/ -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ // Double test values. 
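  // Editorial note: result_fadd_d is b + (-a) (via fneg_d) and result_fsub_d
  // is |b + (-a)| - b (via fabs_d); both are exact in double precision, as a
  // quick host-side check confirms (std::fabs assumed from <cmath>):
  {
    double a = 1.5e14, b = -2.75e11;
    CHECK_EQ(-1.502750e14, b + (-a));               // result_fadd_d
    CHECK_EQ(1.505500e14, std::fabs(b + (-a)) - b); // result_fsub_d
  }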
-+ t.a = 1.5e14; -+ t.b = -2.75e11; -+ t.c = 1.5; -+ t.d = -2.75; -+ t.e = 120.0; -+ t.f = 120.44; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(-1.502750e14), t.result_fadd_d); -+ CHECK_EQ(static_cast(1.505500e14), t.result_fsub_d); -+ CHECK_EQ(static_cast(-3.300000e02), t.result_fmul_d); -+ CHECK_EQ(static_cast(8.000000e01), t.result_fdiv_d); -+ CHECK_EQ(static_cast(-3.959850e04), t.result_fmadd_d); -+ CHECK_EQ(static_cast(-3.959725e04), t.result_fmsub_d); -+ CHECK_EQ(static_cast(3.959850e04), t.result_fnmadd_d); -+ CHECK_EQ(static_cast(3.959725e04), t.result_fnmsub_d); -+ CHECK_EQ(static_cast(10.97451593465515908537), t.result_fsqrt_d); -+ // CHECK_EQ(static_cast( 8.164965e-08), t.result_frecip_d); -+ // CHECK_EQ(static_cast( 8.164966e-08), t.result_frsqrt_d); -+ // CHECK_EQ(static_cast(), t.result_fscaleb_d); -+ // CHECK_EQ(static_cast( 6.906891), t.result_flogb_d); -+ // CHECK_EQ(static_cast( 2.75e11), t.result_fcopysign_d); -+ // CHECK_EQ(static_cast(), t.result_fclass_d); -+} -+ -+TEST(LA13) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ struct T { -+ float a; -+ float b; -+ float c; -+ float d; -+ float e; -+ float result_fadd_s; -+ float result_fsub_s; -+ float result_fmul_s; -+ float result_fdiv_s; -+ float result_fmadd_s; -+ float result_fmsub_s; -+ float result_fnmadd_s; -+ float result_fnmsub_s; -+ float result_fsqrt_s; -+ float result_frecip_s; -+ float result_frsqrt_s; -+ float result_fscaleb_s; -+ float result_flogb_s; -+ float result_fcopysign_s; -+ float result_fclass_s; -+ }; -+ T t; -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ // Float precision floating point instructions. -+ __ Fld_s(f8, MemOperand(a0, offsetof(T, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(T, b))); -+ -+ __ fneg_s(f10, f8); -+ __ fadd_s(f11, f9, f10); -+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_fadd_s))); -+ __ fabs_s(f11, f11); -+ __ fsub_s(f12, f11, f9); -+ __ Fst_s(f12, MemOperand(a0, offsetof(T, result_fsub_s))); -+ -+ __ Fld_s(f13, MemOperand(a0, offsetof(T, c))); -+ __ Fld_s(f14, MemOperand(a0, offsetof(T, d))); -+ __ Fld_s(f15, MemOperand(a0, offsetof(T, e))); -+ -+ __ fmin_s(f16, f13, f14); -+ __ fmul_s(f17, f15, f16); -+ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_fmul_s))); -+ __ fmax_s(f18, f13, f14); -+ __ fdiv_s(f19, f15, f18); -+ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fdiv_s))); -+ -+ __ fmina_s(f16, f13, f14); -+ __ fmadd_s(f18, f17, f15, f16); -+ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fmadd_s))); -+ __ fnmadd_s(f19, f17, f15, f16); -+ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fnmadd_s))); -+ __ fmaxa_s(f16, f13, f14); -+ __ fmsub_s(f20, f17, f15, f16); -+ __ Fst_s(f20, MemOperand(a0, offsetof(T, result_fmsub_s))); -+ __ fnmsub_s(f21, f17, f15, f16); -+ __ Fst_s(f21, MemOperand(a0, offsetof(T, result_fnmsub_s))); -+ -+ __ fsqrt_s(f10, f8); -+ //__ frecip_s(f11, f10); -+ //__ frsqrt_s(f12, f8); -+ __ Fst_s(f10, MemOperand(a0, offsetof(T, result_fsqrt_s))); -+ //__ Fst_s(f11, MemOperand(a0, offsetof(T, result_frecip_s))); -+ //__ Fst_s(f12, MemOperand(a0, offsetof(T, result_frsqrt_s))); -+ -+ /*__ fscaleb_s(f16, f13, f15); -+ __ flogb_s(f17, f15); -+ __ fcopysign_s(f18, f8, f9); -+ __ fclass_s(f19, f9); -+ __ Fst_s(f16, MemOperand(a0, offsetof(T, result_fscaleb_s))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_flogb_s))); -+ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fcopysign_s))); -+ __ Fst_s(f19, MemOperand(a0, offsetof(T, 
result_fclass_s)));*/ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ // Float test values. -+ t.a = 1.5e6; -+ t.b = -2.75e4; -+ t.c = 1.5; -+ t.d = -2.75; -+ t.e = 120.0; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ CHECK_EQ(static_cast(-1.527500e06), t.result_fadd_s); -+ CHECK_EQ(static_cast(1.555000e06), t.result_fsub_s); -+ CHECK_EQ(static_cast(-3.300000e02), t.result_fmul_s); -+ CHECK_EQ(static_cast(8.000000e01), t.result_fdiv_s); -+ CHECK_EQ(static_cast(-3.959850e04), t.result_fmadd_s); -+ CHECK_EQ(static_cast(-3.959725e04), t.result_fmsub_s); -+ CHECK_EQ(static_cast(3.959850e04), t.result_fnmadd_s); -+ CHECK_EQ(static_cast(3.959725e04), t.result_fnmsub_s); -+ CHECK_EQ(static_cast(1224.744873), t.result_fsqrt_s); -+ // CHECK_EQ(static_cast( 8.164966e-04), t.result_frecip_s); -+ // CHECK_EQ(static_cast( 8.164966e-04), t.result_frsqrt_s); -+ // CHECK_EQ(static_cast(), t.result_fscaleb_s); -+ // CHECK_EQ(static_cast( 6.906890), t.result_flogb_s); -+ // CHECK_EQ(static_cast( 2.75e4), t.result_fcopysign_s); -+ // CHECK_EQ(static_cast(), t.result_fclass_s); -+} -+ -+TEST(FCMP_COND) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ double dTrue; -+ double dFalse; -+ double dOp1; -+ double dOp2; -+ double dCaf; -+ double dCun; -+ double dCeq; -+ double dCueq; -+ double dClt; -+ double dCult; -+ double dCle; -+ double dCule; -+ double dCne; -+ double dCor; -+ double dCune; -+ double dSaf; -+ double dSun; -+ double dSeq; -+ double dSueq; -+ double dSlt; -+ double dSult; -+ double dSle; -+ double dSule; -+ double dSne; -+ double dSor; -+ double dSune; -+ float fTrue; -+ float fFalse; -+ float fOp1; -+ float fOp2; -+ float fCaf; -+ float fCun; -+ float fCeq; -+ float fCueq; -+ float fClt; -+ float fCult; -+ float fCle; -+ float fCule; -+ float fCne; -+ float fCor; -+ float fCune; -+ float fSaf; -+ float fSun; -+ float fSeq; -+ float fSueq; -+ float fSlt; -+ float fSult; -+ float fSle; -+ float fSule; -+ float fSne; -+ float fSor; -+ float fSune; -+ }; -+ -+ TestFloat test; -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, dOp1))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, dOp2))); -+ -+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, fOp1))); -+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, fOp2))); -+ -+ __ Fld_d(f12, MemOperand(a0, offsetof(TestFloat, dFalse))); -+ __ Fld_d(f13, MemOperand(a0, offsetof(TestFloat, dTrue))); -+ -+ __ Fld_s(f14, MemOperand(a0, offsetof(TestFloat, fFalse))); -+ __ Fld_s(f15, MemOperand(a0, offsetof(TestFloat, fTrue))); -+ -+ __ fcmp_cond_d(CAF, f8, f9, FCC0); -+ __ fcmp_cond_s(CAF, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCaf))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCaf))); -+ -+ __ fcmp_cond_d(CUN, f8, f9, FCC0); -+ __ fcmp_cond_s(CUN, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCun))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCun))); -+ -+ __ fcmp_cond_d(CEQ, f8, f9, FCC0); -+ __ fcmp_cond_s(CEQ, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCeq))); -+ 
__ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCeq))); -+ -+ __ fcmp_cond_d(CUEQ, f8, f9, FCC0); -+ __ fcmp_cond_s(CUEQ, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCueq))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCueq))); -+ -+ __ fcmp_cond_d(CLT, f8, f9, FCC0); -+ __ fcmp_cond_s(CLT, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dClt))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fClt))); -+ -+ __ fcmp_cond_d(CULT, f8, f9, FCC0); -+ __ fcmp_cond_s(CULT, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCult))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCult))); -+ -+ __ fcmp_cond_d(CLE, f8, f9, FCC0); -+ __ fcmp_cond_s(CLE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCle))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCle))); -+ -+ __ fcmp_cond_d(CULE, f8, f9, FCC0); -+ __ fcmp_cond_s(CULE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCule))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCule))); -+ -+ __ fcmp_cond_d(CNE, f8, f9, FCC0); -+ __ fcmp_cond_s(CNE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCne))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCne))); -+ -+ __ fcmp_cond_d(COR, f8, f9, FCC0); -+ __ fcmp_cond_s(COR, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCor))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCor))); -+ -+ __ fcmp_cond_d(CUNE, f8, f9, FCC0); -+ __ fcmp_cond_s(CUNE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCune))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCune))); -+ -+ /* __ fcmp_cond_d(SAF, f8, f9, FCC0); -+ __ fcmp_cond_s(SAF, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSaf))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSaf))); -+ -+ __ fcmp_cond_d(SUN, f8, f9, FCC0); -+ __ fcmp_cond_s(SUN, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSun))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSun))); -+ -+ __ fcmp_cond_d(SEQ, f8, f9, FCC0); -+ __ fcmp_cond_s(SEQ, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSeq))); -+ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSeq))); -+ -+ __ fcmp_cond_d(SUEQ, f8, f9, FCC0); -+ __ fcmp_cond_s(SUEQ, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSueq))); -+ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSueq))); -+ -+ __ fcmp_cond_d(SLT, f8, f9, FCC0); -+ __ fcmp_cond_s(SLT, f10, f11, FCC1); -+ __ fsel(f16, f12, f13, FCC0); -+ __ fsel(f17, f14, f15, FCC1); -+ __ Fld_d(f16, MemOperand(a0, offsetof(TestFloat, dSlt))); -+ __ Fst_d(f17, MemOperand(a0, 
offsetof(TestFloat, fSlt))); -+ -+ __ fcmp_cond_d(SULT, f8, f9, FCC0); -+ __ fcmp_cond_s(SULT, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSult))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSult))); -+ -+ __ fcmp_cond_d(SLE, f8, f9, FCC0); -+ __ fcmp_cond_s(SLE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSle))); -+ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSle))); -+ -+ __ fcmp_cond_d(SULE, f8, f9, FCC0); -+ __ fcmp_cond_s(SULE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSule))); -+ __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSule))); -+ -+ __ fcmp_cond_d(SNE, f8, f9, FCC0); -+ __ fcmp_cond_s(SNE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSne))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSne))); -+ -+ __ fcmp_cond_d(SOR, f8, f9, FCC0); -+ __ fcmp_cond_s(SOR, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSor))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSor))); -+ -+ __ fcmp_cond_d(SUNE, f8, f9, FCC0); -+ __ fcmp_cond_s(SUNE, f10, f11, FCC1); -+ __ fsel(FCC0, f16, f12, f13); -+ __ fsel(FCC1, f17, f14, f15); -+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSune))); -+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSune)));*/ -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ test.dTrue = 1234.0; -+ test.dFalse = 0.0; -+ test.fTrue = 12.0; -+ test.fFalse = 0.0; -+ -+ test.dOp1 = 2.0; -+ test.dOp2 = 3.0; -+ test.fOp1 = 2.0; -+ test.fOp2 = 3.0; -+ f.Call(&test, 0, 0, 0, 0); -+ -+ CHECK_EQ(test.dCaf, test.dFalse); -+ CHECK_EQ(test.fCaf, test.fFalse); -+ CHECK_EQ(test.dCun, test.dFalse); -+ CHECK_EQ(test.fCun, test.fFalse); -+ CHECK_EQ(test.dCeq, test.dFalse); -+ CHECK_EQ(test.fCeq, test.fFalse); -+ CHECK_EQ(test.dCueq, test.dFalse); -+ CHECK_EQ(test.fCueq, test.fFalse); -+ CHECK_EQ(test.dClt, test.dTrue); -+ CHECK_EQ(test.fClt, test.fTrue); -+ CHECK_EQ(test.dCult, test.dTrue); -+ CHECK_EQ(test.fCult, test.fTrue); -+ CHECK_EQ(test.dCle, test.dTrue); -+ CHECK_EQ(test.fCle, test.fTrue); -+ CHECK_EQ(test.dCule, test.dTrue); -+ CHECK_EQ(test.fCule, test.fTrue); -+ CHECK_EQ(test.dCne, test.dTrue); -+ CHECK_EQ(test.fCne, test.fTrue); -+ CHECK_EQ(test.dCor, test.dTrue); -+ CHECK_EQ(test.fCor, test.fTrue); -+ CHECK_EQ(test.dCune, test.dTrue); -+ CHECK_EQ(test.fCune, test.fTrue); -+ /* CHECK_EQ(test.dSaf, test.dFalse); -+ CHECK_EQ(test.fSaf, test.fFalse); -+ CHECK_EQ(test.dSun, test.dFalse); -+ CHECK_EQ(test.fSun, test.fFalse); -+ CHECK_EQ(test.dSeq, test.dFalse); -+ CHECK_EQ(test.fSeq, test.fFalse); -+ CHECK_EQ(test.dSueq, test.dFalse); -+ CHECK_EQ(test.fSueq, test.fFalse); -+ CHECK_EQ(test.dClt, test.dTrue); -+ CHECK_EQ(test.fClt, test.fTrue); -+ CHECK_EQ(test.dCult, test.dTrue); -+ CHECK_EQ(test.fCult, test.fTrue); -+ CHECK_EQ(test.dSle, test.dTrue); -+ CHECK_EQ(test.fSle, test.fTrue); -+ CHECK_EQ(test.dSule, test.dTrue); -+ CHECK_EQ(test.fSule, test.fTrue); -+ CHECK_EQ(test.dSne, test.dTrue); -+ CHECK_EQ(test.fSne, test.fTrue); -+ 
CHECK_EQ(test.dSor, test.dTrue); -+ CHECK_EQ(test.fSor, test.fTrue); -+ CHECK_EQ(test.dSune, test.dTrue); -+ CHECK_EQ(test.fSune, test.fTrue);*/ -+ -+ test.dOp1 = std::numeric_limits::max(); -+ test.dOp2 = std::numeric_limits::min(); -+ test.fOp1 = std::numeric_limits::min(); -+ test.fOp2 = -std::numeric_limits::max(); -+ f.Call(&test, 0, 0, 0, 0); -+ -+ CHECK_EQ(test.dCaf, test.dFalse); -+ CHECK_EQ(test.fCaf, test.fFalse); -+ CHECK_EQ(test.dCun, test.dFalse); -+ CHECK_EQ(test.fCun, test.fFalse); -+ CHECK_EQ(test.dCeq, test.dFalse); -+ CHECK_EQ(test.fCeq, test.fFalse); -+ CHECK_EQ(test.dCueq, test.dFalse); -+ CHECK_EQ(test.fCueq, test.fFalse); -+ CHECK_EQ(test.dClt, test.dFalse); -+ CHECK_EQ(test.fClt, test.fFalse); -+ CHECK_EQ(test.dCult, test.dFalse); -+ CHECK_EQ(test.fCult, test.fFalse); -+ CHECK_EQ(test.dCle, test.dFalse); -+ CHECK_EQ(test.fCle, test.fFalse); -+ CHECK_EQ(test.dCule, test.dFalse); -+ CHECK_EQ(test.fCule, test.fFalse); -+ CHECK_EQ(test.dCne, test.dTrue); -+ CHECK_EQ(test.fCne, test.fTrue); -+ CHECK_EQ(test.dCor, test.dTrue); -+ CHECK_EQ(test.fCor, test.fTrue); -+ CHECK_EQ(test.dCune, test.dTrue); -+ CHECK_EQ(test.fCune, test.fTrue); -+ /* CHECK_EQ(test.dSaf, test.dFalse); -+ CHECK_EQ(test.fSaf, test.fFalse); -+ CHECK_EQ(test.dSun, test.dFalse); -+ CHECK_EQ(test.fSun, test.fFalse); -+ CHECK_EQ(test.dSeq, test.dFalse); -+ CHECK_EQ(test.fSeq, test.fFalse); -+ CHECK_EQ(test.dSueq, test.dFalse); -+ CHECK_EQ(test.fSueq, test.fFalse); -+ CHECK_EQ(test.dSlt, test.dFalse); -+ CHECK_EQ(test.fSlt, test.fFalse); -+ CHECK_EQ(test.dSult, test.dFalse); -+ CHECK_EQ(test.fSult, test.fFalse); -+ CHECK_EQ(test.dSle, test.dFalse); -+ CHECK_EQ(test.fSle, test.fFalse); -+ CHECK_EQ(test.dSule, test.dFalse); -+ CHECK_EQ(test.fSule, test.fFalse); -+ CHECK_EQ(test.dSne, test.dTrue); -+ CHECK_EQ(test.fSne, test.fTrue); -+ CHECK_EQ(test.dSor, test.dTrue); -+ CHECK_EQ(test.fSor, test.fTrue); -+ CHECK_EQ(test.dSune, test.dTrue); -+ CHECK_EQ(test.fSune, test.fTrue);*/ -+ -+ test.dOp1 = std::numeric_limits::quiet_NaN(); -+ test.dOp2 = 0.0; -+ test.fOp1 = std::numeric_limits::quiet_NaN(); -+ test.fOp2 = 0.0; -+ f.Call(&test, 0, 0, 0, 0); -+ -+ CHECK_EQ(test.dCaf, test.dFalse); -+ CHECK_EQ(test.fCaf, test.fFalse); -+ CHECK_EQ(test.dCun, test.dTrue); -+ CHECK_EQ(test.fCun, test.fTrue); -+ CHECK_EQ(test.dCeq, test.dFalse); -+ CHECK_EQ(test.fCeq, test.fFalse); -+ CHECK_EQ(test.dCueq, test.dTrue); -+ CHECK_EQ(test.fCueq, test.fTrue); -+ CHECK_EQ(test.dClt, test.dFalse); -+ CHECK_EQ(test.fClt, test.fFalse); -+ CHECK_EQ(test.dCult, test.dTrue); -+ CHECK_EQ(test.fCult, test.fTrue); -+ CHECK_EQ(test.dCle, test.dFalse); -+ CHECK_EQ(test.fCle, test.fFalse); -+ CHECK_EQ(test.dCule, test.dTrue); -+ CHECK_EQ(test.fCule, test.fTrue); -+ CHECK_EQ(test.dCne, test.dFalse); -+ CHECK_EQ(test.fCne, test.fFalse); -+ CHECK_EQ(test.dCor, test.dFalse); -+ CHECK_EQ(test.fCor, test.fFalse); -+ CHECK_EQ(test.dCune, test.dTrue); -+ CHECK_EQ(test.fCune, test.fTrue); -+ /* CHECK_EQ(test.dSaf, test.dTrue); -+ CHECK_EQ(test.fSaf, test.fTrue); -+ CHECK_EQ(test.dSun, test.dTrue); -+ CHECK_EQ(test.fSun, test.fTrue); -+ CHECK_EQ(test.dSeq, test.dFalse); -+ CHECK_EQ(test.fSeq, test.fFalse); -+ CHECK_EQ(test.dSueq, test.dTrue); -+ CHECK_EQ(test.fSueq, test.fTrue); -+ CHECK_EQ(test.dSlt, test.dFalse); -+ CHECK_EQ(test.fSlt, test.fFalse); -+ CHECK_EQ(test.dSult, test.dTrue); -+ CHECK_EQ(test.fSult, test.fTrue); -+ CHECK_EQ(test.dSle, test.dFalse); -+ CHECK_EQ(test.fSle, test.fFalse); -+ CHECK_EQ(test.dSule, test.dTrue); -+ CHECK_EQ(test.fSule, 
test.fTrue);
-+  CHECK_EQ(test.dSne, test.dFalse);
-+  CHECK_EQ(test.fSne, test.fFalse);
-+  CHECK_EQ(test.dSor, test.dFalse);
-+  CHECK_EQ(test.fSor, test.fFalse);
-+  CHECK_EQ(test.dSune, test.dTrue);
-+  CHECK_EQ(test.fSune, test.fTrue);*/
-+}
-+
-+TEST(FCVT) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  struct TestFloat {
-+    float fcvt_d_s_in;
-+    double fcvt_s_d_in;
-+    double fcvt_d_s_out;
-+    float fcvt_s_d_out;
-+    int fcsr;
-+  };
-+  TestFloat test;
-+  __ xor_(a4, a4, a4);
-+  __ xor_(a5, a5, a5);
-+  __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
-+  __ movfcsr2gr(a5);
-+  __ movgr2fcsr(a4);
-+  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_in)));
-+  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_in)));
-+  __ fcvt_d_s(f10, f8);
-+  __ fcvt_s_d(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_out)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_out)));
-+  __ movgr2fcsr(a5);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  test.fcsr = kRoundToZero;
-+
-+  test.fcvt_d_s_in = -0.51;
-+  test.fcvt_s_d_in = -0.51;
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
-+  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
-+
-+  test.fcvt_d_s_in = 0.49;
-+  test.fcvt_s_d_in = 0.49;
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
-+  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
-+
-+  test.fcvt_d_s_in = std::numeric_limits<float>::max();
-+  test.fcvt_s_d_in = std::numeric_limits<double>::max();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
-+  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
-+
-+  test.fcvt_d_s_in = -std::numeric_limits<float>::max();
-+  test.fcvt_s_d_in = -std::numeric_limits<double>::max();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
-+  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
-+
-+  test.fcvt_d_s_in = std::numeric_limits<float>::min();
-+  test.fcvt_s_d_in = std::numeric_limits<double>::min();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
-+  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
-+}
-+
-+TEST(FFINT) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  struct TestFloat {
-+    int32_t ffint_s_w_in;
-+    int64_t ffint_s_l_in;
-+    int32_t ffint_d_w_in;
-+    int64_t ffint_d_l_in;
-+    float ffint_s_w_out;
-+    float ffint_s_l_out;
-+    double ffint_d_w_out;
-+    double ffint_d_l_out;
-+    int fcsr;
-+  };
-+  TestFloat test;
-+  __ xor_(a4, a4, a4);
-+  __ xor_(a5, a5, a5);
-+  __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
-+  __ movfcsr2gr(a5);
-+  __ movgr2fcsr(a4);
-+  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, ffint_s_w_in)));
-+  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, ffint_s_l_in)));
-+  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, ffint_d_w_in)));
-+  __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, ffint_d_l_in)));
-+  __ ffint_s_w(f12, f8);
-+  __ ffint_s_l(f13, f9);
-+  __ ffint_d_w(f14, f10);
-+  __ ffint_d_l(f15, f11);
-+  __ Fst_s(f12, MemOperand(a0,
offsetof(TestFloat, ffint_s_w_out))); -+ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, ffint_s_l_out))); -+ __ Fst_d(f14, MemOperand(a0, offsetof(TestFloat, ffint_d_w_out))); -+ __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, ffint_d_l_out))); -+ __ movgr2fcsr(a5); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ test.fcsr = kRoundToZero; -+ -+ test.ffint_s_w_in = -1; -+ test.ffint_s_l_in = -1; -+ test.ffint_d_w_in = -1; -+ test.ffint_d_l_in = -1; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); -+ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); -+ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); -+ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); -+ -+ test.ffint_s_w_in = 1; -+ test.ffint_s_l_in = 1; -+ test.ffint_d_w_in = 1; -+ test.ffint_d_l_in = 1; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); -+ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); -+ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); -+ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); -+ -+ test.ffint_s_w_in = std::numeric_limits::max(); -+ test.ffint_s_l_in = std::numeric_limits::max(); -+ test.ffint_d_w_in = std::numeric_limits::max(); -+ test.ffint_d_l_in = std::numeric_limits::max(); -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); -+ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); -+ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); -+ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); -+ -+ test.ffint_s_w_in = std::numeric_limits::min(); -+ test.ffint_s_l_in = std::numeric_limits::min(); -+ test.ffint_d_w_in = std::numeric_limits::min(); -+ test.ffint_d_l_in = std::numeric_limits::min(); -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.ffint_s_w_out, static_cast(test.ffint_s_w_in)); -+ CHECK_EQ(test.ffint_s_l_out, static_cast(test.ffint_s_l_in)); -+ CHECK_EQ(test.ffint_d_w_out, static_cast(test.ffint_d_w_in)); -+ CHECK_EQ(test.ffint_d_l_out, static_cast(test.ffint_d_l_in)); -+} -+ -+TEST(FTINT) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ int32_t c; -+ int32_t d; -+ int64_t e; -+ int64_t f; -+ int fcsr; -+ }; -+ Test test; -+ -+ const int kTableLength = 9; -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ double outputs_RN_W[kTableLength] = { -+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_RN_L[kTableLength] = { -+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ double outputs_RZ_W[kTableLength] = { -+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_RZ_L[kTableLength] = { -+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ double outputs_RP_W[kTableLength] = { -+ 
4.0, 4.0, 4.0, -3.0, -3.0, -3.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_RP_L[kTableLength] = { -+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ double outputs_RM_W[kTableLength] = { -+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_RM_L[kTableLength] = { -+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ // clang-format on -+ -+ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf, -+ kRoundToMinusInf}; -+ double* outputs[8] = { -+ outputs_RN_W, outputs_RN_L, outputs_RZ_W, outputs_RZ_L, -+ outputs_RP_W, outputs_RP_L, outputs_RM_W, outputs_RM_L, -+ }; -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ xor_(a5, a5, a5); -+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr))); -+ __ movfcsr2gr(a4); -+ __ movgr2fcsr(a5); -+ __ ftint_w_d(f10, f8); -+ __ ftint_w_s(f11, f9); -+ __ ftint_l_d(f12, f8); -+ __ ftint_l_s(f13, f9); -+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); -+ __ movgr2fcsr(a4); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int j = 0; j < 4; j++) { -+ test.fcsr = fcsr_inputs[j]; -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs[2 * j][i]); -+ CHECK_EQ(test.d, outputs[2 * j][i]); -+ CHECK_EQ(test.e, outputs[2 * j + 1][i]); -+ CHECK_EQ(test.f, outputs[2 * j + 1][i]); -+ } -+ } -+} -+ -+TEST(FTINTRM) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ int32_t c; -+ int32_t d; -+ int64_t e; -+ int64_t f; -+ }; -+ Test test; -+ -+ const int kTableLength = 9; -+ -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ double outputs_w[kTableLength] = { -+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_l[kTableLength] = { -+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ ftintrm_w_d(f10, f8); -+ __ ftintrm_w_s(f11, f9); -+ __ ftintrm_l_d(f12, f8); -+ __ ftintrm_l_s(f13, f9); -+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = 
inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_w[i]); -+ CHECK_EQ(test.d, outputs_w[i]); -+ CHECK_EQ(test.e, outputs_l[i]); -+ CHECK_EQ(test.f, outputs_l[i]); -+ } -+} -+ -+TEST(FTINTRP) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ int32_t c; -+ int32_t d; -+ int64_t e; -+ int64_t f; -+ }; -+ Test test; -+ -+ const int kTableLength = 9; -+ -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ double outputs_w[kTableLength] = { -+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_l[kTableLength] = { -+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ ftintrp_w_d(f10, f8); -+ __ ftintrp_w_s(f11, f9); -+ __ ftintrp_l_d(f12, f8); -+ __ ftintrp_l_s(f13, f9); -+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_w[i]); -+ CHECK_EQ(test.d, outputs_w[i]); -+ CHECK_EQ(test.e, outputs_l[i]); -+ CHECK_EQ(test.f, outputs_l[i]); -+ } -+} -+ -+TEST(FTINTRZ) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ int32_t c; -+ int32_t d; -+ int64_t e; -+ int64_t f; -+ }; -+ Test test; -+ -+ const int kTableLength = 9; -+ -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ double outputs_w[kTableLength] = { -+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_l[kTableLength] = { -+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ ftintrz_w_d(f10, f8); -+ __ ftintrz_w_s(f11, f9); -+ __ ftintrz_l_d(f12, f8); -+ __ ftintrz_l_s(f13, f9); -+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, 
&desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_w[i]); -+ CHECK_EQ(test.d, outputs_w[i]); -+ CHECK_EQ(test.e, outputs_l[i]); -+ CHECK_EQ(test.f, outputs_l[i]); -+ } -+} -+ -+TEST(FTINTRNE) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ int32_t c; -+ int32_t d; -+ int64_t e; -+ int64_t f; -+ }; -+ Test test; -+ -+ const int kTableLength = 9; -+ -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5, -+ 2147483648.0, -+ std::numeric_limits::quiet_NaN(), -+ std::numeric_limits::infinity() -+ }; -+ double outputs_w[kTableLength] = { -+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, -+ kFPUInvalidResult, 0, -+ kFPUInvalidResult}; -+ double outputs_l[kTableLength] = { -+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0, -+ 2147483648.0, 0, -+ kFPU64InvalidResult}; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ ftintrne_w_d(f10, f8); -+ __ ftintrne_w_s(f11, f9); -+ __ ftintrne_l_d(f12, f8); -+ __ ftintrne_l_s(f13, f9); -+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_w[i]); -+ CHECK_EQ(test.d, outputs_w[i]); -+ CHECK_EQ(test.e, outputs_l[i]); -+ CHECK_EQ(test.f, outputs_l[i]); -+ } -+} -+ -+TEST(FRINT) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double a; -+ float b; -+ double c; -+ float d; -+ int fcsr; -+ }; -+ Test test; -+ -+ const int kTableLength = 32; -+ -+ // clang-format off -+ double inputs_d[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, -+ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307, -+ 309485009821345068724781056.89, -+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, -+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::max() - 0.1, -+ std::numeric_limits::infinity() -+ }; -+ float inputs_s[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, -+ 
1.7976931348623157E+38, 6.27463370218383111104242366943E-37, -+ 309485009821345068724781056.89, -+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, -+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::lowest() + 0.6, -+ std::numeric_limits::infinity() -+ }; -+ float outputs_RN_S[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, -+ 1.7976931348623157E38, 0, -+ 309485009821345068724781057.0, -+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, -+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::lowest() + 1, -+ std::numeric_limits::infinity() -+ }; -+ double outputs_RN_D[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, -+ 1.7976931348623157E308, 0, -+ 309485009821345068724781057.0, -+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, -+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::max(), -+ std::numeric_limits::infinity() -+ }; -+ float outputs_RZ_S[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, -+ 1.7976931348623157E38, 0, -+ 309485009821345068724781057.0, -+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, -+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::lowest() + 1, -+ std::numeric_limits::infinity() -+ }; -+ double outputs_RZ_D[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, -+ 1.7976931348623157E308, 0, -+ 309485009821345068724781057.0, -+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, -+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::max() - 1, -+ std::numeric_limits::infinity() -+ }; -+ float outputs_RP_S[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, -+ 1.7976931348623157E38, 1, -+ 309485009821345068724781057.0, -+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, -+ -2.0, -2.0, 
-2.0, -3.0, -3.0, -3.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::lowest() + 1, -+ std::numeric_limits::infinity() -+ }; -+ double outputs_RP_D[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, -+ 1.7976931348623157E308, 1, -+ 309485009821345068724781057.0, -+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, -+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::max(), -+ std::numeric_limits::infinity() -+ }; -+ float outputs_RM_S[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, -+ 1.7976931348623157E38, 0, -+ 309485009821345068724781057.0, -+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, -+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::lowest() + 1, -+ std::numeric_limits::infinity() -+ }; -+ double outputs_RM_D[kTableLength] = { -+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, -+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, -+ 1.7976931348623157E308, 0, -+ 309485009821345068724781057.0, -+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, -+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, -+ 37778931862957161709568.0, 37778931862957161709569.0, -+ 37778931862957161709580.0, 37778931862957161709581.0, -+ 37778931862957161709582.0, 37778931862957161709583.0, -+ 37778931862957161709584.0, 37778931862957161709585.0, -+ 37778931862957161709586.0, 37778931862957161709587.0, -+ std::numeric_limits::max(), -+ std::numeric_limits::infinity() -+ }; -+ // clang-format on -+ -+ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf, -+ kRoundToMinusInf}; -+ double* outputs_d[4] = {outputs_RN_D, outputs_RZ_D, outputs_RP_D, -+ outputs_RM_D}; -+ float* outputs_s[4] = {outputs_RN_S, outputs_RZ_S, outputs_RP_S, -+ outputs_RM_S}; -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); -+ __ xor_(a5, a5, a5); -+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr))); -+ __ movfcsr2gr(a4); -+ __ movgr2fcsr(a5); -+ __ frint_d(f10, f8); -+ __ frint_s(f11, f9); -+ __ Fst_d(f10, MemOperand(a0, offsetof(Test, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); -+ __ movgr2fcsr(a4); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int j = 0; j < 4; j++) { -+ test.fcsr = fcsr_inputs[j]; -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_d[i]; -+ test.b = inputs_s[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_d[j][i]); -+ 
CHECK_EQ(test.d, outputs_s[j][i]); -+ } -+ } -+} -+ -+TEST(FMOV) { -+ const int kTableLength = 7; -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ double a; -+ float b; -+ double c; -+ float d; -+ }; -+ -+ TestFloat test; -+ -+ // clang-format off -+ double inputs_D[kTableLength] = { -+ 5.3, -5.3, 0.29, -0.29, 0, -+ std::numeric_limits::max(), -+ -std::numeric_limits::max() -+ }; -+ float inputs_S[kTableLength] = { -+ 4.8, -4.8, 0.29, -0.29, 0, -+ std::numeric_limits::max(), -+ -std::numeric_limits::max() -+ }; -+ -+ double outputs_D[kTableLength] = { -+ 5.3, -5.3, 0.29, -0.29, 0, -+ std::numeric_limits::max(), -+ -std::numeric_limits::max() -+ }; -+ -+ float outputs_S[kTableLength] = { -+ 4.8, -4.8, 0.29, -0.29, 0, -+ std::numeric_limits::max(), -+ -std::numeric_limits::max() -+ }; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ fmov_d(f10, f8); -+ __ fmov_s(f11, f9); -+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); -+ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, d))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_D[i]; -+ test.b = inputs_S[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, outputs_D[i]); -+ CHECK_EQ(test.d, outputs_S[i]); -+ } -+} -+ -+TEST(LA14) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ struct T { -+ double a; -+ double b; -+ double c; -+ double d; -+ int64_t high; -+ int64_t low; -+ }; -+ T t; -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(T, b))); -+ -+ __ movfr2gr_s(a4, f8); -+ __ movfrh2gr_s(a5, f8); -+ __ movfr2gr_d(a6, f9); -+ -+ __ movgr2fr_w(f9, a4); -+ __ movgr2frh_w(f9, a5); -+ __ movgr2fr_d(f8, a6); -+ -+ __ Fst_d(f8, MemOperand(a0, offsetof(T, a))); -+ __ Fst_d(f9, MemOperand(a0, offsetof(T, c))); -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(T, d))); -+ __ movfrh2gr_s(a4, f8); -+ __ movfr2gr_s(a5, f8); -+ -+ __ St_d(a4, MemOperand(a0, offsetof(T, high))); -+ __ St_d(a5, MemOperand(a0, offsetof(T, low))); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ -+ t.a = 1.5e22; -+ t.b = 2.75e11; -+ t.c = 17.17; -+ t.d = -2.75e11; -+ f.Call(&t, 0, 0, 0, 0); -+ CHECK_EQ(2.75e11, t.a); -+ CHECK_EQ(2.75e11, t.b); -+ CHECK_EQ(1.5e22, t.c); -+ CHECK_EQ(static_cast(0xFFFFFFFFC25001D1L), t.high); -+ CHECK_EQ(static_cast(0xFFFFFFFFBF800000L), t.low); -+ -+ t.a = -1.5e22; -+ t.b = -2.75e11; -+ t.c = 17.17; -+ t.d = 274999868928.0; -+ f.Call(&t, 0, 0, 0, 0); -+ CHECK_EQ(-2.75e11, t.a); -+ CHECK_EQ(-2.75e11, t.b); -+ CHECK_EQ(-1.5e22, t.c); -+ CHECK_EQ(static_cast(0x425001D1L), t.high); -+ CHECK_EQ(static_cast(0x3F800000L), t.low); -+} -+ -+uint64_t run_bceqz(int fcc_value, int32_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; 
-+ __ li(a2, 0); -+ __ li(t0, fcc_value); -+ __ b(&main_block); -+ // Block 1 -+ for (int32_t i = -104; i <= -55; ++i) { -+ __ addi_d(a2, a2, 0x1); -+ } -+ __ b(&L); -+ -+ // Block 2 -+ for (int32_t i = -53; i <= -4; ++i) { -+ __ addi_d(a2, a2, 0x10); -+ } -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ movcf2gr(t1, FCC0); -+ __ movgr2cf(FCC0, t0); -+ __ bceqz(FCC0, offset); -+ __ bind(&L); -+ __ movgr2cf(FCC0, t1); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ for (int32_t i = 4; i <= 53; ++i) { -+ __ addi_d(a2, a2, 0x100); -+ } -+ __ b(&L); -+ -+ // Block 5 -+ for (int32_t i = 55; i <= 104; ++i) { -+ __ addi_d(a2, a2, 0x300); -+ } -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BCEQZ) { -+ CcTest::InitializeVM(); -+ struct TestCaseBceqz { -+ int fcc; -+ int32_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBceqz tc[] = { -+ // fcc, offset, expected_res -+ { 0, -90, 0x24 }, -+ { 0, -27, 0x180 }, -+ { 0, 47, 0x700 }, -+ { 0, 70, 0x6900 }, -+ { 1, -27, 0 }, -+ { 1, 47, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBceqz); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bceqz(tc[i].fcc, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+uint64_t run_bcnez(int fcc_value, int32_t offset) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ Label main_block, L; -+ __ li(a2, 0); -+ __ li(t0, fcc_value); -+ __ b(&main_block); -+ // Block 1 -+ for (int32_t i = -104; i <= -55; ++i) { -+ __ addi_d(a2, a2, 0x1); -+ } -+ __ b(&L); -+ -+ // Block 2 -+ for (int32_t i = -53; i <= -4; ++i) { -+ __ addi_d(a2, a2, 0x10); -+ } -+ __ b(&L); -+ -+ // Block 3 (Main) -+ __ bind(&main_block); -+ __ movcf2gr(t1, FCC0); -+ __ movgr2cf(FCC0, t0); -+ __ bcnez(FCC0, offset); -+ __ bind(&L); -+ __ movgr2cf(FCC0, t1); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ // Block 4 -+ for (int32_t i = 4; i <= 53; ++i) { -+ __ addi_d(a2, a2, 0x100); -+ } -+ __ b(&L); -+ -+ // Block 5 -+ for (int32_t i = 55; i <= 104; ++i) { -+ __ addi_d(a2, a2, 0x300); -+ } -+ __ b(&L); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(BCNEZ) { -+ CcTest::InitializeVM(); -+ struct TestCaseBcnez { -+ int fcc; -+ int32_t offset; -+ uint64_t expected_res; -+ }; -+ -+ // clang-format off -+ struct TestCaseBcnez tc[] = { -+ // fcc, offset, expected_res -+ { 1, -90, 0x24 }, -+ { 1, -27, 0x180 }, -+ { 1, 47, 0x700 }, -+ { 1, 70, 0x6900 }, -+ { 0, -27, 0 }, -+ { 0, 47, 0 }, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBcnez); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_bcnez(tc[i].fcc, tc[i].offset); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+TEST(jump_tables1) { -+ // Test jump tables with forward jumps. 
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ const int kNumCases = 512; -+ int values[kNumCases]; -+ isolate->random_number_generator()->NextBytes(values, sizeof(values)); -+ Label labels[kNumCases]; -+ -+ __ addi_d(sp, sp, -8); -+ __ St_d(ra, MemOperand(sp, 0)); -+ __ Align(8); -+ -+ Label done; -+ { -+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); -+ __ pcaddi(ra, 2); -+ __ slli_d(t7, a0, 3); -+ __ add_d(t7, t7, ra); -+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); -+ __ jirl(zero_reg, t7, 0); -+ __ nop(); -+ for (int i = 0; i < kNumCases; ++i) { -+ __ dd(&labels[i]); -+ } -+ } -+ -+ for (int i = 0; i < kNumCases; ++i) { -+ __ bind(&labels[i]); -+ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); -+ __ ori(a2, a2, values[i] & 0xFFF); -+ __ b(&done); -+ __ nop(); -+ } -+ -+ __ bind(&done); -+ __ Ld_d(ra, MemOperand(sp, 0)); -+ __ addi_d(sp, sp, 8); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CHECK_EQ(0, assm.UnboundLabelsCount()); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+#ifdef OBJECT_PRINT -+ code->Print(std::cout); -+#endif -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kNumCases; ++i) { -+ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); -+ ::printf("f(%d) = %" PRId64 "\n", i, res); -+ CHECK_EQ((values[i]), static_cast(res)); -+ } -+} -+ -+TEST(jump_tables2) { -+ // Test jump tables with backward jumps. -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ const int kNumCases = 512; -+ int values[kNumCases]; -+ isolate->random_number_generator()->NextBytes(values, sizeof(values)); -+ Label labels[kNumCases]; -+ -+ __ addi_d(sp, sp, -8); -+ __ St_d(ra, MemOperand(sp, 0)); -+ -+ Label done, dispatch; -+ __ b(&dispatch); -+ __ nop(); -+ -+ for (int i = 0; i < kNumCases; ++i) { -+ __ bind(&labels[i]); -+ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); -+ __ ori(a2, a2, values[i] & 0xFFF); -+ __ b(&done); -+ __ nop(); -+ } -+ -+ __ Align(8); -+ __ bind(&dispatch); -+ { -+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); -+ __ pcaddi(ra, 2); -+ __ slli_d(t7, a0, 3); -+ __ add_d(t7, t7, ra); -+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); -+ __ jirl(zero_reg, t7, 0); -+ __ nop(); -+ for (int i = 0; i < kNumCases; ++i) { -+ __ dd(&labels[i]); -+ } -+ } -+ -+ __ bind(&done); -+ __ Ld_d(ra, MemOperand(sp, 0)); -+ __ addi_d(sp, sp, 8); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+#ifdef OBJECT_PRINT -+ code->Print(std::cout); -+#endif -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kNumCases; ++i) { -+ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); -+ ::printf("f(%d) = %" PRId64 "\n", i, res); -+ CHECK_EQ(values[i], res); -+ } -+} -+ -+TEST(jump_tables3) { -+ // Test jump tables with backward jumps and embedded heap objects. 
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ const int kNumCases = 512; -+ Handle values[kNumCases]; -+ for (int i = 0; i < kNumCases; ++i) { -+ double value = isolate->random_number_generator()->NextDouble(); -+ values[i] = isolate->factory()->NewHeapNumber(value); -+ } -+ Label labels[kNumCases]; -+ Object obj; -+ int64_t imm64; -+ -+ __ addi_d(sp, sp, -8); -+ __ St_d(ra, MemOperand(sp, 0)); -+ -+ Label done, dispatch; -+ __ b(&dispatch); -+ __ nop(); -+ -+ for (int i = 0; i < kNumCases; ++i) { -+ __ bind(&labels[i]); -+ obj = *values[i]; -+ imm64 = obj.ptr(); -+ __ lu12i_w(a2, (imm64 >> 12) & 0xFFFFF); -+ __ ori(a2, a2, imm64 & 0xFFF); -+ __ lu32i_d(a2, (imm64 >> 32) & 0xFFFFF); -+ __ lu52i_d(a2, a2, (imm64 >> 52) & 0xFFF); -+ __ b(&done); -+ } -+ -+ __ Align(8); -+ __ bind(&dispatch); -+ { -+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); -+ __ pcaddi(ra, 2); -+ __ slli_d(t7, a0, 3); // In delay slot. -+ __ add_d(t7, t7, ra); -+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); -+ __ jirl(zero_reg, t7, 0); -+ __ nop(); -+ for (int i = 0; i < kNumCases; ++i) { -+ __ dd(&labels[i]); -+ } -+ } -+ __ bind(&done); -+ __ Ld_d(ra, MemOperand(sp, 0)); -+ __ addi_d(sp, sp, 8); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+#ifdef OBJECT_PRINT -+ code->Print(std::cout); -+#endif -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kNumCases; ++i) { -+ Handle result( -+ Object(reinterpret_cast
(f.Call(i, 0, 0, 0, 0))), isolate);
-+#ifdef OBJECT_PRINT
-+    ::printf("f(%d) = ", i);
-+    result->Print(std::cout);
-+    ::printf("\n");
-+#endif
-+    CHECK(values[i].is_identical_to(result));
-+  }
-+}
-+
-+uint64_t run_li_macro(int64_t imm, LiFlags mode, int32_t num_instr = 0) {
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  Label code_start;
-+  __ bind(&code_start);
-+  __ li(a2, imm, mode);
-+  if (num_instr > 0) {
-+    CHECK_EQ(assm.InstructionsGeneratedSince(&code_start), num_instr);
-+    CHECK_EQ(__ InstrCountForLi64Bit(imm), num_instr);
-+  }
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+#ifdef OBJECT_PRINT
-+  code->Print(std::cout);
-+#endif
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+
-+  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
-+
-+  return res;
-+}
-+
-+TEST(li_macro) {
-+  CcTest::InitializeVM();
-+
-+  // Test li macro-instruction for border cases.
-+
-+  struct TestCase_li {
-+    uint64_t imm;
-+    int32_t num_instr;
-+  };
-+  // clang-format off
-+  struct TestCase_li tc[] = {
-+      // imm, num_instr
-+      {0xFFFFFFFFFFFFF800, 1},  // min_int12
-+      // The test case above generates an addi_d instruction.
-+      // This is an int12 value and we can load it using just addi_d.
-+      {             0x800, 1},  // max_int12 + 1
-+      // Generates ori.
-+      // max_int12 + 1 is not int12 but is uint12, just use ori.
-+      {0xFFFFFFFFFFFFF7FF, 2},  // min_int12 - 1
-+      // Generates lu12i_w + ori.
-+      // We load an int32 value using lu12i_w + ori.
-+      {             0x801, 1},  // max_int12 + 2
-+      // Generates ori.
-+      // Also a uint12 value, use ori.
-+      {        0x00001000, 1},  // max_uint12 + 1
-+      // Generates lu12i_w.
-+      // Low 12 bits are 0, load the value using lu12i_w alone.
-+      {        0x00001001, 2},  // max_uint12 + 2
-+      // Generates lu12i_w + ori.
-+      // We have to generate two instructions in this case.
-+
-+ {0x00000000FFFFFFFF, 2}, // max_uint32 -+ // addi_w + lu32i_d -+ {0x00000000FFFFFFFE, 2}, // max_uint32 - 1 -+ // addi_w + lu32i_d -+ {0xFFFFFFFF80000000, 1}, // min_int32 -+ // lu12i_w -+ {0x0000000080000000, 2}, // max_int32 + 1 -+ // lu12i_w + lu32i_d -+ {0xFFFF0000FFFF8765, 3}, -+ // lu12i_w + ori + lu32i_d -+ {0x1234ABCD87654321, 4}, -+ // lu12i_w + ori + lu32i_d + lu52i_d -+ {0xFFFF789100000000, 2}, -+ // xor + lu32i_d -+ {0xF12F789100000000, 3}, -+ // xor + lu32i_d + lu52i_d -+ {0xF120000000000800, 2}, -+ // ori + lu52i_d -+ {0xFFF0000000000000, 1}, -+ // lu52i_d -+ {0xF100000000000000, 1}, -+ {0x0122000000000000, 2}, -+ {0x1234FFFF77654321, 4}, -+ {0x1230000077654321, 3}, -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase_li); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ CHECK_EQ(tc[i].imm, -+ run_li_macro(tc[i].imm, OPTIMIZE_SIZE, tc[i].num_instr)); -+ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, CONSTANT_SIZE)); -+ if (is_int48(tc[i].imm)) { -+ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, ADDRESS_LOAD)); -+ } -+ } -+} -+ -+TEST(FMIN_FMAX) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ double a; -+ double b; -+ float c; -+ float d; -+ double e; -+ double f; -+ float g; -+ float h; -+ }; -+ -+ TestFloat test; -+ const double dnan = std::numeric_limits::quiet_NaN(); -+ const double dinf = std::numeric_limits::infinity(); -+ const double dminf = -std::numeric_limits::infinity(); -+ const float fnan = std::numeric_limits::quiet_NaN(); -+ const float finf = std::numeric_limits::infinity(); -+ const float fminf = -std::numeric_limits::infinity(); -+ const int kTableLength = 13; -+ -+ // clang-format off -+ double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf, -+ dnan, 42.0, dinf, dminf, dinf, dnan}; -+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan, -+ dinf, dinf, 42.0, dinf, dminf, dnan}; -+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, -+ -0.0, dinf, dinf, 42.0, 42.0, -+ dminf, dminf, dnan}; -+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf, -+ dinf, dinf, dinf, dinf, dinf, dnan}; -+ -+ float inputsc[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, -+ fnan, 42.0, finf, fminf, finf, fnan}; -+ float inputsd[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, -+ finf, finf, 42.0, finf, fminf, fnan}; -+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, -+ -0.0, finf, finf, 42.0, 42.0, -+ fminf, fminf, fnan}; -+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, -+ finf, finf, finf, finf, finf, fnan}; -+ // clang-format on -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c))); -+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); -+ __ fmin_d(f12, f8, f9); -+ __ fmax_d(f13, f8, f9); -+ __ fmin_s(f14, f10, f11); -+ __ fmax_s(f15, f10, f11); -+ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, e))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, f))); -+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g))); -+ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = 
GeneratedCode<F3>::FromCode(*code);
-+  for (int i = 4; i < kTableLength; i++) {
-+    test.a = inputsa[i];
-+    test.b = inputsb[i];
-+    test.c = inputsc[i];
-+    test.d = inputsd[i];
-+
-+    f.Call(&test, 0, 0, 0, 0);
-+
-+    CHECK_EQ(0, memcmp(&test.e, &outputsdmin[i], sizeof(test.e)));
-+    CHECK_EQ(0, memcmp(&test.f, &outputsdmax[i], sizeof(test.f)));
-+    CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
-+    CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
-+  }
-+}
-+
-+TEST(FMINA_FMAXA) {
-+  const int kTableLength = 23;
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+  const double dnan = std::numeric_limits<double>::quiet_NaN();
-+  const double dinf = std::numeric_limits<double>::infinity();
-+  const double dminf = -std::numeric_limits<double>::infinity();
-+  const float fnan = std::numeric_limits<float>::quiet_NaN();
-+  const float finf = std::numeric_limits<float>::infinity();
-+  const float fminf = -std::numeric_limits<float>::infinity();
-+
-+  struct TestFloat {
-+    double a;
-+    double b;
-+    double resd1;
-+    double resd2;
-+    float c;
-+    float d;
-+    float resf1;
-+    float resf2;
-+  };
-+
-+  TestFloat test;
-+  // clang-format off
-+  double inputsa[kTableLength] = {
-+      5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
-+      dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
-+  double inputsb[kTableLength] = {
-+      4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
-+      3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
-+  double resd1[kTableLength] = {
-+      4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
-+      3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
-+  double resd2[kTableLength] = {
-+      5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
-+      3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
-+  float inputsc[kTableLength] = {
-+      5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
-+      fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
-+  float inputsd[kTableLength] = {
-+      4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
-+      3.0, fnan, -0.0, 0.0, fnan, finf, finf, 42.0, finf, fminf, fnan};
-+  float resf1[kTableLength] = {
-+      4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
-+      3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
-+  float resf2[kTableLength] = {
-+      5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
-+      3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
-+  // clang-format on
-+
-+  __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
-+  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
-+  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c)));
-+  __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
-+  __ fmina_d(f12, f8, f9);
-+  __ fmaxa_d(f13, f8, f9);
-+  __ fmina_s(f14, f10, f11);
-+  __ fmaxa_s(f15, f10, f11);
-+  __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, resd1)));
-+  __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resd2)));
-+  __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resf1)));
-+  __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, resf2)));
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  for (int i = 0; i < kTableLength; i++) {
-+    test.a = inputsa[i];
-+    test.b = inputsb[i];
-+
test.c = inputsc[i]; -+ test.d = inputsd[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ if (i < kTableLength - 1) { -+ CHECK_EQ(test.resd1, resd1[i]); -+ CHECK_EQ(test.resd2, resd2[i]); -+ CHECK_EQ(test.resf1, resf1[i]); -+ CHECK_EQ(test.resf2, resf2[i]); -+ } else { -+ CHECK(std::isnan(test.resd1)); -+ CHECK(std::isnan(test.resd2)); -+ CHECK(std::isnan(test.resf1)); -+ CHECK(std::isnan(test.resf2)); -+ } -+ } -+} -+ -+TEST(FADD) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ double a; -+ double b; -+ double c; -+ float d; -+ float e; -+ float f; -+ }; -+ -+ TestFloat test; -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ fadd_d(f10, f8, f9); -+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); -+ -+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); -+ __ Fld_s(f12, MemOperand(a0, offsetof(TestFloat, e))); -+ __ fadd_s(f13, f11, f12); -+ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, f))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ test.a = 2.0; -+ test.b = 3.0; -+ test.d = 2.0; -+ test.e = 3.0; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, 5.0); -+ CHECK_EQ(test.f, 5.0); -+ -+ test.a = std::numeric_limits::max(); -+ test.b = -std::numeric_limits::max(); // lowest() -+ test.d = std::numeric_limits::max(); -+ test.e = -std::numeric_limits::max(); // lowest() -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.c, 0.0); -+ CHECK_EQ(test.f, 0.0); -+ -+ test.a = std::numeric_limits::max(); -+ test.b = std::numeric_limits::max(); -+ test.d = std::numeric_limits::max(); -+ test.e = std::numeric_limits::max(); -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK(!std::isfinite(test.c)); -+ CHECK(!std::isfinite(test.f)); -+ -+ test.a = 5.0; -+ test.b = std::numeric_limits::signaling_NaN(); -+ test.d = 5.0; -+ test.e = std::numeric_limits::signaling_NaN(); -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK(std::isnan(test.c)); -+ CHECK(std::isnan(test.f)); -+} -+ -+TEST(FSUB) { -+ const int kTableLength = 12; -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ float a; -+ float b; -+ float resultS; -+ double c; -+ double d; -+ double resultD; -+ }; -+ -+ TestFloat test; -+ -+ // clang-format off -+ double inputfs_D[kTableLength] = { -+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, -+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 -+ }; -+ double inputft_D[kTableLength] = { -+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, -+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 -+ }; -+ double outputs_D[kTableLength] = { -+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, -+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 -+ }; -+ float inputfs_S[kTableLength] = { -+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, -+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 -+ }; -+ float inputft_S[kTableLength] = { -+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, -+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 -+ }; -+ float outputs_S[kTableLength] = { -+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, -+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 -+ }; -+ // clang-format on -+ -+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); -+ __ Fld_d(f11, 
MemOperand(a0, offsetof(TestFloat, d))); -+ __ fsub_s(f12, f8, f9); -+ __ fsub_d(f13, f10, f11); -+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputfs_S[i]; -+ test.b = inputft_S[i]; -+ test.c = inputfs_D[i]; -+ test.d = inputft_D[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.resultS, outputs_S[i]); -+ CHECK_EQ(test.resultD, outputs_D[i]); -+ } -+} -+ -+TEST(FMUL) { -+ const int kTableLength = 4; -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct TestFloat { -+ float a; -+ float b; -+ float resultS; -+ double c; -+ double d; -+ double resultD; -+ }; -+ -+ TestFloat test; -+ // clang-format off -+ double inputfs_D[kTableLength] = { -+ 5.3, -5.3, 5.3, -2.9 -+ }; -+ double inputft_D[kTableLength] = { -+ 4.8, 4.8, -4.8, -0.29 -+ }; -+ -+ float inputfs_S[kTableLength] = { -+ 5.3, -5.3, 5.3, -2.9 -+ }; -+ float inputft_S[kTableLength] = { -+ 4.8, 4.8, -4.8, -0.29 -+ }; -+ // clang-format on -+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); -+ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, d))); -+ __ fmul_s(f12, f8, f9); -+ __ fmul_d(f13, f10, f11); -+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputfs_S[i]; -+ test.b = inputft_S[i]; -+ test.c = inputfs_D[i]; -+ test.d = inputft_D[i]; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.resultS, inputfs_S[i] * inputft_S[i]); -+ CHECK_EQ(test.resultD, inputfs_D[i] * inputft_D[i]); -+ } -+} -+ -+TEST(FDIV) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ -+ struct Test { -+ double dOp1; -+ double dOp2; -+ double dRes; -+ float fOp1; -+ float fOp2; -+ float fRes; -+ }; -+ -+ Test test; -+ -+ __ movfcsr2gr(a4); -+ __ movgr2fcsr(zero_reg); -+ -+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, dOp1))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(Test, dOp2))); -+ __ Fld_s(f10, MemOperand(a0, offsetof(Test, fOp1))); -+ __ Fld_s(f11, MemOperand(a0, offsetof(Test, fOp2))); -+ __ fdiv_d(f12, f8, f9); -+ __ fdiv_s(f13, f10, f11); -+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, dRes))); -+ __ Fst_s(f13, MemOperand(a0, offsetof(Test, fRes))); -+ -+ __ movgr2fcsr(a4); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ f.Call(&test, 0, 0, 0, 0); -+ const int test_size = 3; -+ // clang-format off -+ double dOp1[test_size] = { -+ 5.0, DBL_MAX, DBL_MAX}; -+ -+ double dOp2[test_size] = { -+ 2.0, 2.0, -DBL_MAX}; -+ -+ double dRes[test_size] = { -+ 2.5, DBL_MAX / 2.0, -1.0}; -+ 
-+  float fOp1[test_size] = {
-+    5.0, FLT_MAX, FLT_MAX};
-+
-+  float fOp2[test_size] = {
-+    2.0, 2.0, -FLT_MAX};
-+
-+  float fRes[test_size] = {
-+    2.5, FLT_MAX / 2.0, -1.0};
-+  // clang-format on
-+
-+  for (int i = 0; i < test_size; i++) {
-+    test.dOp1 = dOp1[i];
-+    test.dOp2 = dOp2[i];
-+    test.fOp1 = fOp1[i];
-+    test.fOp2 = fOp2[i];
-+
-+    f.Call(&test, 0, 0, 0, 0);
-+    CHECK_EQ(test.dRes, dRes[i]);
-+    CHECK_EQ(test.fRes, fRes[i]);
-+  }
-+
-+  test.dOp1 = DBL_MAX;
-+  test.dOp2 = -0.0;
-+  test.fOp1 = FLT_MAX;
-+  test.fOp2 = -0.0;
-+
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK(!std::isfinite(test.dRes));
-+  CHECK(!std::isfinite(test.fRes));
-+
-+  test.dOp1 = 0.0;
-+  test.dOp2 = -0.0;
-+  test.fOp1 = 0.0;
-+  test.fOp2 = -0.0;
-+
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK(std::isnan(test.dRes));
-+  CHECK(std::isnan(test.fRes));
-+
-+  test.dOp1 = std::numeric_limits<double>::quiet_NaN();
-+  test.dOp2 = -5.0;
-+  test.fOp1 = std::numeric_limits<float>::quiet_NaN();
-+  test.fOp2 = -5.0;
-+
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK(std::isnan(test.dRes));
-+  CHECK(std::isnan(test.fRes));
-+}
-+
-+TEST(FABS) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  struct TestFloat {
-+    double a;
-+    float b;
-+  };
-+
-+  TestFloat test;
-+
-+  __ movfcsr2gr(a4);
-+  __ movgr2fcsr(zero_reg);
-+
-+  __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
-+  __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b)));
-+  __ fabs_d(f10, f8);
-+  __ fabs_s(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, a)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, b)));
-+
-+  __ movgr2fcsr(a4);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  test.a = -2.0;
-+  test.b = -2.0;
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, 2.0);
-+  CHECK_EQ(test.b, 2.0);
-+
-+  test.a = 2.0;
-+  test.b = 2.0;
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, 2.0);
-+  CHECK_EQ(test.b, 2.0);
-+
-+  // Testing biggest positive number
-+  test.a = std::numeric_limits<double>::max();
-+  test.b = std::numeric_limits<float>::max();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, std::numeric_limits<double>::max());
-+  CHECK_EQ(test.b, std::numeric_limits<float>::max());
-+
-+  // Testing smallest negative number
-+  test.a = -std::numeric_limits<double>::max();  // lowest()
-+  test.b = -std::numeric_limits<float>::max();   // lowest()
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, std::numeric_limits<double>::max());
-+  CHECK_EQ(test.b, std::numeric_limits<float>::max());
-+
-+  // Testing smallest positive number
-+  test.a = -std::numeric_limits<double>::min();
-+  test.b = -std::numeric_limits<float>::min();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, std::numeric_limits<double>::min());
-+  CHECK_EQ(test.b, std::numeric_limits<float>::min());
-+
-+  // Testing infinity
-+  test.a =
-+      -std::numeric_limits<double>::max() / std::numeric_limits<double>::min();
-+  test.b =
-+      -std::numeric_limits<float>::max() / std::numeric_limits<float>::min();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK_EQ(test.a, std::numeric_limits<double>::max() /
-+                       std::numeric_limits<double>::min());
-+  CHECK_EQ(test.b, std::numeric_limits<float>::max() /
-+                       std::numeric_limits<float>::min());
-+
-+  test.a = std::numeric_limits<double>::quiet_NaN();
-+  test.b = std::numeric_limits<float>::quiet_NaN();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK(std::isnan(test.a));
-+  CHECK(std::isnan(test.b));
-+
-+  test.a = std::numeric_limits<double>::signaling_NaN();
-+  test.b = std::numeric_limits<float>::signaling_NaN();
-+  f.Call(&test, 0, 0, 0, 0);
-+  CHECK(std::isnan(test.a));
-+  CHECK(std::isnan(test.b));
-+}
-+
-+template <typename T>
-+struct TestCaseMaddMsub {
-+  T fj, fk, fa, fd_fmadd, fd_fmsub, fd_fnmadd, fd_fnmsub;
-+};
-+
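-+// The helper below computes the expected values with std::fma:
-+//   fmadd  = fj * fk + fa        fmsub  = fj * fk - fa
-+//   fnmadd = -(fj * fk + fa)     fnmsub = -(fj * fk - fa)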
-+template <typename T, typename F>
-+void helper_fmadd_fmsub_fnmadd_fnmsub(F func) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  T x = std::sqrt(static_cast<T>(2.0));
-+  T y = std::sqrt(static_cast<T>(3.0));
-+  T z = std::sqrt(static_cast<T>(5.0));
-+  T x2 = 11.11, y2 = 22.22, z2 = 33.33;
-+  // clang-format off
-+  TestCaseMaddMsub<T> test_cases[] = {
-+      {x, y, z, 0.0, 0.0, 0.0, 0.0},
-+      {x, y, -z, 0.0, 0.0, 0.0, 0.0},
-+      {x, -y, z, 0.0, 0.0, 0.0, 0.0},
-+      {x, -y, -z, 0.0, 0.0, 0.0, 0.0},
-+      {-x, y, z, 0.0, 0.0, 0.0, 0.0},
-+      {-x, y, -z, 0.0, 0.0, 0.0, 0.0},
-+      {-x, -y, z, 0.0, 0.0, 0.0, 0.0},
-+      {-x, -y, -z, 0.0, 0.0, 0.0, 0.0},
-+      {-3.14, 0.2345, -123.000056, 0.0, 0.0, 0.0, 0.0},
-+      {7.3, -23.257, -357.1357, 0.0, 0.0, 0.0, 0.0},
-+      {x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
-+      {x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
-+      {x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
-+      {x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
-+      {-x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
-+      {-x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
-+      {-x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
-+      {-x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
-+  };
-+  // clang-format on
-+  if (std::is_same<T, float>::value) {
-+    __ Fld_s(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fj)));
-+    __ Fld_s(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fk)));
-+    __ Fld_s(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fa)));
-+  } else if (std::is_same<T, double>::value) {
-+    __ Fld_d(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fj)));
-+    __ Fld_d(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fk)));
-+    __ Fld_d(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fa)));
-+  } else {
-+    UNREACHABLE();
-+  }
-+
-+  func(assm);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+
-+  const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
-+  TestCaseMaddMsub<T> tc;
-+  for (size_t i = 0; i < kTableLength; i++) {
-+    tc.fj = test_cases[i].fj;
-+    tc.fk = test_cases[i].fk;
-+    tc.fa = test_cases[i].fa;
-+
-+    f.Call(&tc, 0, 0, 0, 0);
-+
-+    T res_fmadd;
-+    T res_fmsub;
-+    T res_fnmadd;
-+    T res_fnmsub;
-+    res_fmadd = std::fma(tc.fj, tc.fk, tc.fa);
-+    res_fmsub = std::fma(tc.fj, tc.fk, -tc.fa);
-+    res_fnmadd = -std::fma(tc.fj, tc.fk, tc.fa);
-+    res_fnmsub = -std::fma(tc.fj, tc.fk, -tc.fa);
-+
-+    CHECK_EQ(tc.fd_fmadd, res_fmadd);
-+    CHECK_EQ(tc.fd_fmsub, res_fmsub);
-+    CHECK_EQ(tc.fd_fnmadd, res_fnmadd);
-+    CHECK_EQ(tc.fd_fnmsub, res_fnmsub);
-+  }
-+}
-+
-+TEST(FMADD_FMSUB_FNMADD_FNMSUB_S) {
-+  helper_fmadd_fmsub_fnmadd_fnmsub<float>([](MacroAssembler& assm) {
-+    __ fmadd_s(f11, f8, f9, f10);
-+    __ Fst_s(f11, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmadd)));
-+    __ fmsub_s(f12, f8, f9, f10);
-+    __ Fst_s(f12, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmsub)));
-+    __ fnmadd_s(f13, f8, f9, f10);
-+    __ Fst_s(f13,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmadd)));
-+    __ fnmsub_s(f14, f8, f9, f10);
-+    __ Fst_s(f14,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmsub)));
-+  });
-+}
-+
-+TEST(FMADD_FMSUB_FNMADD_FNMSUB_D) {
-+  helper_fmadd_fmsub_fnmadd_fnmsub<double>([](MacroAssembler& assm) {
-+    __ fmadd_d(f11, f8, f9, f10);
-+    __ Fst_d(f11,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmadd)));
-+    __ fmsub_d(f12, f8, f9, f10);
-+    __ Fst_d(f12,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmsub)));
-+    __ fnmadd_d(f13, f8, f9, f10);
-+    __ Fst_d(f13,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmadd)));
-+    __ fnmsub_d(f14, f8, f9, f10);
-+    __ Fst_d(f14,
-+             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmsub)));
-+  });
-+}
-+
-+/*
-+TEST(FSQRT_FRSQRT_FRECIP) {
-+  const int kTableLength = 4;
-+  const double deltaDouble = 2E-15;
-+  const float deltaFloat = 2E-7;
-+  const float sqrt2_s = sqrt(2);
-+  const double sqrt2_d = sqrt(2);
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  struct TestFloat {
-+    float a;
-+    float resultS1;
-+    float resultS2;
-+    float resultS3;
-+    double b;
-+    double resultD1;
-+    double resultD2;
-+    double resultD3;
-+  };
-+  TestFloat test;
-+  // clang-format off
-+  double inputs_D[kTableLength] = {
-+    0.0L, 4.0L, 2.0L, 4e-28L
-+  };
-+
-+  double outputs_D[kTableLength] = {
-+    0.0L, 2.0L, sqrt2_d, 2e-14L
-+  };
-+  float inputs_S[kTableLength] = {
-+    0.0, 4.0, 2.0, 4e-28
-+  };
-+
-+  float outputs_S[kTableLength] = {
-+    0.0, 2.0, sqrt2_s, 2e-14
-+  };
-+  // clang-format on
-+  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a)));
-+  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
-+  __ fsqrt_s(f10, f8);
-+  __ fsqrt_d(f11, f9);
-+  __ frsqrt_s(f12, f8);
-+  __ frsqrt_d(f13, f9);
-+  __ frecip_s(f14, f8);
-+  __ frecip_d(f15, f9);
-+  __ Fst_s(f10, MemOperand(a0, offsetof(TestFloat, resultS1)));
-+  __ Fst_d(f11, MemOperand(a0, offsetof(TestFloat, resultD1)));
-+  __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS2)));
-+  __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD2)));
-+  __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resultS3)));
-+  __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, resultD3)));
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+
-+  for (int i = 0; i < kTableLength; i++) {
-+    float f1;
-+    double d1;
-+    test.a = inputs_S[i];
-+    test.b = inputs_D[i];
-+
-+    f.Call(&test, 0, 0, 0, 0);
-+
-+    CHECK_EQ(test.resultS1, outputs_S[i]);
-+    CHECK_EQ(test.resultD1, outputs_D[i]);
-+
-+    if (i != 0) {
-+      f1 = test.resultS2 - 1.0F/outputs_S[i];
-+      f1 = (f1 < 0) ? f1 : -f1;
-+      CHECK(f1 <= deltaFloat);
-+      d1 = test.resultD2 - 1.0L/outputs_D[i];
-+      d1 = (d1 < 0) ? d1 : -d1;
-+      CHECK(d1 <= deltaDouble);
-+      f1 = test.resultS3 - 1.0F/inputs_S[i];
-+      f1 = (f1 < 0) ? f1 : -f1;
-+      CHECK(f1 <= deltaFloat);
-+      d1 = test.resultD3 - 1.0L/inputs_D[i];
-+      d1 = (d1 < 0) ? d1 : -d1;
-+      CHECK(d1 <= deltaDouble);
-+    } else {
-+      CHECK_EQ(test.resultS2, 1.0F/outputs_S[i]);
-+      CHECK_EQ(test.resultD2, 1.0L/outputs_D[i]);
-+      CHECK_EQ(test.resultS3, 1.0F/inputs_S[i]);
-+      CHECK_EQ(test.resultD3, 1.0L/inputs_D[i]);
-+    }
-+  }
-+}*/
-+
-+TEST(LA15) {
-+  // Test chaining of label usages within instructions (issue 1644).
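-+  // Both forward branches below target the same label while it is still
-+  // unbound, so the assembler must chain the two uses and patch them both
-+  // once bind(&target) is reached.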
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  Assembler assm(AssemblerOptions{});
-+
-+  Label target;
-+  __ beq(a0, a1, &target);
-+  __ nop();
-+  __ bne(a0, a1, &target);
-+  __ nop();
-+  __ bind(&target);
-+  __ nop();
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  f.Call(1, 1, 0, 0, 0);
-+}
-+
-+TEST(Trampoline) {
-+  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
-+
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+
-+  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-+  Label done;
-+  size_t nr_calls = kMaxBranchOffset / kInstrSize + 5;
-+
-+  __ xor_(a2, a2, a2);
-+  __ BranchShort(&done, eq, a0, Operand(a1));
-+  for (size_t i = 0; i < nr_calls; ++i) {
-+    __ addi_d(a2, a2, 1);
-+  }
-+  __ bind(&done);
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  assm.GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
-+  CHECK_EQ(0, res);
-+}
-+
-+#undef __
-+
-+}  // namespace internal
-+}  // namespace v8
-diff --git a/deps/v8/test/cctest/test-disasm-loong64.cc b/deps/v8/test/cctest/test-disasm-loong64.cc
-new file mode 100644
-index 00000000..8b074659
---- /dev/null
-+++ b/deps/v8/test/cctest/test-disasm-loong64.cc
-@@ -0,0 +1,896 @@
-+// Copyright 2012 the V8 project authors. All rights reserved.
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+// * Redistributions of source code must retain the above copyright
-+// notice, this list of conditions and the following disclaimer.
-+// * Redistributions in binary form must reproduce the above
-+// copyright notice, this list of conditions and the following
-+// disclaimer in the documentation and/or other materials provided
-+// with the distribution.
-+// * Neither the name of Google Inc. nor the names of its
-+// contributors may be used to endorse or promote products derived
-+// from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+//
-+
-+#include <stdio.h>
-+#include <stdlib.h>
-+
-+#include "src/init/v8.h"
-+
-+#include "src/codegen/macro-assembler.h"
-+#include "src/debug/debug.h"
-+#include "src/diagnostics/disasm.h"
-+#include "src/diagnostics/disassembler.h"
-+#include "src/execution/frames-inl.h"
-+#include "test/cctest/cctest.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
-+  disasm::NameConverter converter;
-+  disasm::Disassembler disasm(converter);
-+  EmbeddedVector<char, 128> disasm_buffer;
-+
-+  /* if (prev_instr_compact_branch) {
-+    disasm.InstructionDecode(disasm_buffer, pc);
-+    pc += 4;
-+  }*/
-+
-+  disasm.InstructionDecode(disasm_buffer, pc);
-+
-+  if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
-+    fprintf(stderr,
-+            "expected: \n"
-+            "%s\n"
-+            "disassembled: \n"
-+            "%s\n\n",
-+            compare_string, disasm_buffer.begin());
-+    return false;
-+  }
-+  return true;
-+}
-+
-+// Set up V8 to a state where we can at least run the assembler and
-+// disassembler. Declare the variables and allocate the data structures used
-+// in the rest of the macros.
-+#define SET_UP()                                             \
-+  CcTest::InitializeVM();                                    \
-+  Isolate* isolate = CcTest::i_isolate();                    \
-+  HandleScope scope(isolate);                                \
-+  byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024));  \
-+  Assembler assm(AssemblerOptions{},                         \
-+                 ExternalAssemblerBuffer(buffer, 4 * 1024)); \
-+  bool failure = false;
-+
-+// This macro assembles one instruction using the preallocated assembler and
-+// disassembles the generated instruction, comparing the output to the expected
-+// value. If the comparison fails an error message is printed, but the test
-+// continues to run until the end.
-+#define COMPARE(asm_, compare_string)                                        \
-+  {                                                                          \
-+    int pc_offset = assm.pc_offset();                                        \
-+    byte* progcounter = &buffer[pc_offset];                                  \
-+    assm.asm_;                                                               \
-+    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
-+  }
-+
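-+// Each expected string pairs the raw 32-bit instruction word (in hex) with
-+// its disassembly, so a single COMPARE checks the encoding and the
-+// disassembler output in one round trip.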
-+// Verify that all invocations of the COMPARE macro passed successfully.
-+// Exit with a failure if at least one of the tests failed.
-+#define VERIFY_RUN()                                 \
-+  if (failure) {                                     \
-+    FATAL("LOONG64 Disassembler tests failed.\n");   \
-+  }
-+
-+#define COMPARE_PC_REL(asm_, compare_string, offset)                          \
-+  {                                                                           \
-+    int pc_offset = assm.pc_offset();                                         \
-+    byte* progcounter = &buffer[pc_offset];                                   \
-+    char str_with_address[100];                                               \
-+    printf("%p\n", static_cast<void*>(progcounter));                          \
-+    snprintf(str_with_address, sizeof(str_with_address), "%s -> %p",          \
-+             compare_string, static_cast<void*>(progcounter + (offset * 4))); \
-+    assm.asm_;                                                                \
-+    if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
-+  }
-+
-+TEST(TypeOp6) {
-+  SET_UP();
-+
-+  COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0");
-+  COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767");
-+  COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768");
-+
-+  VERIFY_RUN();
-+}
-+
-+TEST(TypeOp6PC) {
-+  SET_UP();
-+
-+  COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575",
-+                 1048575);
-+  COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576",
-+                 -1048576);
-+  COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0);
-+
-+  COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575",
-+                 1048575);
-+  COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576",
-+                 -1048576);
-+  COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0);
-+
-+  COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575",
-+                 1048575);
-+  COMPARE_PC_REL(bceqz(FCC0, -1048576),
-+                 "48000010 bceqz fcc0, -1048576", -1048576);
-+  COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0);
-+
-+  COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575",
-+                 1048575);
-+  COMPARE_PC_REL(bcnez(FCC0, -1048576),
-+                 "48000110 bcnez fcc0, -1048576", -1048576);
-+  COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0);
-+
-+  COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431);
-+  COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432);
-+  COMPARE_PC_REL(b(0), "50000000 b 0", 0);
-+
-+  COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767",
-+                 32767);
-+  COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0);
-+
-+  COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767",
-+                 32767);
-+  COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0);
-+
-+  COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767",
-+                 32767);
-+  COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0);
-+
-+  COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767",
-+                 32767);
-+  COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0);
-+
-+  COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767",
-+                 32767);
-+  COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0);
-+
-+  COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767",
-+                 32767);
-+  COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768",
-+                 -32768);
-+  COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0);
-+
-+  VERIFY_RUN();
-+}
-+
-+TEST(TypeOp7) {
-+  SET_UP();
-+
-+  COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287");
-+  COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288");
-+  COMPARE(lu12i_w(a6, 0), "1400000a lu12i.w a6, 0");
-+
-+  COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287");
-+  COMPARE(lu32i_d(t0, -524288), "1700000c lu32i.d t0, -524288");
-+  COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0");
-+
-+  COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1");
-+  COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287");
-+  COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288");
-+  COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0");
-+
-+  COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287");
-+  COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288");
-+  COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0");
-+
-+  COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287");
-+  COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288");
-+  COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0");
-+
-+  COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287");
-+  COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288");
-+  COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0");
-+
-+  VERIFY_RUN();
-+}
-+
-+TEST(TypeOp8) {
-+  SET_UP();
-+
-+  COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764");
-+  COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768");
-+  COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0");
-+
-+  COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764");
-+  COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768");
-+  COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0");
-+
-+  COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764");
-+  COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768");
-+  COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0");
-+
-+  COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764");
-+  COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768");
-+  COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0");
-+
-+  COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764");
-+  COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768");
-+  COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0");
-+
-+  COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764");
-+  COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768");
-+  COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0");
-+
-+  COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764");
-+  COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768");
-+  COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0");
-+
-+  COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764");
-+  COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768");
-+  COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0");
-+
-+  VERIFY_RUN();
-+}
-+
-+TEST(TypeOp10) {
-+  SET_UP();
-+
-+  COMPARE(bstrins_w(a4, a5, 31, 16),
-+          "007f4128 bstrins.w a4, a5, 31, 16");
-+  COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0");
-+
-+  COMPARE(bstrins_d(a3, zero_reg, 17, 0),
-+          "00910007 bstrins.d a3, zero_reg, 17, 0");
-+  COMPARE(bstrins_d(t1, zero_reg, 17, 0),
-+          "0091000d bstrins.d t1, zero_reg, 17, 0");
-+
-+  COMPARE(bstrpick_w(t0, t1, 31, 29),
-+          "007ff5ac bstrpick.w t0, t1, 31, 29");
-+  COMPARE(bstrpick_w(a4, a5, 16, 0),
-+          "00708128 bstrpick.w a4, a5, 16, 0");
-+
-+  COMPARE(bstrpick_d(a5, a5, 31, 0),
-+          "00df0129 bstrpick.d a5, a5, 31, 0");
-+  COMPARE(bstrpick_d(a4, a4, 25, 2),
-+          "00d90908 bstrpick.d a4, a4, 25, 2");
-+
-+  COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047");
-+  COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048");
-+
-+
-+ 
COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047"); -+ COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048"); -+ -+ COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047"); -+ COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048"); -+ -+ COMPARE(addi_d(a0, zero_reg, 2047), -+ "02dffc04 addi.d a0, zero_reg, 2047"); -+ COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048"); -+ -+ COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047"); -+ COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048"); -+ -+ COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff"); -+ COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0"); -+ -+ COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff"); -+ COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0"); -+ -+ COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff"); -+ COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0"); -+ -+ COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047"); -+ COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048"); -+ -+ COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047"); -+ COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048"); -+ -+ COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047"); -+ COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048"); -+ -+ COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047"); -+ COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048"); -+ COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0"); -+ -+ COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047"); -+ COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048"); -+ -+ COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047"); -+ COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048"); -+ -+ COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047"); -+ COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048"); -+ -+ COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047"); -+ COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048"); -+ -+ COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047"); -+ COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048"); -+ -+ COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047"); -+ COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048"); -+ -+ COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047"); -+ COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048"); -+ -+ COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047"); -+ COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048"); -+ -+ COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047"); -+ COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048"); -+ -+ COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047"); -+ COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048"); -+ -+ COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047"); -+ COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048"); -+ -+ VERIFY_RUN(); -+} -+ -+TEST(TypeOp12) { -+ SET_UP(); -+ -+ COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3"); -+ COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7"); -+ -+ COMPARE(fmadd_d(f8, f9, f10, f11), -+ "0825a928 fmadd.d f8, f9, f10, f11"); -+ COMPARE(fmadd_d(f12, f13, f14, f15), -+ "0827b9ac fmadd.d f12, f13, f14, f15"); -+ -+ COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3"); -+ COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7"); -+ -+ COMPARE(fmsub_d(f8, f9, f10, f11), -+ "0865a928 fmsub.d f8, f9, f10, f11"); -+ 
COMPARE(fmsub_d(f12, f13, f14, f15), -+ "0867b9ac fmsub.d f12, f13, f14, f15"); -+ -+ COMPARE(fnmadd_s(f0, f1, f2, f3), -+ "08918820 fnmadd.s f0, f1, f2, f3"); -+ COMPARE(fnmadd_s(f4, f5, f6, f7), -+ "089398a4 fnmadd.s f4, f5, f6, f7"); -+ -+ COMPARE(fnmadd_d(f8, f9, f10, f11), -+ "08a5a928 fnmadd.d f8, f9, f10, f11"); -+ COMPARE(fnmadd_d(f12, f13, f14, f15), -+ "08a7b9ac fnmadd.d f12, f13, f14, f15"); -+ -+ COMPARE(fnmsub_s(f0, f1, f2, f3), -+ "08d18820 fnmsub.s f0, f1, f2, f3"); -+ COMPARE(fnmsub_s(f4, f5, f6, f7), -+ "08d398a4 fnmsub.s f4, f5, f6, f7"); -+ -+ COMPARE(fnmsub_d(f8, f9, f10, f11), -+ "08e5a928 fnmsub.d f8, f9, f10, f11"); -+ COMPARE(fnmsub_d(f12, f13, f14, f15), -+ "08e7b9ac fnmsub.d f12, f13, f14, f15"); -+ -+ COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0), -+ "0c100820 fcmp.caf.s fcc0, f1, f2"); -+ COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0), -+ "0c1418a0 fcmp.cun.s fcc0, f5, f6"); -+ COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0), -+ "0c122920 fcmp.ceq.s fcc0, f9, f10"); -+ COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0), -+ "0c1639a0 fcmp.cueq.s fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0), -+ "0c110820 fcmp.clt.s fcc0, f1, f2"); -+ COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0), -+ "0c1518a0 fcmp.cult.s fcc0, f5, f6"); -+ COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0), -+ "0c132920 fcmp.cle.s fcc0, f9, f10"); -+ COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0), -+ "0c1739a0 fcmp.cule.s fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0), -+ "0c180820 fcmp.cne.s fcc0, f1, f2"); -+ COMPARE(fcmp_cond_s(COR, f5, f6, FCC0), -+ "0c1a18a0 fcmp.cor.s fcc0, f5, f6"); -+ COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0), -+ "0c1c2920 fcmp.cune.s fcc0, f9, f10"); -+ COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0), -+ "0c10b9a0 fcmp.saf.s fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0), -+ "0c148820 fcmp.sun.s fcc0, f1, f2"); -+ COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0), -+ "0c1298a0 fcmp.seq.s fcc0, f5, f6"); -+ COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0), -+ "0c16a920 fcmp.sueq.s fcc0, f9, f10"); -+ // COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0), -+ // "0c11b9a0 fcmp.slt.s fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0), -+ "0c158820 fcmp.sult.s fcc0, f1, f2"); -+ COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0), -+ "0c1398a0 fcmp.sle.s fcc0, f5, f6"); -+ COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0), -+ "0c17a920 fcmp.sule.s fcc0, f9, f10"); -+ COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0), -+ "0c18b9a0 fcmp.sne.s fcc0, f13, f14"); -+ COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0), -+ "0c1ab9a0 fcmp.sor.s fcc0, f13, f14"); -+ COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0), -+ "0c1c8820 fcmp.sune.s fcc0, f1, f2"); -+ -+ COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0), -+ "0c200820 fcmp.caf.d fcc0, f1, f2"); -+ COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0), -+ "0c2418a0 fcmp.cun.d fcc0, f5, f6"); -+ COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0), -+ "0c222920 fcmp.ceq.d fcc0, f9, f10"); -+ COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0), -+ "0c2639a0 fcmp.cueq.d fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0), -+ "0c210820 fcmp.clt.d fcc0, f1, f2"); -+ COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0), -+ "0c2518a0 fcmp.cult.d fcc0, f5, f6"); -+ COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0), -+ "0c232920 fcmp.cle.d fcc0, f9, f10"); -+ COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0), -+ "0c2739a0 fcmp.cule.d fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0), -+ "0c280820 fcmp.cne.d fcc0, f1, f2"); -+ COMPARE(fcmp_cond_d(COR, f5, f6, FCC0), -+ "0c2a18a0 fcmp.cor.d fcc0, f5, f6"); -+ COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0), -+ "0c2c2920 
fcmp.cune.d fcc0, f9, f10"); -+ COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0), -+ "0c20b9a0 fcmp.saf.d fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0), -+ "0c248820 fcmp.sun.d fcc0, f1, f2"); -+ COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0), -+ "0c2298a0 fcmp.seq.d fcc0, f5, f6"); -+ COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0), -+ "0c26a920 fcmp.sueq.d fcc0, f9, f10"); -+ // COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0), -+ // "0c21b9a0 fcmp.slt.d fcc0, f13, f14"); -+ -+ COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0), -+ "0c258820 fcmp.sult.d fcc0, f1, f2"); -+ COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0), -+ "0c2398a0 fcmp.sle.d fcc0, f5, f6"); -+ COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0), -+ "0c27a920 fcmp.sule.d fcc0, f9, f10"); -+ COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0), -+ "0c28b9a0 fcmp.sne.d fcc0, f13, f14"); -+ COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0), -+ "0c2ab9a0 fcmp.sor.d fcc0, f13, f14"); -+ COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0), -+ "0c2c8820 fcmp.sune.d fcc0, f1, f2"); -+ -+ VERIFY_RUN(); -+} -+ -+TEST(TypeOp14) { -+ SET_UP(); -+ -+ COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1"); -+ COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3"); -+ COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4"); -+ -+ COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1"); -+ COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3"); -+ COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4"); -+ -+ COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1"); -+ COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3"); -+ COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4"); -+ -+ COMPARE(bytepick_w(t4, t5, t6, 0), -+ "00084a30 bytepick.w t4, t5, t6, 0"); -+ COMPARE(bytepick_w(a0, a1, a2, 3), -+ "000998a4 bytepick.w a0, a1, a2, 3"); -+ -+ COMPARE(bytepick_d(a6, a7, t0, 0), -+ "000c316a bytepick.d a6, a7, t0, 0"); -+ COMPARE(bytepick_d(t4, t5, t6, 7), -+ "000fca30 bytepick.d t4, t5, t6, 7"); -+ -+ COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31"); -+ COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1"); -+ -+ COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63"); -+ COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1"); -+ -+ COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31"); -+ COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1"); -+ -+ COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63"); -+ COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1"); -+ -+ COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63"); -+ COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1"); -+ -+ COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31"); -+ COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1"); -+ -+ COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1"); -+ -+ VERIFY_RUN(); -+} -+ -+TEST(TypeOp17) { -+ SET_UP(); -+ -+ COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4"); -+ COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4"); -+ -+ COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6"); -+ COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3"); -+ -+ COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1"); -+ COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1"); -+ -+ COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2"); -+ COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3"); -+ -+ COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3"); -+ COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2"); -+ -+ COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6"); -+ 
COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4"); -+ -+ COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3"); -+ COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5"); -+ -+ COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0"); -+ COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3"); -+ -+ COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg"); -+ COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg"); -+ -+ COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6"); -+ COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7"); -+ -+ COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7"); -+ COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6"); -+ -+ COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2"); -+ COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5"); -+ -+ COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0"); -+ COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3"); -+ -+ COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6"); -+ COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2"); -+ -+ COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7"); -+ COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3"); -+ -+ COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3"); -+ COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4"); -+ -+ COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4"); -+ COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6"); -+ -+ COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3"); -+ COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0"); -+ -+ COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0"); -+ COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0"); -+ -+ COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5"); -+ COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0"); -+ -+ COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3"); -+ COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6"); -+ -+ COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2"); -+ COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5"); -+ -+ COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7"); -+ COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6"); -+ -+ COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7"); -+ COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2"); -+ -+ COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0"); -+ COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3"); -+ -+ COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1"); -+ COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5"); -+ -+ COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5"); -+ COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0"); -+ -+ COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3"); -+ COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6"); -+ -+ COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2"); -+ COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5"); -+ -+ COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0"); -+ COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3"); -+ -+ COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3"); -+ COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6"); -+ -+ COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6"); -+ COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3"); -+ -+ COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3"); -+ COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6"); -+ -+ COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2"); -+ COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, 
a4, a5"); -+ -+ COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6"); -+ COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5"); -+ -+ COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0"); -+ COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3"); -+ -+ COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6"); -+ COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2"); -+ -+ COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5"); -+ COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0"); -+ -+ COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5"); -+ COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8"); -+ -+ COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0"); -+ COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2"); -+ -+ COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11"); -+ COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14"); -+ -+ COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30"); -+ COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1"); -+ -+ COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17"); -+ COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20"); -+ -+ COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1"); -+ COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0"); -+ -+ COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2"); -+ COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5"); -+ -+ COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1"); -+ COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0"); -+ -+ COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11"); -+ COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8"); -+ -+ COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0"); -+ COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0"); -+ -+ COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14"); -+ COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17"); -+ -+ COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20"); -+ COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2"); -+ -+ COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2"); -+ COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5"); -+ COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0"); -+ -+ COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3"); -+ COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6"); -+ COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2"); -+ -+ COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5"); -+ COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0"); -+ COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3"); -+ -+ COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6"); -+ COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6"); -+ -+ COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6"); -+ COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6"); -+ -+ COMPARE(dbar(0), "38720000 dbar 0x0(0)"); -+ COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)"); -+ -+ COMPARE(break_(0), "002a0000 break code: 0x0(0)"); -+ COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)"); -+ -+ COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5"); -+ COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0"); -+ -+ COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3"); -+ COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6"); -+ -+ COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6"); -+ COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1"); -+ -+ COMPARE(amadd_w(t2, t3, t4), "38613e0e 
amadd.w t2, t3, t4"); -+ COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0"); -+ -+ COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3"); -+ COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6"); -+ -+ COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1"); -+ COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4"); -+ -+ COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0"); -+ COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3"); -+ -+ COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6"); -+ COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1"); -+ -+ COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4"); -+ COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0"); -+ -+ COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3"); -+ COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6"); -+ -+ COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1"); -+ COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4"); -+ -+ COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2"); -+ COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5"); -+ -+ COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0"); -+ COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3"); -+ -+ COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6"); -+ COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2"); -+ -+ COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5"); -+ COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0"); -+ -+ COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2"); -+ COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5"); -+ -+ COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8"); -+ COMPARE(fcopysign_d(f9, f10, f12), -+ "01133149 fcopysign.d f9, f10, f12"); -+ -+ VERIFY_RUN(); -+} -+ -+TEST(TypeOp22) { -+ SET_UP(); -+ -+ COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0"); -+ COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1"); -+ COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3"); -+ COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5"); -+ -+ COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1"); -+ COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3"); -+ COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5"); -+ COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7"); -+ -+ COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7"); -+ COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1"); -+ COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3"); -+ COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5"); -+ -+ COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1"); -+ COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3"); -+ -+ COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5"); -+ COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7"); -+ COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1"); -+ COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3"); -+ -+ COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5"); -+ COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1"); -+ -+ COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3"); -+ COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0"); -+ -+ COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1"); -+ COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0"); -+ -+ COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5"); -+ COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0"); -+ -+ COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7"); -+ COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1"); -+ COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0"); 
-+ -+ COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6"); -+ COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6"); -+ -+ COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3"); -+ COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0"); -+ -+ COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6"); -+ COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3"); -+ -+ COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30"); -+ -+ COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30"); -+ COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30"); -+ -+ COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0"); -+ COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0"); -+ -+ COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2"); -+ COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr"); -+ -+ COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0"); -+ COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1"); -+ -+ COMPARE(movgr2cf(FCC2, a0), "0114d882 movgr2cf fcc2, a0"); -+ COMPARE(movcf2gr(a1, FCC3), "0114dc65 movcf2gr a1, fcc3"); -+ -+ COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0"); -+ COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0"); -+ -+ COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9"); -+ COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11"); -+ COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13"); -+ COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15"); -+ -+ COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17"); -+ COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19"); -+ COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21"); -+ COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1"); -+ -+ COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4"); -+ COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4"); -+ COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0"); -+ COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30"); -+ -+ COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3"); -+ COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5"); -+ COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7"); -+ COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9"); -+ -+ COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11"); -+ COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13"); -+ COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15"); -+ COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17"); -+ -+ COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19"); -+ COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21"); -+ COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1"); -+ COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3"); -+ -+ COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5"); -+ COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7"); -+ -+ COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9"); -+ COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11"); -+ -+ COMPARE(frsqrt_s(f12, f13), "011465ac frsqrt.s f12, f13"); -+ COMPARE(frsqrt_d(f14, f15), "011469ee frsqrt.d f14, f15"); -+ -+ COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17"); -+ COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19"); -+ -+ COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21"); -+ COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1"); -+ -+ VERIFY_RUN(); -+} -+ -+} // namespace internal -+} // namespace v8 -diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc 
b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
-new file mode 100644
-index 00000000..ef536b86
---- /dev/null
-+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
-@@ -0,0 +1,2894 @@
-+// Copyright 2013 the V8 project authors. All rights reserved.
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+// * Redistributions of source code must retain the above copyright
-+// notice, this list of conditions and the following disclaimer.
-+// * Redistributions in binary form must reproduce the above
-+// copyright notice, this list of conditions and the following
-+// disclaimer in the documentation and/or other materials provided
-+// with the distribution.
-+// * Neither the name of Google Inc. nor the names of its
-+// contributors may be used to endorse or promote products derived
-+// from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+#include <stdlib.h>
-+
-+#include <iostream>  // NOLINT(readability/streams)
-+
-+#include "src/base/utils/random-number-generator.h"
-+#include "src/codegen/macro-assembler.h"
-+#include "src/execution/simulator.h"
-+#include "src/init/v8.h"
-+#include "src/objects/heap-number.h"
-+#include "src/objects/objects-inl.h"
-+#include "src/utils/ostreams.h"
-+#include "test/cctest/cctest.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// TODO(mips64): Refine these signatures per test case.
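-+// All generated snippets are entered through a five-argument thunk; the
-+// aliases below differ only in how those arguments are typed (e.g. F3
-+// receives a struct pointer as its first argument, FV two int64_t values).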
-+using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
-+using F1 = void*(int x, int p1, int p2, int p3, int p4);
-+using F2 = void*(int x, int y, int p2, int p3, int p4);
-+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
-+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
-+
-+#define __ masm->
-+
-+TEST(BYTESWAP) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+
-+  struct T {
-+    uint64_t s8;
-+    uint64_t s4;
-+    uint64_t s2;
-+    uint64_t u4;
-+    uint64_t u2;
-+  };
-+
-+  T t;
-+  // clang-format off
-+  uint64_t test_values[] = {0x5612FFCD9D327ACC,
-+                            0x781A15C3,
-+                            0xFCDE,
-+                            0x9F,
-+                            0xC81A15C3,
-+                            0x8000000000000000,
-+                            0xFFFFFFFFFFFFFFFF,
-+                            0x0000000080000000,
-+                            0x0000000000008000};
-+  // clang-format on
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+
-+  MacroAssembler* masm = &assembler;
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, s8)));
-+  __ ByteSwapSigned(a4, a4, 8);
-+  __ St_d(a4, MemOperand(a0, offsetof(T, s8)));
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, s4)));
-+  __ ByteSwapSigned(a4, a4, 4);
-+  __ St_d(a4, MemOperand(a0, offsetof(T, s4)));
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, s2)));
-+  __ ByteSwapSigned(a4, a4, 2);
-+  __ St_d(a4, MemOperand(a0, offsetof(T, s2)));
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, u4)));
-+  __ ByteSwapSigned(a4, a4, 4);
-+  __ St_d(a4, MemOperand(a0, offsetof(T, u4)));
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, u2)));
-+  __ ByteSwapSigned(a4, a4, 2);
-+  __ St_d(a4, MemOperand(a0, offsetof(T, u2)));
-+
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+
-+  for (size_t i = 0; i < arraysize(test_values); i++) {
-+    int32_t in_s4 = static_cast<int32_t>(test_values[i]);
-+    int16_t in_s2 = static_cast<int16_t>(test_values[i]);
-+    uint32_t in_u4 = static_cast<uint32_t>(test_values[i]);
-+    uint16_t in_u2 = static_cast<uint16_t>(test_values[i]);
-+
-+    t.s8 = test_values[i];
-+    t.s4 = static_cast<uint64_t>(in_s4);
-+    t.s2 = static_cast<uint64_t>(in_s2);
-+    t.u4 = static_cast<uint64_t>(in_u4);
-+    t.u2 = static_cast<uint64_t>(in_u2);
-+
-+    f.Call(&t, 0, 0, 0, 0);
-+
-+    CHECK_EQ(ByteReverse(test_values[i]), t.s8);
-+    CHECK_EQ(ByteReverse(in_s4), static_cast<int32_t>(t.s4));
-+    CHECK_EQ(ByteReverse(in_s2), static_cast<int16_t>(t.s2));
-+    CHECK_EQ(ByteReverse(in_u4), static_cast<uint32_t>(t.u4));
-+    CHECK_EQ(ByteReverse(in_u2), static_cast<uint16_t>(t.u2));
-+  }
-+}
-+
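-+// Each reference constant below has exactly one bit cleared, which should
-+// steer li() through a variety of lu12i.w/ori/lu32i.d/lu52i.d
-+// materialization sequences.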
-+TEST(LoadConstants) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope handles(isolate);
-+
-+  int64_t refConstants[64];
-+  int64_t result[64];
-+
-+  int64_t mask = 1;
-+  for (int i = 0; i < 64; i++) {
-+    refConstants[i] = ~(mask << i);
-+  }
-+
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  __ or_(a4, a0, zero_reg);
-+  for (int i = 0; i < 64; i++) {
-+    // Load constant.
-+    __ li(a5, Operand(refConstants[i]));
-+    __ St_d(a5, MemOperand(a4, zero_reg));
-+    __ Add_d(a4, a4, Operand(kPointerSize));
-+  }
-+
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+
-+  auto f = GeneratedCode<FV>::FromCode(*code);
-+  (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
-+  // Check results.
-+  for (int i = 0; i < 64; i++) {
-+    CHECK(refConstants[i] == result[i]);
-+  }
-+}
-+
-+TEST(LoadAddress) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope handles(isolate);
-+
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+  Label to_jump, skip;
-+  __ mov(a4, a0);
-+
-+  __ Branch(&skip);
-+  __ bind(&to_jump);
-+  __ nop();
-+  __ nop();
-+  __ jirl(zero_reg, ra, 0);
-+  __ bind(&skip);
-+  __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD);
-+  int check_size = masm->InstructionsGeneratedSince(&skip);
-+  CHECK_EQ(3, check_size);
-+  __ jirl(zero_reg, a4, 0);
-+  __ stop();
-+  __ stop();
-+  __ stop();
-+  __ stop();
-+  __ stop();
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+
-+  auto f = GeneratedCode<F1>::FromCode(*code);
-+  (void)f.Call(0, 0, 0, 0, 0);
-+  // Check results.
-+}
-+
-+TEST(jump_tables4) {
-+  // Similar to test-assembler-mips jump_tables1, with extra test for branch
-+  // trampoline required before emission of the dd table (where trampolines are
-+  // blocked), and proper transition to long-branch mode.
-+  // Regression test for v8:4294.
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  const int kNumCases = 512;
-+  int values[kNumCases];
-+  isolate->random_number_generator()->NextBytes(values, sizeof(values));
-+  Label labels[kNumCases];
-+  Label near_start, end, done;
-+
-+  __ Push(ra);
-+  __ xor_(a2, a2, a2);
-+
-+  __ Branch(&end);
-+  __ bind(&near_start);
-+
-+  for (int i = 0; i < 32768 - 256; ++i) {
-+    __ Add_d(a2, a2, 1);
-+  }
-+
-+  __ GenerateSwitchTable(a0, kNumCases,
-+                         [&labels](size_t i) { return labels + i; });
-+
-+  for (int i = 0; i < kNumCases; ++i) {
-+    __ bind(&labels[i]);
-+    __ li(a2, values[i]);
-+    __ Branch(&done);
-+  }
-+
-+  __ bind(&done);
-+  __ Pop(ra);
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  __ bind(&end);
-+  __ Branch(&near_start);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+#ifdef OBJECT_PRINT
-+  code->Print(std::cout);
-+#endif
-+  auto f = GeneratedCode<F1>::FromCode(*code);
-+  for (int i = 0; i < kNumCases; ++i) {
-+    int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
-+    ::printf("f(%d) = %" PRId64 "\n", i, res);
-+    CHECK_EQ(values[i], res);
-+  }
-+}
-+
-+TEST(jump_tables6) {
-+  // Similar to test-assembler-mips jump_tables1, with extra test for branch
-+  // trampoline required after emission of the dd table (where trampolines are
-+  // blocked).
This test checks if number of really generated instructions is -+ // greater than number of counted instructions from code, as we are expecting -+ // generation of trampoline in this case -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ const int kSwitchTableCases = 40; -+ -+ const int kMaxBranchOffset = (1 << (18 - 1)) - 1; -+ const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize; -+ const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize; -+ -+ const int kMaxOffsetForTrampolineStart = -+ kMaxBranchOffset - 16 * kTrampolineSlotsSize; -+ const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) - -+ (kSwitchTablePrologueSize + 2 * kSwitchTableCases) - -+ 20; -+ -+ int values[kSwitchTableCases]; -+ isolate->random_number_generator()->NextBytes(values, sizeof(values)); -+ Label labels[kSwitchTableCases]; -+ Label near_start, end, done; -+ -+ __ Push(ra); -+ __ xor_(a2, a2, a2); -+ -+ int offs1 = masm->pc_offset(); -+ int gen_insn = 0; -+ -+ __ Branch(&end); -+ gen_insn += 1; -+ __ bind(&near_start); -+ -+ for (int i = 0; i < kFillInstr; ++i) { -+ __ Add_d(a2, a2, 1); -+ } -+ gen_insn += kFillInstr; -+ -+ __ GenerateSwitchTable(a0, kSwitchTableCases, -+ [&labels](size_t i) { return labels + i; }); -+ gen_insn += (kSwitchTablePrologueSize + 2 * kSwitchTableCases); -+ -+ for (int i = 0; i < kSwitchTableCases; ++i) { -+ __ bind(&labels[i]); -+ __ li(a2, values[i]); -+ __ Branch(&done); -+ } -+ gen_insn += 3 * kSwitchTableCases; -+ -+ // If offset from here to first branch instr is greater than max allowed -+ // offset for trampoline ... -+ CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1); -+ // ... 
number of generated instructions must be greater than "gen_insn",
-+ // as we are expecting trampoline generation.
-+ CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / kInstrSize);
-+
-+ __ bind(&done);
-+ __ Pop(ra);
-+ __ or_(a0, a2, zero_reg);
-+ __ jirl(zero_reg, ra, 0);
-+
-+ __ bind(&end);
-+ __ Branch(&near_start);
-+
-+ CodeDesc desc;
-+ masm->GetCode(isolate, &desc);
-+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+#ifdef OBJECT_PRINT
-+ code->Print(std::cout);
-+#endif
-+ auto f = GeneratedCode::FromCode(*code);
-+ for (int i = 0; i < kNumCases; ++i) {
-+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
-+ ::printf("f(%d) = %" PRId64 "\n", i, res);
-+ CHECK_EQ(values[i], res);
-+ }
-+}
-+
-+static uint64_t run_alsl_w(uint32_t rj, uint32_t rk, int8_t sa) {
-+ Isolate* isolate = CcTest::i_isolate();
-+ HandleScope scope(isolate);
-+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+ MacroAssembler* masm = &assembler;
-+
-+ __ Alsl_w(a2, a0, a1, sa);
-+ __ or_(a0, a2, zero_reg);
-+ __ jirl(zero_reg, ra, 0);
-+
-+ CodeDesc desc;
-+ assembler.GetCode(isolate, &desc);
-+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+
-+ auto f = GeneratedCode::FromCode(*code);
-+
-+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rj, rk, 0, 0, 0));
-+
-+ return res;
-+}
-+
-+TEST(ALSL_W) {
-+ CcTest::InitializeVM();
-+ struct TestCaseAlsl {
-+ int32_t rj;
-+ int32_t rk;
-+ uint8_t sa;
-+ uint64_t expected_res;
-+ };
-+ // clang-format off
-+ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res
-+ {0x1, 0x4, 1, 0x6},
-+ {0x1, 0x4, 2, 0x8},
-+ {0x1, 0x4, 3, 0xC},
-+ {0x1, 0x4, 4, 0x14},
-+ {0x1, 0x4, 5, 0x24},
-+ {0x1, 0x0, 1, 0x2},
-+ {0x1, 0x0, 2, 0x4},
-+ {0x1, 0x0, 3, 0x8},
-+ {0x1, 0x0, 4, 0x10},
-+ {0x1, 0x0, 5, 0x20},
-+ {0x0, 0x4, 1, 0x4},
-+ {0x0, 0x4, 2, 0x4},
-+ {0x0, 0x4, 3, 0x4},
-+ {0x0, 0x4, 4, 0x4},
-+ {0x0, 0x4, 5, 0x4},
-+
-+ // Shift overflow.
-+ {INT32_MAX, 0x4, 1, 0x2},
-+ {INT32_MAX >> 1, 0x4, 2, 0x0},
-+ {INT32_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC},
-+ {INT32_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4},
-+ {INT32_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4},
-+
-+ // Signed addition overflow.
-+ {0x1, INT32_MAX - 1, 1, 0xFFFFFFFF80000000},
-+ {0x1, INT32_MAX - 3, 2, 0xFFFFFFFF80000000},
-+ {0x1, INT32_MAX - 7, 3, 0xFFFFFFFF80000000},
-+ {0x1, INT32_MAX - 15, 4, 0xFFFFFFFF80000000},
-+ {0x1, INT32_MAX - 31, 5, 0xFFFFFFFF80000000},
-+
-+ // Addition overflow.
-+ {0x1, -2, 1, 0x0},
-+ {0x1, -4, 2, 0x0},
-+ {0x1, -8, 3, 0x0},
-+ {0x1, -16, 4, 0x0},
-+ {0x1, -32, 5, 0x0}};
-+ // clang-format on
-+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl);
-+ for (size_t i = 0; i < nr_test_cases; ++i) {
-+ uint64_t res = run_alsl_w(tc[i].rj, tc[i].rk, tc[i].sa);
-+ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Alsl_w(a0, %x, %x, %hhu)\n", -+ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+ -+static uint64_t run_alsl_d(uint64_t rj, uint64_t rk, int8_t sa) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ __ Alsl_d(a2, a0, a1, sa); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assembler.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ -+ uint64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(ALSL_D) { -+ CcTest::InitializeVM(); -+ struct TestCaseAlsl { -+ int64_t rj; -+ int64_t rk; -+ uint8_t sa; -+ uint64_t expected_res; -+ }; -+ // clang-format off -+ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res -+ {0x1, 0x4, 1, 0x6}, -+ {0x1, 0x4, 2, 0x8}, -+ {0x1, 0x4, 3, 0xC}, -+ {0x1, 0x4, 4, 0x14}, -+ {0x1, 0x4, 5, 0x24}, -+ {0x1, 0x0, 1, 0x2}, -+ {0x1, 0x0, 2, 0x4}, -+ {0x1, 0x0, 3, 0x8}, -+ {0x1, 0x0, 4, 0x10}, -+ {0x1, 0x0, 5, 0x20}, -+ {0x0, 0x4, 1, 0x4}, -+ {0x0, 0x4, 2, 0x4}, -+ {0x0, 0x4, 3, 0x4}, -+ {0x0, 0x4, 4, 0x4}, -+ {0x0, 0x4, 5, 0x4}, -+ -+ // Shift overflow. -+ {INT64_MAX, 0x4, 1, 0x2}, -+ {INT64_MAX >> 1, 0x4, 2, 0x0}, -+ {INT64_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC}, -+ {INT64_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4}, -+ {INT64_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4}, -+ -+ // Signed addition overflow. -+ {0x1, INT64_MAX - 1, 1, 0x8000000000000000}, -+ {0x1, INT64_MAX - 3, 2, 0x8000000000000000}, -+ {0x1, INT64_MAX - 7, 3, 0x8000000000000000}, -+ {0x1, INT64_MAX - 15, 4, 0x8000000000000000}, -+ {0x1, INT64_MAX - 31, 5, 0x8000000000000000}, -+ -+ // Addition overflow. -+ {0x1, -2, 1, 0x0}, -+ {0x1, -4, 2, 0x0}, -+ {0x1, -8, 3, 0x0}, -+ {0x1, -16, 4, 0x0}, -+ {0x1, -32, 5, 0x0}}; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ uint64_t res = run_alsl_d(tc[i].rj, tc[i].rk, tc[i].sa); -+ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Dlsa(v0, %" PRIx64 ", %" PRIx64 -+ ", %hhu)\n", -+ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); -+ CHECK_EQ(tc[i].expected_res, res); -+ } -+} -+// clang-format off -+static const std::vector ffint_ftintrz_uint32_test_values() { -+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00, -+ 0x7FFFFFFF, 0x80000000, 0x80000001, -+ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+static const std::vector ffint_ftintrz_int32_test_values() { -+ static const int32_t kValues[] = { -+ static_cast(0x00000000), static_cast(0x00000001), -+ static_cast(0x00FFFF00), static_cast(0x7FFFFFFF), -+ static_cast(0x80000000), static_cast(0x80000001), -+ static_cast(0x80FFFF00), static_cast(0x8FFFFFFF), -+ static_cast(0xFFFFFFFF)}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+static const std::vector ffint_ftintrz_uint64_test_values() { -+ static const uint64_t kValues[] = { -+ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000, -+ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001, -+ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+static const std::vector ffint_ftintrz_int64_test_values() { -+ static const int64_t kValues[] = {static_cast(0x0000000000000000), -+ static_cast(0x0000000000000001), -+ static_cast(0x0000FFFFFFFF0000), -+ static_cast(0x7FFFFFFFFFFFFFFF), -+ static_cast(0x8000000000000000), -+ static_cast(0x8000000000000001), -+ static_cast(0x8000FFFFFFFF0000), -+ static_cast(0x8FFFFFFFFFFFFFFF), -+ static_cast(0xFFFFFFFFFFFFFFFF)}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ // clang-off on -+ -+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... 
} -+#define FOR_INPUTS(ctype, itype, var, test_vector) \ -+ std::vector var##_vec = test_vector(); \ -+ for (std::vector::iterator var = var##_vec.begin(); \ -+ var != var##_vec.end(); ++var) -+ -+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \ -+ std::vector var##_vec = test_vector(); \ -+ std::vector::iterator var; \ -+ std::vector::reverse_iterator var2; \ -+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \ -+ var != var##_vec.end(); ++var, ++var2) -+ -+#define FOR_ENUM_INPUTS(var, type, test_vector) \ -+ FOR_INPUTS(enum type, type, var, test_vector) -+#define FOR_STRUCT_INPUTS(var, type, test_vector) \ -+ FOR_INPUTS(struct type, type, var, test_vector) -+#define FOR_INT32_INPUTS(var, test_vector) \ -+ FOR_INPUTS(int32_t, int32, var, test_vector) -+#define FOR_INT32_INPUTS2(var, var2, test_vector) \ -+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector) -+#define FOR_INT64_INPUTS(var, test_vector) \ -+ FOR_INPUTS(int64_t, int64, var, test_vector) -+#define FOR_UINT32_INPUTS(var, test_vector) \ -+ FOR_INPUTS(uint32_t, uint32, var, test_vector) -+#define FOR_UINT64_INPUTS(var, test_vector) \ -+ FOR_INPUTS(uint64_t, uint64, var, test_vector) -+ -+template -+RET_TYPE run_CVT(IN_TYPE x, Func GenerateConvertInstructionFunc) { -+ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4); -+ -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assm; -+ -+ GenerateConvertInstructionFunc(masm); -+ __ movfr2gr_d(a2, f9); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ -+ return reinterpret_cast(f.Call(x, 0, 0, 0, 0)); -+} -+ -+TEST(Ffint_s_uw_Ftintrz_uw_s) { -+ CcTest::InitializeVM(); -+ FOR_UINT32_INPUTS(i, ffint_ftintrz_uint32_test_values) { -+ -+ uint32_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ Ffint_s_uw(f8, a0); -+ __ movgr2frh_w(f9, zero_reg); -+ __ Ftintrz_uw_s(f9, f8, f10); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(Ffint_s_ul_Ftintrz_ul_s) { -+ CcTest::InitializeVM(); -+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { -+ uint64_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ Ffint_s_ul(f8, a0); -+ __ Ftintrz_ul_s(f9, f8, f10, a2); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(Ffint_d_uw_Ftintrz_uw_d) { -+ CcTest::InitializeVM(); -+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { -+ uint32_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ Ffint_d_uw(f8, a0); -+ __ movgr2frh_w(f9, zero_reg); -+ __ Ftintrz_uw_d(f9, f8, f10); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(Ffint_d_ul_Ftintrz_ul_d) { -+ CcTest::InitializeVM(); -+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { -+ uint64_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ Ffint_d_ul(f8, a0); -+ __ Ftintrz_ul_d(f9, f8, f10, a2); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(Ffint_d_l_Ftintrz_l_ud) { -+ CcTest::InitializeVM(); -+ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { -+ int64_t input = *i; -+ uint64_t abs_input = (input < 0) ? 
-input : input; -+ auto fn = [](MacroAssembler* masm) { -+ __ movgr2fr_d(f8, a0); -+ __ ffint_d_l(f10, f8); -+ __ Ftintrz_l_ud(f9, f10, f11); -+ }; -+ CHECK_EQ(static_cast(abs_input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(ffint_d_l_Ftint_l_d) { -+ CcTest::InitializeVM(); -+ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { -+ int64_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ movgr2fr_d(f8, a0); -+ __ ffint_d_l(f10, f8); -+ __ Ftintrz_l_d(f9, f10); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+TEST(ffint_d_w_Ftint_w_d) { -+ CcTest::InitializeVM(); -+ FOR_INT32_INPUTS(i, ffint_ftintrz_int32_test_values) { -+ int32_t input = *i; -+ auto fn = [](MacroAssembler* masm) { -+ __ movgr2fr_w(f8, a0); -+ __ ffint_d_w(f10, f8); -+ __ Ftintrz_w_d(f9, f10); -+ __ movfr2gr_s(a4, f9); -+ __ movgr2fr_d(f9, a4); -+ }; -+ CHECK_EQ(static_cast(input), run_CVT(input, fn)); -+ } -+} -+ -+ -+static const std::vector overflow_int64_test_values() { -+ // clang-format off -+ static const int64_t kValues[] = {static_cast(0xF000000000000000), -+ static_cast(0x0000000000000001), -+ static_cast(0xFF00000000000000), -+ static_cast(0x0000F00111111110), -+ static_cast(0x0F00001000000000), -+ static_cast(0x991234AB12A96731), -+ static_cast(0xB0FFFF0F0F0F0F01), -+ static_cast(0x00006FFFFFFFFFFF), -+ static_cast(0xFFFFFFFFFFFFFFFF)}; -+ // clang-format on -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+TEST(OverflowInstructions) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope handles(isolate); -+ -+ struct T { -+ int64_t lhs; -+ int64_t rhs; -+ int64_t output_add1; -+ int64_t output_add2; -+ int64_t output_sub1; -+ int64_t output_sub2; -+ int64_t output_mul1; -+ int64_t output_mul2; -+ int64_t overflow_add1; -+ int64_t overflow_add2; -+ int64_t overflow_sub1; -+ int64_t overflow_sub2; -+ int64_t overflow_mul1; -+ int64_t overflow_mul2; -+ }; -+ T t; -+ -+ FOR_INT64_INPUTS(i, overflow_int64_test_values) { -+ FOR_INT64_INPUTS(j, overflow_int64_test_values) { -+ int64_t ii = *i; -+ int64_t jj = *j; -+ int64_t expected_add, expected_sub; -+ int32_t ii32 = static_cast(ii); -+ int32_t jj32 = static_cast(jj); -+ int32_t expected_mul; -+ int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf; -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ __ ld_d(t0, a0, offsetof(T, lhs)); -+ __ ld_d(t1, a0, offsetof(T, rhs)); -+ -+ __ AdddOverflow(t2, t0, Operand(t1), t3); -+ __ st_d(t2, a0, offsetof(T, output_add1)); -+ __ st_d(t3, a0, offsetof(T, overflow_add1)); -+ __ or_(t3, zero_reg, zero_reg); -+ __ AdddOverflow(t0, t0, Operand(t1), t3); -+ __ st_d(t0, a0, offsetof(T, output_add2)); -+ __ st_d(t3, a0, offsetof(T, overflow_add2)); -+ -+ __ ld_d(t0, a0, offsetof(T, lhs)); -+ __ ld_d(t1, a0, offsetof(T, rhs)); -+ -+ __ SubdOverflow(t2, t0, Operand(t1), t3); -+ __ st_d(t2, a0, offsetof(T, output_sub1)); -+ __ st_d(t3, a0, offsetof(T, overflow_sub1)); -+ __ or_(t3, zero_reg, zero_reg); -+ __ SubdOverflow(t0, t0, Operand(t1), t3); -+ __ st_d(t0, a0, offsetof(T, output_sub2)); -+ __ st_d(t3, a0, offsetof(T, overflow_sub2)); -+ -+ __ ld_d(t0, a0, offsetof(T, lhs)); -+ __ ld_d(t1, a0, offsetof(T, rhs)); -+ __ slli_w(t0, t0, 0); -+ __ slli_w(t1, t1, 0); -+ -+ __ MulOverflow(t2, t0, Operand(t1), t3); -+ __ st_d(t2, a0, offsetof(T, output_mul1)); -+ __ st_d(t3, a0, offsetof(T, overflow_mul1)); -+ __ or_(t3, zero_reg, zero_reg); -+ __ MulOverflow(t0, t0, 
Operand(t1), t3); -+ __ st_d(t0, a0, offsetof(T, output_mul2)); -+ __ st_d(t3, a0, offsetof(T, overflow_mul2)); -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = -+ Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ t.lhs = ii; -+ t.rhs = jj; -+ f.Call(&t, 0, 0, 0, 0); -+ -+ expected_add_ovf = base::bits::SignedAddOverflow64(ii, jj, &expected_add); -+ expected_sub_ovf = base::bits::SignedSubOverflow64(ii, jj, &expected_sub); -+ expected_mul_ovf = -+ base::bits::SignedMulOverflow32(ii32, jj32, &expected_mul); -+ -+ CHECK_EQ(expected_add_ovf, t.overflow_add1 < 0); -+ CHECK_EQ(expected_sub_ovf, t.overflow_sub1 < 0); -+ CHECK_EQ(expected_mul_ovf, t.overflow_mul1 != 0); -+ -+ CHECK_EQ(t.overflow_add1, t.overflow_add2); -+ CHECK_EQ(t.overflow_sub1, t.overflow_sub2); -+ CHECK_EQ(t.overflow_mul1, t.overflow_mul2); -+ -+ CHECK_EQ(expected_add, t.output_add1); -+ CHECK_EQ(expected_add, t.output_add2); -+ CHECK_EQ(expected_sub, t.output_sub1); -+ CHECK_EQ(expected_sub, t.output_sub2); -+ if (!expected_mul_ovf) { -+ CHECK_EQ(expected_mul, t.output_mul1); -+ CHECK_EQ(expected_mul, t.output_mul2); -+ } -+ } -+ } -+} -+ -+TEST(min_max_nan) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ struct TestFloat { -+ double a; -+ double b; -+ double c; -+ double d; -+ float e; -+ float f; -+ float g; -+ float h; -+ }; -+ -+ TestFloat test; -+ const double dnan = std::numeric_limits::quiet_NaN(); -+ const double dinf = std::numeric_limits::infinity(); -+ const double dminf = -std::numeric_limits::infinity(); -+ const float fnan = std::numeric_limits::quiet_NaN(); -+ const float finf = std::numeric_limits::infinity(); -+ const float fminf = -std::numeric_limits::infinity(); -+ const int kTableLength = 13; -+ -+ // clang-format off -+ double inputsa[kTableLength] = {dnan, 3.0, -0.0, 0.0, 42.0, dinf, dminf, -+ dinf, dnan, 3.0, dinf, dnan, dnan}; -+ double inputsb[kTableLength] = {dnan, 2.0, 0.0, -0.0, dinf, 42.0, dinf, -+ dminf, 3.0, dnan, dnan, dinf, dnan}; -+ double outputsdmin[kTableLength] = {dnan, 2.0, -0.0, -0.0, 42.0, -+ 42.0, dminf, dminf, dnan, dnan, -+ dnan, dnan, dnan}; -+ double outputsdmax[kTableLength] = {dnan, 3.0, 0.0, 0.0, dinf, dinf, dinf, -+ dinf, dnan, dnan, dnan, dnan, dnan}; -+ -+ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf, -+ finf, fnan, 3.0, finf, fnan, fnan}; -+ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf, -+ fminf, 3.0, fnan, fnan, finf, fnan}; -+ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf, -+ fminf, fnan, fnan, fnan, fnan, fnan}; -+ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf, -+ finf, fnan, fnan, fnan, fnan, fnan}; -+ -+ // clang-format on -+ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) { -+ __ bind(nan); -+ __ LoadRoot(t8, RootIndex::kNanValue); -+ __ Fld_d(dst, FieldMemOperand(t8, HeapNumber::kValueOffset)); -+ __ Branch(back); -+ }; -+ -+ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) { -+ __ bind(nan); -+ __ Move(dst, fnan); -+ __ Branch(back); -+ }; -+ -+ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan; -+ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan; -+ -+ __ push(s6); -+ __ InitializeRootRegister(); 
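-+
-+ // Note on the pattern below: the Float64/Float32 Min/Max macros branch to
-+ // the supplied out-of-line label when either input is a NaN, and the
-+ // handle_dnan/handle_snan helpers bound above load a quiet NaN into the
-+ // result register and branch back. A rough C++ model of the min semantics
-+ // the expectation tables encode (illustrative sketch only; ref_min and
-+ // q_nan are placeholder names, not part of this test):
-+ //
-+ // double ref_min(double a, double b) {
-+ // if (std::isnan(a) || std::isnan(b)) return q_nan;
-+ // if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b; // -0 < +0
-+ // return a < b ? a : b;
-+ // }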
-+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); -+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); -+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, e))); -+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, f))); -+ __ Float64Min(f12, f8, f9, &handle_mind_nan); -+ __ bind(&back_mind_nan); -+ __ Float64Max(f13, f8, f9, &handle_maxd_nan); -+ __ bind(&back_maxd_nan); -+ __ Float32Min(f14, f10, f11, &handle_mins_nan); -+ __ bind(&back_mins_nan); -+ __ Float32Max(f15, f10, f11, &handle_maxs_nan); -+ __ bind(&back_maxs_nan); -+ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, c))); -+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, d))); -+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g))); -+ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h))); -+ __ pop(s6); -+ __ jirl(zero_reg, ra, 0); -+ -+ handle_dnan(f12, &handle_mind_nan, &back_mind_nan); -+ handle_dnan(f13, &handle_maxd_nan, &back_maxd_nan); -+ handle_snan(f14, &handle_mins_nan, &back_mins_nan); -+ handle_snan(f15, &handle_maxs_nan, &back_maxs_nan); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputsa[i]; -+ test.b = inputsb[i]; -+ test.e = inputse[i]; -+ test.f = inputsf[i]; -+ -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); -+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); -+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); -+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); -+ } -+} -+ -+template -+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset, -+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) { -+ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4); -+ -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assm; -+ IN_TYPE res; -+ -+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ -+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE)); -+ f.Call(memory_buffer, 0, 0, 0, 0); -+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE)); -+ -+ return res == value; -+} -+ -+static const std::vector unsigned_test_values() { -+ // clang-format off -+ static const uint64_t kValues[] = { -+ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0, -+ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF, -+ }; -+ // clang-format on -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+static const std::vector unsigned_test_offset() { -+ static const int32_t kValues[] = {// value, offset -+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+static const std::vector unsigned_test_offset_increment() { -+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+TEST(Ld_b) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, 
unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint16_t value = static_cast(*i & 0xFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_b(a2, MemOperand(a0, in_offset)); -+ __ St_b(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_1)); -+ -+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_b(a0, MemOperand(a0, in_offset)); -+ __ St_b(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_2)); -+ -+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_bu(a0, MemOperand(a0, in_offset)); -+ __ St_b(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_3)); -+ -+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_bu(a2, MemOperand(a0, in_offset)); -+ __ St_b(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_4)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_b_bitextension) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint16_t value = static_cast(*i & 0xFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ Label success, fail, end, different; -+ __ Ld_b(t0, MemOperand(a0, in_offset)); -+ __ Ld_bu(t1, MemOperand(a0, in_offset)); -+ __ Branch(&different, ne, t0, Operand(t1)); -+ -+ // If signed and unsigned values are same, check -+ // the upper bits to see if they are zero -+ __ srai_w(t0, t0, 7); -+ __ Branch(&success, eq, t0, Operand(zero_reg)); -+ __ Branch(&fail); -+ -+ // If signed and unsigned values are different, -+ // check that the upper bits are complementary -+ __ bind(&different); -+ __ srai_w(t1, t1, 7); -+ __ Branch(&fail, ne, t1, Operand(1)); -+ __ srai_w(t0, t0, 7); -+ __ addi_d(t0, t0, 1); -+ __ Branch(&fail, ne, t0, Operand(zero_reg)); -+ // Fall through to success -+ -+ __ bind(&success); -+ __ Ld_b(t0, MemOperand(a0, in_offset)); -+ __ St_b(t0, MemOperand(a0, out_offset)); -+ __ Branch(&end); -+ __ bind(&fail); -+ __ St_b(zero_reg, MemOperand(a0, out_offset)); -+ __ bind(&end); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_h) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint16_t value = static_cast(*i & 0xFFFF); -+ int32_t in_offset = *j1 + 
*k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_h(a2, MemOperand(a0, in_offset)); -+ __ St_h(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_1)); -+ -+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_h(a0, MemOperand(a0, in_offset)); -+ __ St_h(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_2)); -+ -+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_hu(a0, MemOperand(a0, in_offset)); -+ __ St_h(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_3)); -+ -+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_hu(a2, MemOperand(a0, in_offset)); -+ __ St_h(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_4)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_h_bitextension) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint16_t value = static_cast(*i & 0xFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ Label success, fail, end, different; -+ __ Ld_h(t0, MemOperand(a0, in_offset)); -+ __ Ld_hu(t1, MemOperand(a0, in_offset)); -+ __ Branch(&different, ne, t0, Operand(t1)); -+ -+ // If signed and unsigned values are same, check -+ // the upper bits to see if they are zero -+ __ srai_w(t0, t0, 15); -+ __ Branch(&success, eq, t0, Operand(zero_reg)); -+ __ Branch(&fail); -+ -+ // If signed and unsigned values are different, -+ // check that the upper bits are complementary -+ __ bind(&different); -+ __ srai_w(t1, t1, 15); -+ __ Branch(&fail, ne, t1, Operand(1)); -+ __ srai_w(t0, t0, 15); -+ __ addi_d(t0, t0, 1); -+ __ Branch(&fail, ne, t0, Operand(zero_reg)); -+ // Fall through to success -+ -+ __ bind(&success); -+ __ Ld_h(t0, MemOperand(a0, in_offset)); -+ __ St_h(t0, MemOperand(a0, out_offset)); -+ __ Branch(&end); -+ __ bind(&fail); -+ __ St_h(zero_reg, MemOperand(a0, out_offset)); -+ __ bind(&end); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_w) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint32_t value = static_cast(*i & 0xFFFFFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_w(a2, MemOperand(a0, in_offset)); -+ __ St_w(a2, MemOperand(a0, 
out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_1)); -+ -+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_w(a0, MemOperand(a0, in_offset)); -+ __ St_w(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, -+ run_Unaligned(buffer_middle, in_offset, out_offset, -+ (uint32_t)value, fn_2)); -+ -+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_wu(a2, MemOperand(a0, in_offset)); -+ __ St_w(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_3)); -+ -+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ mov(t0, a0); -+ __ Ld_wu(a0, MemOperand(a0, in_offset)); -+ __ St_w(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, -+ run_Unaligned(buffer_middle, in_offset, out_offset, -+ (uint32_t)value, fn_4)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_w_extension) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint32_t value = static_cast(*i & 0xFFFFFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ Label success, fail, end, different; -+ __ Ld_w(t0, MemOperand(a0, in_offset)); -+ __ Ld_wu(t1, MemOperand(a0, in_offset)); -+ __ Branch(&different, ne, t0, Operand(t1)); -+ -+ // If signed and unsigned values are same, check -+ // the upper bits to see if they are zero -+ __ srai_d(t0, t0, 31); -+ __ Branch(&success, eq, t0, Operand(zero_reg)); -+ __ Branch(&fail); -+ -+ // If signed and unsigned values are different, -+ // check that the upper bits are complementary -+ __ bind(&different); -+ __ srai_d(t1, t1, 31); -+ __ Branch(&fail, ne, t1, Operand(1)); -+ __ srai_d(t0, t0, 31); -+ __ addi_d(t0, t0, 1); -+ __ Branch(&fail, ne, t0, Operand(zero_reg)); -+ // Fall through to success -+ -+ __ bind(&success); -+ __ Ld_w(t0, MemOperand(a0, in_offset)); -+ __ St_w(t0, MemOperand(a0, out_offset)); -+ __ Branch(&end); -+ __ bind(&fail); -+ __ St_w(zero_reg, MemOperand(a0, out_offset)); -+ __ bind(&end); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn)); -+ } -+ } -+ } -+} -+ -+TEST(Ld_d) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ uint64_t value = *i; -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Ld_d(a2, MemOperand(a0, in_offset)); -+ __ St_d(a2, MemOperand(a0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn_1)); -+ -+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t 
out_offset) { -+ __ mov(t0, a0); -+ __ Ld_d(a0, MemOperand(a0, in_offset)); -+ __ St_d(a0, MemOperand(t0, out_offset)); -+ __ or_(a0, a2, zero_reg); -+ }; -+ CHECK_EQ(true, -+ run_Unaligned(buffer_middle, in_offset, out_offset, -+ (uint32_t)value, fn_2)); -+ } -+ } -+ } -+} -+ -+TEST(Fld_s) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ float value = static_cast(*i & 0xFFFFFFFF); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Fld_s(f0, MemOperand(a0, in_offset)); -+ __ Fst_s(f0, MemOperand(a0, out_offset)); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn)); -+ } -+ } -+ } -+} -+ -+TEST(Fld_d) { -+ CcTest::InitializeVM(); -+ -+ static const int kBufferSize = 300 * KB; -+ char memory_buffer[kBufferSize]; -+ char* buffer_middle = memory_buffer + (kBufferSize / 2); -+ -+ FOR_UINT64_INPUTS(i, unsigned_test_values) { -+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { -+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { -+ double value = static_cast(*i); -+ int32_t in_offset = *j1 + *k1; -+ int32_t out_offset = *j2 + *k2; -+ -+ auto fn = [](MacroAssembler* masm, int32_t in_offset, -+ int32_t out_offset) { -+ __ Fld_d(f0, MemOperand(a0, in_offset)); -+ __ Fst_d(f0, MemOperand(a0, out_offset)); -+ }; -+ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, -+ out_offset, value, fn)); -+ } -+ } -+ } -+} -+ -+static const std::vector sltu_test_values() { -+ // clang-format off -+ static const uint64_t kValues[] = { -+ 0, -+ 1, -+ 0x7FE, -+ 0x7FF, -+ 0x800, -+ 0x801, -+ 0xFFE, -+ 0xFFF, -+ 0xFFFFFFFFFFFFF7FE, -+ 0xFFFFFFFFFFFFF7FF, -+ 0xFFFFFFFFFFFFF800, -+ 0xFFFFFFFFFFFFF801, -+ 0xFFFFFFFFFFFFFFFE, -+ 0xFFFFFFFFFFFFFFFF, -+ }; -+ // clang-format on -+ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -+} -+ -+template -+bool run_Sltu(uint64_t rj, uint64_t rk, Func GenerateSltuInstructionFunc) { -+ using F_CVT = int64_t(uint64_t x0, uint64_t x1, int x2, int x3, int x4); -+ -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assm; -+ -+ GenerateSltuInstructionFunc(masm, rk); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ assm.GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); -+ return res == 1; -+} -+ -+TEST(Sltu) { -+ CcTest::InitializeVM(); -+ -+ FOR_UINT64_INPUTS(i, sltu_test_values) { -+ FOR_UINT64_INPUTS(j, sltu_test_values) { -+ uint64_t rj = *i; -+ uint64_t rk = *j; -+ -+ auto fn_1 = [](MacroAssembler* masm, uint64_t imm) { -+ __ Sltu(a2, a0, Operand(imm)); -+ }; -+ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_1)); -+ -+ auto fn_2 = [](MacroAssembler* masm, uint64_t imm) { -+ __ Sltu(a2, a0, a1); -+ }; -+ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_2)); -+ } -+ } -+} -+ -+template -+static GeneratedCode GenerateMacroFloat32MinMax(MacroAssembler* masm) { -+ T a = T::from_code(8); // f8 -+ T b = T::from_code(9); // f9 -+ T c = 
T::from_code(10); // f10 -+ -+ Label ool_min_abc, ool_min_aab, ool_min_aba; -+ Label ool_max_abc, ool_max_aab, ool_max_aba; -+ -+ Label done_min_abc, done_min_aab, done_min_aba; -+ Label done_max_abc, done_max_aab, done_max_aba; -+ -+#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ -+ __ Fld_s(x, MemOperand(a0, offsetof(Inputs, src1_))); \ -+ __ Fld_s(y, MemOperand(a0, offsetof(Inputs, src2_))); \ -+ __ fminmax(res, x, y, &ool); \ -+ __ bind(&done); \ -+ __ Fst_s(a, MemOperand(a1, offsetof(Results, res_field))) -+ -+ // a = min(b, c); -+ FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); -+ // a = min(a, b); -+ FLOAT_MIN_MAX(Float32Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); -+ // a = min(b, a); -+ FLOAT_MIN_MAX(Float32Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); -+ -+ // a = max(b, c); -+ FLOAT_MIN_MAX(Float32Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); -+ // a = max(a, b); -+ FLOAT_MIN_MAX(Float32Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); -+ // a = max(b, a); -+ FLOAT_MIN_MAX(Float32Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); -+ -+#undef FLOAT_MIN_MAX -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ // Generate out-of-line cases. -+ __ bind(&ool_min_abc); -+ __ Float32MinOutOfLine(a, b, c); -+ __ Branch(&done_min_abc); -+ -+ __ bind(&ool_min_aab); -+ __ Float32MinOutOfLine(a, a, b); -+ __ Branch(&done_min_aab); -+ -+ __ bind(&ool_min_aba); -+ __ Float32MinOutOfLine(a, b, a); -+ __ Branch(&done_min_aba); -+ -+ __ bind(&ool_max_abc); -+ __ Float32MaxOutOfLine(a, b, c); -+ __ Branch(&done_max_abc); -+ -+ __ bind(&ool_max_aab); -+ __ Float32MaxOutOfLine(a, a, b); -+ __ Branch(&done_max_aab); -+ -+ __ bind(&ool_max_aba); -+ __ Float32MaxOutOfLine(a, b, a); -+ __ Branch(&done_max_aba); -+ -+ CodeDesc desc; -+ masm->GetCode(masm->isolate(), &desc); -+ Handle code = -+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); -+#ifdef DEBUG -+ StdoutStream os; -+ code->Print(os); -+#endif -+ return GeneratedCode::FromCode(*code); -+} -+ -+TEST(macro_float_minmax_f32) { -+ // Test the Float32Min and Float32Max macros. -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ struct Inputs { -+ float src1_; -+ float src2_; -+ }; -+ -+ struct Results { -+ // Check all register aliasing possibilities in order to exercise all -+ // code-paths in the macro assembler. -+ float min_abc_; -+ float min_aab_; -+ float min_aba_; -+ float max_abc_; -+ float max_aab_; -+ float max_aba_; -+ }; -+ -+ GeneratedCode f = -+ GenerateMacroFloat32MinMax(masm); -+ -+#define CHECK_MINMAX(src1, src2, min, max) \ -+ do { \ -+ Inputs inputs = {src1, src2}; \ -+ Results results; \ -+ f.Call(&inputs, &results, 0, 0, 0); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_abc_)); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_aab_)); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_aba_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_abc_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_aab_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_aba_)); \ -+ /* Use a bit_cast to correctly identify -0.0 and NaNs. 
*/ \ -+ } while (0) -+ -+ float nan_a = std::numeric_limits::quiet_NaN(); -+ float nan_b = std::numeric_limits::quiet_NaN(); -+ -+ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f); -+ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f); -+ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f); -+ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f); -+ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f); -+ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f); -+ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f); -+ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f); -+ -+ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f); -+ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f); -+ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f); -+ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f); -+ -+ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a); -+ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a); -+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); -+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); -+ -+#undef CHECK_MINMAX -+} -+ -+template -+static GeneratedCode GenerateMacroFloat64MinMax(MacroAssembler* masm) { -+ T a = T::from_code(8); // f8 -+ T b = T::from_code(9); // f9 -+ T c = T::from_code(10); // f10 -+ -+ Label ool_min_abc, ool_min_aab, ool_min_aba; -+ Label ool_max_abc, ool_max_aab, ool_max_aba; -+ -+ Label done_min_abc, done_min_aab, done_min_aba; -+ Label done_max_abc, done_max_aab, done_max_aba; -+ -+#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ -+ __ Fld_d(x, MemOperand(a0, offsetof(Inputs, src1_))); \ -+ __ Fld_d(y, MemOperand(a0, offsetof(Inputs, src2_))); \ -+ __ fminmax(res, x, y, &ool); \ -+ __ bind(&done); \ -+ __ Fst_d(a, MemOperand(a1, offsetof(Results, res_field))) -+ -+ // a = min(b, c); -+ FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); -+ // a = min(a, b); -+ FLOAT_MIN_MAX(Float64Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); -+ // a = min(b, a); -+ FLOAT_MIN_MAX(Float64Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); -+ -+ // a = max(b, c); -+ FLOAT_MIN_MAX(Float64Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); -+ // a = max(a, b); -+ FLOAT_MIN_MAX(Float64Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); -+ // a = max(b, a); -+ FLOAT_MIN_MAX(Float64Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); -+ -+#undef FLOAT_MIN_MAX -+ -+ __ jirl(zero_reg, ra, 0); -+ -+ // Generate out-of-line cases. -+ __ bind(&ool_min_abc); -+ __ Float64MinOutOfLine(a, b, c); -+ __ Branch(&done_min_abc); -+ -+ __ bind(&ool_min_aab); -+ __ Float64MinOutOfLine(a, a, b); -+ __ Branch(&done_min_aab); -+ -+ __ bind(&ool_min_aba); -+ __ Float64MinOutOfLine(a, b, a); -+ __ Branch(&done_min_aba); -+ -+ __ bind(&ool_max_abc); -+ __ Float64MaxOutOfLine(a, b, c); -+ __ Branch(&done_max_abc); -+ -+ __ bind(&ool_max_aab); -+ __ Float64MaxOutOfLine(a, a, b); -+ __ Branch(&done_max_aab); -+ -+ __ bind(&ool_max_aba); -+ __ Float64MaxOutOfLine(a, b, a); -+ __ Branch(&done_max_aba); -+ -+ CodeDesc desc; -+ masm->GetCode(masm->isolate(), &desc); -+ Handle code = -+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); -+#ifdef DEBUG -+ StdoutStream os; -+ code->Print(os); -+#endif -+ return GeneratedCode::FromCode(*code); -+} -+ -+TEST(macro_float_minmax_f64) { -+ // Test the Float64Min and Float64Max macros. 
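-+ // As in the f32 variant above, the abc/aab/aba cases cover every way the
-+ // destination register can alias an input, so both the inline and the
-+ // out-of-line paths are exercised for each aliasing pattern. Results are
-+ // compared bit-for-bit via bit_cast below, which is what lets the test
-+ // distinguish -0.0 from 0.0 (they compare equal as doubles) and require
-+ // that min(-0.0, 0.0) be exactly -0.0.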
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ struct Inputs { -+ double src1_; -+ double src2_; -+ }; -+ -+ struct Results { -+ // Check all register aliasing possibilities in order to exercise all -+ // code-paths in the macro assembler. -+ double min_abc_; -+ double min_aab_; -+ double min_aba_; -+ double max_abc_; -+ double max_aab_; -+ double max_aba_; -+ }; -+ -+ GeneratedCode f = -+ GenerateMacroFloat64MinMax(masm); -+ -+#define CHECK_MINMAX(src1, src2, min, max) \ -+ do { \ -+ Inputs inputs = {src1, src2}; \ -+ Results results; \ -+ f.Call(&inputs, &results, 0, 0, 0); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_abc_)); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_aab_)); \ -+ CHECK_EQ(bit_cast(min), bit_cast(results.min_aba_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_abc_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_aab_)); \ -+ CHECK_EQ(bit_cast(max), bit_cast(results.max_aba_)); \ -+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \ -+ } while (0) -+ -+ double nan_a = std::numeric_limits::quiet_NaN(); -+ double nan_b = std::numeric_limits::quiet_NaN(); -+ -+ CHECK_MINMAX(1.0, -1.0, -1.0, 1.0); -+ CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0); -+ CHECK_MINMAX(0.0, -1.0, -1.0, 0.0); -+ CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0); -+ CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0); -+ CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0); -+ CHECK_MINMAX(0.0, 1.0, 0.0, 1.0); -+ CHECK_MINMAX(1.0, 0.0, 0.0, 1.0); -+ -+ CHECK_MINMAX(0.0, 0.0, 0.0, 0.0); -+ CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0); -+ CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0); -+ CHECK_MINMAX(0.0, -0.0, -0.0, 0.0); -+ -+ CHECK_MINMAX(0.0, nan_a, nan_a, nan_a); -+ CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a); -+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); -+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); -+ -+#undef CHECK_MINMAX -+} -+ -+uint64_t run_Sub_w(uint64_t imm, int32_t num_instr) { -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ Label code_start; -+ __ bind(&code_start); -+ __ Sub_w(a2, zero_reg, Operand(imm)); -+ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+#ifdef OBJECT_PRINT -+ code->Print(std::cout); -+#endif -+ auto f = GeneratedCode::FromCode(*code); -+ -+ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ return res; -+} -+ -+TEST(SUB_W) { -+ CcTest::InitializeVM(); -+ -+ // Test Subu macro-instruction for min_int12 and max_int12 border cases. -+ // For subtracting int16 immediate values we use addiu. -+ -+ struct TestCaseSub { -+ uint64_t imm; -+ uint64_t expected_res; -+ int32_t num_instr; -+ }; -+ -+ // We call Sub_w(v0, zero_reg, imm) to test cases listed below. -+ // 0 - imm = expected_res -+ // clang-format off -+ struct TestCaseSub tc[] = { -+ // imm, expected_res, num_instr -+ {0xFFFFFFFFFFFFF800, 0x800, 2}, // min_int12 -+ // The test case above generates ori + add_w instruction sequence. -+ // We can't have just addi_ because -min_int12 > max_int12 so use -+ // register. 
We can load min_int12 to at register with addi_w and then
-+ // subtract at with sub_w, but now we use ori + add_w because -min_int12 can
-+ // be loaded using ori.
-+ {0x800, 0xFFFFFFFFFFFFF800, 1}, // max_int12 + 1
-+ // Generates addi_w
-+ // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_w.
-+ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1
-+ // Generates ori + add_w
-+ // To load this value to at we need two instructions and another one to
-+ // subtract, lu12i_w + ori + sub_w. But we can load -value to at using just
-+ // ori and then add at register with add_w.
-+ {0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2
-+ // Generates ori + sub_w
-+ // Not int12 but is uint12, load value to at with ori and subtract with
-+ // sub_w.
-+ {0x00010000, 0xFFFFFFFFFFFF0000, 2},
-+ // Generates lu12i_w + sub_w
-+ // Load value to at with lu12i_w and subtract with sub_w.
-+ {0x00010001, 0xFFFFFFFFFFFEFFFF, 3},
-+ // Generates lu12i_w + ori + sub_w
-+ // We have to generate three instructions in this case.
-+ {0x7FFFFFFF, 0xFFFFFFFF80000001, 3}, // max_int32
-+ // Generates lu12i_w + ori + sub_w
-+ {0xFFFFFFFF80000000, 0xFFFFFFFF80000000, 2}, // min_int32
-+ // The test case above generates a lu12i_w + sub_w instruction sequence.
-+ // The result of 0 - min_int32 equals max_int32 + 1, which wraps around to
-+ // min_int32 again.
-+ };
-+ // clang-format on
-+
-+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub);
-+ for (size_t i = 0; i < nr_test_cases; ++i) {
-+ CHECK_EQ(tc[i].expected_res, run_Sub_w(tc[i].imm, tc[i].num_instr));
-+ }
-+}
-+
-+uint64_t run_Sub_d(uint64_t imm, int32_t num_instr) {
-+ Isolate* isolate = CcTest::i_isolate();
-+ HandleScope scope(isolate);
-+
-+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+ MacroAssembler* masm = &assembler;
-+
-+ Label code_start;
-+ __ bind(&code_start);
-+ __ Sub_d(a2, zero_reg, Operand(imm));
-+ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr);
-+ __ or_(a0, a2, zero_reg);
-+ __ jirl(zero_reg, ra, 0);
-+
-+ CodeDesc desc;
-+ masm->GetCode(isolate, &desc);
-+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+#ifdef OBJECT_PRINT
-+ code->Print(std::cout);
-+#endif
-+ auto f = GeneratedCode::FromCode(*code);
-+
-+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
-+
-+ return res;
-+}
-+
-+TEST(SUB_D) {
-+ CcTest::InitializeVM();
-+
-+ // Test Sub_d macro-instruction for min_int12 and max_int12 border cases.
-+ // For subtracting int12 immediate values we use addi_d.
-+
-+ struct TestCaseSub {
-+ uint64_t imm;
-+ uint64_t expected_res;
-+ int32_t num_instr;
-+ };
-+ // We call Sub_d(a2, zero_reg, imm) to test the cases listed below.
-+ // 0 - imm = expected_res
-+ // clang-format off
-+ struct TestCaseSub tc[] = {
-+ // imm, expected_res, num_instr
-+ {0xFFFFFFFFFFFFF800, 0x800, 2}, // min_int12
-+ // The test case above generates an addi_d instruction.
-+ // This is an int12 value and we can load it using just addi_d.
-+ { 0x800, 0xFFFFFFFFFFFFF800, 1}, // max_int12 + 1
-+ // Generates addi_d
-+ // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_d. 
-+ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1 -+ // Generates ori + add_d -+ { 0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2 -+ // Generates ori + add_d -+ { 0x00001000, 0xFFFFFFFFFFFFF000, 2}, // max_uint12 + 1 -+ // Generates lu12i_w + sub_d -+ { 0x00001001, 0xFFFFFFFFFFFFEFFF, 3}, // max_uint12 + 2 -+ // Generates lu12i_w + ori + sub_d -+ {0x00000000FFFFFFFF, 0xFFFFFFFF00000001, 3}, // max_uint32 -+ // Generates addi_w + li32i_d + sub_d -+ {0x00000000FFFFFFFE, 0xFFFFFFFF00000002, 3}, // max_uint32 - 1 -+ // Generates addi_w + li32i_d + sub_d -+ {0xFFFFFFFF80000000, 0x80000000, 2}, // min_int32 -+ // Generates lu12i_w + sub_d -+ {0x0000000080000000, 0xFFFFFFFF80000000, 2}, // max_int32 + 1 -+ // Generates lu12i_w + add_d -+ {0xFFFF0000FFFF8765, 0x0000FFFF0000789B, 4}, -+ // Generates lu12i_w + ori + lu32i_d + sub -+ {0x1234ABCD87654321, 0xEDCB5432789ABCDF, 5}, -+ // Generates lu12i_w + ori + lu32i_d + lu52i_d + sub -+ {0xFFFF789100000000, 0x876F00000000, 3}, -+ // Generates xor + lu32i_d + sub -+ {0xF12F789100000000, 0xED0876F00000000, 4}, -+ // Generates xor + lu32i_d + lu52i_d + sub -+ {0xF120000000000800, 0xEDFFFFFFFFFF800, 3}, -+ // Generates ori + lu52i_d + sub -+ {0xFFF0000000000000, 0x10000000000000, 2} -+ // Generates lu52i_d + sub -+ }; -+ // clang-format on -+ -+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub); -+ for (size_t i = 0; i < nr_test_cases; ++i) { -+ CHECK_EQ(tc[i].expected_res, run_Sub_d(tc[i].imm, tc[i].num_instr)); -+ } -+} -+ -+TEST(Move) { -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ struct T { -+ float a; -+ float b; -+ float result_a; -+ float result_b; -+ double c; -+ double d; -+ double e; -+ double result_c; -+ double result_d; -+ double result_e; -+ }; -+ T t; -+ __ li(a4, static_cast(0x80000000)); -+ __ St_w(a4, MemOperand(a0, offsetof(T, a))); -+ __ li(a5, static_cast(0x12345678)); -+ __ St_w(a5, MemOperand(a0, offsetof(T, b))); -+ __ li(a6, static_cast(0x8877665544332211)); -+ __ St_d(a6, MemOperand(a0, offsetof(T, c))); -+ __ li(a7, static_cast(0x1122334455667788)); -+ __ St_d(a7, MemOperand(a0, offsetof(T, d))); -+ __ li(t0, static_cast(0)); -+ __ St_d(t0, MemOperand(a0, offsetof(T, e))); -+ -+ __ Move(f8, static_cast(0x80000000)); -+ __ Move(f9, static_cast(0x12345678)); -+ __ Move(f10, static_cast(0x8877665544332211)); -+ __ Move(f11, static_cast(0x1122334455667788)); -+ __ Move(f12, static_cast(0)); -+ __ Fst_s(f8, MemOperand(a0, offsetof(T, result_a))); -+ __ Fst_s(f9, MemOperand(a0, offsetof(T, result_b))); -+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_c))); -+ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_d))); -+ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_e))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ f.Call(&t, 0, 0, 0, 0); -+ CHECK_EQ(t.a, t.result_a); -+ CHECK_EQ(t.b, t.result_b); -+ CHECK_EQ(t.c, t.result_c); -+ CHECK_EQ(t.d, t.result_d); -+ CHECK_EQ(t.e, t.result_e); -+} -+ -+TEST(Movz_Movn) { -+ const int kTableLength = 4; -+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ struct Test { -+ int64_t rt; -+ 
int64_t a; -+ int64_t b; -+ int64_t bold; -+ int64_t b1; -+ int64_t bold1; -+ int32_t c; -+ int32_t d; -+ int32_t dold; -+ int32_t d1; -+ int32_t dold1; -+ }; -+ -+ Test test; -+ // clang-format off -+ int64_t inputs_D[kTableLength] = { -+ 7, 8, -9, -10 -+ }; -+ int32_t inputs_W[kTableLength] = { -+ 3, 4, -5, -6 -+ }; -+ -+ int32_t outputs_W[kTableLength] = { -+ 3, 4, -5, -6 -+ }; -+ int64_t outputs_D[kTableLength] = { -+ 7, 8, -9, -10 -+ }; -+ // clang-format on -+ -+ __ Ld_d(a4, MemOperand(a0, offsetof(Test, a))); -+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, c))); -+ __ Ld_d(a6, MemOperand(a0, offsetof(Test, rt))); -+ __ li(t0, 1); -+ __ li(t1, 1); -+ __ li(t2, 1); -+ __ li(t3, 1); -+ __ St_d(t0, MemOperand(a0, offsetof(Test, bold))); -+ __ St_d(t1, MemOperand(a0, offsetof(Test, bold1))); -+ __ St_w(t2, MemOperand(a0, offsetof(Test, dold))); -+ __ St_w(t3, MemOperand(a0, offsetof(Test, dold1))); -+ __ Movz(t0, a4, a6); -+ __ Movn(t1, a4, a6); -+ __ Movz(t2, a5, a6); -+ __ Movn(t3, a5, a6); -+ __ St_d(t0, MemOperand(a0, offsetof(Test, b))); -+ __ St_d(t1, MemOperand(a0, offsetof(Test, b1))); -+ __ St_w(t2, MemOperand(a0, offsetof(Test, d))); -+ __ St_w(t3, MemOperand(a0, offsetof(Test, d1))); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ for (int i = 0; i < kTableLength; i++) { -+ test.a = inputs_D[i]; -+ test.c = inputs_W[i]; -+ -+ test.rt = 1; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.b, test.bold); -+ CHECK_EQ(test.d, test.dold); -+ CHECK_EQ(test.b1, outputs_D[i]); -+ CHECK_EQ(test.d1, outputs_W[i]); -+ -+ test.rt = 0; -+ f.Call(&test, 0, 0, 0, 0); -+ CHECK_EQ(test.b, outputs_D[i]); -+ CHECK_EQ(test.d, outputs_W[i]); -+ CHECK_EQ(test.b1, test.bold1); -+ CHECK_EQ(test.d1, test.dold1); -+ } -+} -+ -+TEST(macro_instructions1) { -+ // Test 32bit calculate instructions macros. 
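-+ // Each macro is checked against the corresponding bare instruction
-+ // (e.g. Add_w vs. add_w) and against an immediate-operand form that has
-+ // to synthesize the constant. For the less obvious expectations, Mulh_wu
-+ // yields the high half of the unsigned 32x32 product; an illustrative
-+ // sketch (mulh_wu_ref is a placeholder name):
-+ //
-+ // uint32_t mulh_wu_ref(uint32_t a, uint32_t b) {
-+ // return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
-+ // }
-+ //
-+ // e.g. 4 * 0xFFFFEDCC = 0x3FFFFB730, so the high word checked is 0x3.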
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ Label exit, error; -+ -+ __ li(a4, 0x00000004); -+ __ li(a5, 0x00001234); -+ __ li(a6, 0x12345678); -+ __ li(a7, 0x7FFFFFFF); -+ __ li(t0, static_cast(0xFFFFFFFC)); -+ __ li(t1, static_cast(0xFFFFEDCC)); -+ __ li(t2, static_cast(0xEDCBA988)); -+ __ li(t3, static_cast(0x80000000)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ add_w(a2, a7, t1); -+ __ Add_w(a3, t1, a7); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ Add_w(t4, t1, static_cast(0x7FFFFFFF)); -+ __ Branch(&error, ne, a2, Operand(t4)); -+ __ addi_w(a2, a6, 0x800); -+ __ Add_w(a3, a6, 0xFFFFF800); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ mul_w(a2, t1, a7); -+ __ Mul_w(a3, t1, a7); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ Mul_w(t4, t1, static_cast(0x7FFFFFFF)); -+ __ Branch(&error, ne, a2, Operand(t4)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ mulh_w(a2, t1, a7); -+ __ Mulh_w(a3, t1, a7); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ Mulh_w(t4, t1, static_cast(0x7FFFFFFF)); -+ __ Branch(&error, ne, a2, Operand(t4)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mulh_wu(a2, a4, static_cast(0xFFFFEDCC)); -+ __ Branch(&error, ne, a2, Operand(0x3)); -+ __ Mulh_wu(a3, a4, t1); -+ __ Branch(&error, ne, a3, Operand(0x3)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ div_w(a2, a7, t2); -+ __ Div_w(a3, a7, t2); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ Div_w(t4, a7, static_cast(0xEDCBA988)); -+ __ Branch(&error, ne, a2, Operand(t4)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Div_wu(a2, a7, a5); -+ __ Branch(&error, ne, a2, Operand(0x70821)); -+ __ Div_wu(a3, t0, static_cast(0x00001234)); -+ __ Branch(&error, ne, a3, Operand(0xE1042)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mod_w(a2, a6, a5); -+ __ Branch(&error, ne, a2, Operand(0xDA8)); -+ __ Mod_w(a3, t2, static_cast(0x00001234)); -+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFF258)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mod_wu(a2, a6, a5); -+ __ Branch(&error, ne, a2, Operand(0xDA8)); -+ __ Mod_wu(a3, t2, static_cast(0x00001234)); -+ __ Branch(&error, ne, a3, Operand(0xF0)); -+ -+ __ li(a2, 0x31415926); -+ __ b(&exit); -+ -+ __ bind(&error); -+ __ li(a2, 0x666); -+ -+ __ bind(&exit); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ CHECK_EQ(0x31415926L, res); -+} -+ -+TEST(macro_instructions2) { -+ // Test 64bit calculate instructions macros. 
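-+ // The Mulh_d expectations are the high 64 bits of the signed 128-bit
-+ // product. A minimal reference sketch, assuming __int128 support as on
-+ // GCC/Clang 64-bit targets (mulh_d_ref is a placeholder name):
-+ //
-+ // int64_t mulh_d_ref(int64_t a, int64_t b) {
-+ // return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
-+ // }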
-+ CcTest::InitializeVM(); -+ Isolate* isolate = CcTest::i_isolate(); -+ HandleScope scope(isolate); -+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); -+ MacroAssembler* masm = &assembler; -+ -+ Label exit, error; -+ -+ __ li(a4, 0x17312); -+ __ li(a5, 0x1012131415161718); -+ __ li(a6, 0x51F4B764A26E7412); -+ __ li(a7, 0x7FFFFFFFFFFFFFFF); -+ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); -+ __ li(t1, static_cast(0xDF6B8F35A10E205C)); -+ __ li(t2, static_cast(0x81F25A87C4236841)); -+ __ li(t3, static_cast(0x8000000000000000)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ add_d(a2, a7, t1); -+ __ Add_d(a3, t1, a7); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ __ Add_d(t4, t1, Operand(0x7FFFFFFFFFFFFFFF)); -+ __ Branch(&error, ne, a2, Operand(t4)); -+ __ addi_d(a2, a6, 0x800); -+ __ Add_d(a3, a6, Operand(0xFFFFFFFFFFFFF800)); -+ __ Branch(&error, ne, a2, Operand(a3)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mul_d(a2, a5, a6); -+ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0)); -+ __ Mul_d(a3, t0, Operand(0xDF6B8F35A10E205C)); -+ __ Branch(&error, ne, a3, Operand(0x57ad69f40f870584)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mulh_d(a2, a5, a6); -+ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467)); -+ __ Mulh_d(a3, t0, Operand(0xDF6B8F35A10E205C)); -+ __ Branch(&error, ne, a3, Operand(0x15d)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Div_d(a2, t0, t1); -+ __ Branch(&error, ne, a2, Operand(static_cast(0))); -+ __ Div_d(a3, t1, Operand(0x17312)); -+ __ Branch(&error, ne, a3, Operand(0xffffe985f631e6d9)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Div_du(a2, t0, t1); -+ __ Branch(&error, ne, a2, Operand(0x1)); -+ __ Div_du(a3, t1, 0x17312); -+ __ Branch(&error, ne, a3, Operand(0x9a22ffd3973d)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mod_d(a2, a6, a4); -+ __ Branch(&error, ne, a2, Operand(0x13558)); -+ __ Mod_d(a3, t2, Operand(0xFFFFFFFFFFFFF547)); -+ __ Branch(&error, ne, a3, Operand(0xfffffffffffffb0a)); -+ -+ __ or_(a2, zero_reg, zero_reg); -+ __ or_(a3, zero_reg, zero_reg); -+ __ Mod_du(a2, a6, a4); -+ __ Branch(&error, ne, a2, Operand(0x13558)); -+ __ Mod_du(a3, t2, Operand(0xFFFFFFFFFFFFF547)); -+ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841)); -+ -+ __ li(a2, 0x31415926); -+ __ b(&exit); -+ -+ __ bind(&error); -+ __ li(a2, 0x666); -+ -+ __ bind(&exit); -+ __ or_(a0, a2, zero_reg); -+ __ jirl(zero_reg, ra, 0); -+ -+ CodeDesc desc; -+ masm->GetCode(isolate, &desc); -+ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); -+ auto f = GeneratedCode::FromCode(*code); -+ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); -+ -+ CHECK_EQ(0x31415926L, res); -+} -+ -+TEST(macro_instructions3) { -+ // Test 64bit calculate instructions macros. 
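-+ // This batch covers the logical and set-on-condition macros. The
-+ // negated-operand forms follow the LoongArch definitions:
-+ //
-+ // Andn(rd, rj, rk) => rd = rj & ~rk
-+ // Orn(rd, rj, rk) => rd = rj | ~rk
-+ // Nor(rd, rj, rk) => rd = ~(rj | rk)
-+ //
-+ // e.g. with t0 = 0xFFFFFFFFFFFFF547 and t1 = 0xDF6B8F35A10E205C,
-+ // t0 | ~t1 = 0xFFFFFFFFFFFFFFE7, the value checked below.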
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  Label exit, error;
-+
-+  __ li(a4, 0x17312);
-+  __ li(a5, 0x1012131415161718);
-+  __ li(a6, 0x51F4B764A26E7412);
-+  __ li(a7, 0x7FFFFFFFFFFFFFFF);
-+  __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
-+  __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
-+  __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
-+  __ li(t3, static_cast<int64_t>(0x8000000000000000));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ And(a2, a4, a5);
-+  __ Branch(&error, ne, a2, Operand(0x1310));
-+  __ And(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
-+  __ Branch(&error, ne, a3, Operand(0x51F4B764A26E7412));
-+  __ andi(a2, a6, 0xDCB);
-+  __ And(a3, a6, Operand(0xDCB));
-+  __ Branch(&error, ne, a3, Operand(a2));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Or(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f));
-+  __ Or(a3, t2, Operand(0x8000000000000000));
-+  __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841));
-+  __ ori(a2, a5, 0xDCB);
-+  __ Or(a3, a5, Operand(0xDCB));
-+  __ Branch(&error, ne, a2, Operand(a3));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Orn(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7));
-+  __ Orn(a3, t2, Operand(0x81F25A87C4236841));
-+  __ Branch(&error, ne, a3, Operand(0xffffffffffffffff));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Xor(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b));
-+  __ Xor(a3, t2, Operand(0x8000000000000000));
-+  __ Branch(&error, ne, a3, Operand(0x1f25a87c4236841));
-+  __ Xor(a2, t2, Operand(0xDCB));
-+  __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Nor(a2, a4, a5);
-+  __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5));
-+  __ Nor(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
-+  __ Branch(&error, ne, a3, Operand(0x8000000000000000));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Andn(a2, a4, a5);
-+  __ Branch(&error, ne, a2, Operand(0x16002));
-+  __ Andn(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Orn(a2, t0, t1);
-+  __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7));
-+  __ Orn(a3, t2, Operand(0x8000000000000000));
-+  __ Branch(&error, ne, a3, Operand(0xffffffffffffffff));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Neg(a2, a7);
-+  __ Branch(&error, ne, a2, Operand(0x8000000000000001));
-+  __ Neg(a3, t0);
-+  __ Branch(&error, ne, a3, Operand(0xAB9));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Slt(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0x1));
-+  __ Slt(a3, a7, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
-+  __ Slt(a3, a4, 0x800);
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sle(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0x1));
-+  __ Sle(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
-+  __ Sle(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sleu(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(0x1));
-+  __ Sleu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
-+  __ Sleu(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sge(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ Sge(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
-+  __ Sge(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sgeu(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ Sgeu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
-+  __ Sgeu(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sgt(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ Sgt(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
-+  __ Sgt(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
-+
-+  __ or_(a2, zero_reg, zero_reg);
-+  __ or_(a3, zero_reg, zero_reg);
-+  __ Sgtu(a2, a5, a6);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+  __ Sgtu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
-+  __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
-+  __ Sgtu(a2, a7, t0);
-+  __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
-+
-+  __ li(a2, 0x31415926);
-+  __ b(&exit);
-+
-+  __ bind(&error);
-+  __ li(a2, 0x666);
-+
-+  __ bind(&exit);
-+  __ or_(a0, a2, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F2>::FromCode(*code);
-+  int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
-+
-+  CHECK_EQ(0x31415926L, res);
-+}
-+
-+TEST(Rotr_w) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  struct T {
-+    int32_t input;
-+    int32_t result_rotr_0;
-+    int32_t result_rotr_4;
-+    int32_t result_rotr_8;
-+    int32_t result_rotr_12;
-+    int32_t result_rotr_16;
-+    int32_t result_rotr_20;
-+    int32_t result_rotr_24;
-+    int32_t result_rotr_28;
-+    int32_t result_rotr_32;
-+    int32_t result_rotri_0;
-+    int32_t result_rotri_4;
-+    int32_t result_rotri_8;
-+    int32_t result_rotri_12;
-+    int32_t result_rotri_16;
-+    int32_t result_rotri_20;
-+    int32_t result_rotri_24;
-+    int32_t result_rotri_28;
-+    int32_t result_rotri_32;
-+  };
-+  T t;
-+
-+  __ Ld_w(a4, MemOperand(a0, offsetof(T, input)));
-+
-+  __ Rotr_w(a5, a4, 0);
-+  __ Rotr_w(a6, a4, 0x04);
-+  __ Rotr_w(a7, a4, 0x08);
-+  __ Rotr_w(t0, a4, 0x0C);
-+  __ Rotr_w(t1, a4, 0x10);
-+  __ Rotr_w(t2, a4, -0x0C);
-+  __ Rotr_w(t3, a4, -0x08);
-+  __ Rotr_w(t4, a4, -0x04);
-+  __ Rotr_w(t5, a4, 0x20);
-+  __ St_w(a5, MemOperand(a0, offsetof(T, result_rotr_0)));
-+  __ St_w(a6, MemOperand(a0, offsetof(T, result_rotr_4)));
-+  __ St_w(a7, MemOperand(a0, offsetof(T, result_rotr_8)));
-+  __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_12)));
-+  __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_16)));
-+  __ St_w(t2, MemOperand(a0, offsetof(T, result_rotr_20)));
-+  __ St_w(t3, MemOperand(a0, offsetof(T, result_rotr_24)));
-+  __ St_w(t4, MemOperand(a0, offsetof(T, result_rotr_28)));
-+  __ St_w(t5, MemOperand(a0, offsetof(T, result_rotr_32)));
-+
-+  __ li(t5, 0);
-+  __ Rotr_w(a5, a4, t5);
-+  __ li(t5, 0x04);
-+  __ Rotr_w(a6, a4, t5);
-+  __ li(t5, 0x08);
-+  __ Rotr_w(a7, a4, t5);
-+  __ li(t5, 0x0C);
-+  __ Rotr_w(t0, a4, t5);
-+  __ li(t5, 0x10);
-+  __ Rotr_w(t1, a4, t5);
-+  __ li(t5, -0x0C);
-+  __ Rotr_w(t2, a4, t5);
-+  __ li(t5, -0x08);
-+  __ Rotr_w(t3, a4, t5);
-+  __ li(t5, -0x04);
-+  __ Rotr_w(t4, a4, t5);
-+  __ li(t5, 0x20);
-+  __ Rotr_w(t5, a4, t5);
-+
-+  __ St_w(a5, MemOperand(a0, offsetof(T, result_rotri_0)));
-+  __ St_w(a6, MemOperand(a0, offsetof(T, result_rotri_4)));
-+  __ St_w(a7, MemOperand(a0, offsetof(T, result_rotri_8)));
-+  __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_12)));
-+  __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_16)));
-+  __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_20)));
-+  __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_24)));
-+  __ St_w(t4, MemOperand(a0, offsetof(T, result_rotri_28)));
-+  __ St_w(t5, MemOperand(a0, offsetof(T, result_rotri_32)));
-+
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  t.input = 0x12345678;
-+  f.Call(&t, 0, 0, 0, 0);
-+
-+  CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotr_0);
-+  CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
-+  CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
-+  CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
-+  CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
-+  CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
-+  CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
-+  CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
-+  CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotr_32);
-+
-+  CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotri_0);
-+  CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotri_4);
-+  CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotri_8);
-+  CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotri_12);
-+  CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotri_16);
-+  CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotri_20);
-+  CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotri_24);
-+  CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotri_28);
-+  CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotri_32);
-+}
-+
-+TEST(Rotr_d) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  struct T {
-+    int64_t input;
-+    int64_t result_rotr_0;
-+    int64_t result_rotr_8;
-+    int64_t result_rotr_16;
-+    int64_t result_rotr_24;
-+    int64_t result_rotr_32;
-+    int64_t result_rotr_40;
-+    int64_t result_rotr_48;
-+    int64_t result_rotr_56;
-+    int64_t result_rotr_64;
-+    int64_t result_rotri_0;
-+    int64_t result_rotri_8;
-+    int64_t result_rotri_16;
-+    int64_t result_rotri_24;
-+    int64_t result_rotri_32;
-+    int64_t result_rotri_40;
-+    int64_t result_rotri_48;
-+    int64_t result_rotri_56;
-+    int64_t result_rotri_64;
-+  };
-+  T t;
-+
-+  __ Ld_d(a4, MemOperand(a0, offsetof(T, input)));
-+
-+  __ Rotr_d(a5, a4, 0);
-+  __ Rotr_d(a6, a4, 0x08);
-+  __ Rotr_d(a7, a4, 0x10);
-+  __ Rotr_d(t0, a4, 0x18);
-+  __ Rotr_d(t1, a4, 0x20);
-+  __ Rotr_d(t2, a4, -0x18);
-+  __ Rotr_d(t3, a4, -0x10);
-+  __ Rotr_d(t4, a4, -0x08);
-+  __ Rotr_d(t5, a4, 0x40);
-+  __ St_d(a5, MemOperand(a0, offsetof(T, result_rotr_0)));
-+  __ St_d(a6, MemOperand(a0, offsetof(T, result_rotr_8)));
-+  __ St_d(a7, MemOperand(a0, offsetof(T, result_rotr_16)));
-+  __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_24)));
-+  __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_32)));
-+  __ St_d(t2, MemOperand(a0, offsetof(T, result_rotr_40)));
-+  __ St_d(t3, MemOperand(a0, offsetof(T, result_rotr_48)));
-+  __ St_d(t4, MemOperand(a0, offsetof(T, result_rotr_56)));
-+  __ St_d(t5, MemOperand(a0, offsetof(T, result_rotr_64)));
-+
-+  __ li(t5, 0);
-+  __ Rotr_d(a5, a4, t5);
-+  __ li(t5, 0x08);
-+  __ Rotr_d(a6, a4, t5);
-+  __ li(t5, 0x10);
-+  __ Rotr_d(a7, a4, t5);
-+  __ li(t5, 0x18);
-+  __ Rotr_d(t0, a4, t5);
-+  __ li(t5, 0x20);
-+  __ Rotr_d(t1, a4, t5);
-+  __ li(t5, -0x18);
-+  __ Rotr_d(t2, a4, t5);
-+  __ li(t5, -0x10);
-+  __ Rotr_d(t3, a4, t5);
-+  __ li(t5, -0x08);
-+  __ Rotr_d(t4, a4, t5);
-+  __ li(t5, 0x40);
-+  __ Rotr_d(t5, a4, t5);
-+
-+  __ St_d(a5, MemOperand(a0, offsetof(T, result_rotri_0)));
-+  __ St_d(a6, MemOperand(a0, offsetof(T, result_rotri_8)));
-+  __ St_d(a7, MemOperand(a0, offsetof(T, result_rotri_16)));
-+  __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_24)));
-+  __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_32)));
-+  __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_40)));
-+  __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_48)));
-+  __ St_d(t4, MemOperand(a0, offsetof(T, result_rotri_56)));
-+  __ St_d(t5, MemOperand(a0, offsetof(T, result_rotri_64)));
-+
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  t.input = 0x0123456789ABCDEF;
-+  f.Call(&t, 0, 0, 0, 0);
-+
-+  CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotr_0);
-+  CHECK_EQ(static_cast<int64_t>(0xEF0123456789ABCD), t.result_rotr_8);
-+  CHECK_EQ(static_cast<int64_t>(0xCDEF0123456789AB), t.result_rotr_16);
-+  CHECK_EQ(static_cast<int64_t>(0xABCDEF0123456789), t.result_rotr_24);
-+  CHECK_EQ(static_cast<int64_t>(0x89ABCDEF01234567), t.result_rotr_32);
-+  CHECK_EQ(static_cast<int64_t>(0x6789ABCDEF012345), t.result_rotr_40);
-+  CHECK_EQ(static_cast<int64_t>(0x456789ABCDEF0123), t.result_rotr_48);
-+  CHECK_EQ(static_cast<int64_t>(0x23456789ABCDEF01), t.result_rotr_56);
-+  CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotr_64);
-+
-+  CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotri_0);
-+  CHECK_EQ(static_cast<int64_t>(0xEF0123456789ABCD), t.result_rotri_8);
-+  CHECK_EQ(static_cast<int64_t>(0xCDEF0123456789AB), t.result_rotri_16);
-+  CHECK_EQ(static_cast<int64_t>(0xABCDEF0123456789), t.result_rotri_24);
-+  CHECK_EQ(static_cast<int64_t>(0x89ABCDEF01234567), t.result_rotri_32);
-+  CHECK_EQ(static_cast<int64_t>(0x6789ABCDEF012345), t.result_rotri_40);
-+  CHECK_EQ(static_cast<int64_t>(0x456789ABCDEF0123), t.result_rotri_48);
-+  CHECK_EQ(static_cast<int64_t>(0x23456789ABCDEF01), t.result_rotri_56);
-+  CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotri_64);
-+}
-+
-+TEST(macro_instructions4) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  struct T {
-+    double a;
-+    float b;
-+    double result_floor_a;
-+    float result_floor_b;
-+    double result_ceil_a;
-+    float result_ceil_b;
-+    double result_trunc_a;
-+    float result_trunc_b;
-+    double result_round_a;
-+    float result_round_b;
-+  };
-+  T t;
-+
-+  const int kTableLength = 16;
-+
-+  // clang-format off
-+  double inputs_d[kTableLength] = {
-+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-+      1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
-+      std::numeric_limits<double>::max() - 0.1,
-+      std::numeric_limits<double>::infinity()
-+  };
-+  float inputs_s[kTableLength] = {
-+      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-+      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-+      1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
-+      std::numeric_limits<float>::lowest() + 0.6,
-+      std::numeric_limits<float>::infinity()
-+  };
-+  float outputs_round_s[kTableLength] = {
-+      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-+      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-+      1.7976931348623157E+38, 0,
-+      std::numeric_limits<float>::lowest() + 1,
-+      std::numeric_limits<float>::infinity()
-+  };
-+  double outputs_round_d[kTableLength] = {
-+      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-+      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-+      1.7976931348623157E+308, 0,
-+      std::numeric_limits<double>::max(),
-+      std::numeric_limits<double>::infinity()
-+  };
-+  float outputs_trunc_s[kTableLength] = {
-+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-+      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-+      1.7976931348623157E+38, 0,
-+      std::numeric_limits<float>::lowest() + 1,
-+      std::numeric_limits<float>::infinity()
-+  };
-+  double outputs_trunc_d[kTableLength] = {
-+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-+      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-+      1.7976931348623157E+308, 0,
-+      std::numeric_limits<double>::max() - 1,
-+      std::numeric_limits<double>::infinity()
-+  };
-+  float outputs_ceil_s[kTableLength] = {
-+      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-+      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-+      1.7976931348623157E38, 1,
-+      std::numeric_limits<float>::lowest() + 1,
-+      std::numeric_limits<float>::infinity()
-+  };
-+  double outputs_ceil_d[kTableLength] = {
-+      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-+      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-+      1.7976931348623157E308, 1,
-+      std::numeric_limits<double>::max(),
-+      std::numeric_limits<double>::infinity()
-+  };
-+  float outputs_floor_s[kTableLength] = {
-+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-+      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-+      1.7976931348623157E38, 0,
-+      std::numeric_limits<float>::lowest() + 1,
-+      std::numeric_limits<float>::infinity()
-+  };
-+  double outputs_floor_d[kTableLength] = {
-+      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-+      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-+      1.7976931348623157E308, 0,
-+      std::numeric_limits<double>::max(),
-+      std::numeric_limits<double>::infinity()
-+  };
-+  // clang-format on
-+
-+  __ Fld_d(f8, MemOperand(a0, offsetof(T, a)));
-+  __ Fld_s(f9, MemOperand(a0, offsetof(T, b)));
-+  __ Floor_d(f10, f8);
-+  __ Floor_s(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(T, result_floor_a)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(T, result_floor_b)));
-+  __ Ceil_d(f10, f8);
-+  __ Ceil_s(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(T, result_ceil_a)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(T, result_ceil_b)));
-+  __ Trunc_d(f10, f8);
-+  __ Trunc_s(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(T, result_trunc_a)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(T, result_trunc_b)));
-+  __ Round_d(f10, f8);
-+  __ Round_s(f11, f9);
-+  __ Fst_d(f10, MemOperand(a0, offsetof(T, result_round_a)));
-+  __ Fst_s(f11, MemOperand(a0, offsetof(T, result_round_b)));
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+  for (int i = 0; i < kTableLength; i++) {
-+    t.a = inputs_d[i];
-+    t.b = inputs_s[i];
-+    f.Call(&t, 0, 0, 0, 0);
-+    CHECK_EQ(t.result_floor_a, outputs_floor_d[i]);
-+    CHECK_EQ(t.result_floor_b, outputs_floor_s[i]);
-+    CHECK_EQ(t.result_ceil_a, outputs_ceil_d[i]);
-+    CHECK_EQ(t.result_ceil_b, outputs_ceil_s[i]);
-+    CHECK_EQ(t.result_trunc_a, outputs_trunc_d[i]);
-+    CHECK_EQ(t.result_trunc_b, outputs_trunc_s[i]);
-+    CHECK_EQ(t.result_round_a, outputs_round_d[i]);
-+    CHECK_EQ(t.result_round_b, outputs_round_s[i]);
-+  }
-+}
-+
-+uint64_t run_ExtractBits(uint64_t source, int pos, int size, bool sign_extend) {
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  if (sign_extend) {
-+    __ ExtractBits(t0, a0, a1, size, true);
-+  } else {
-+    __ ExtractBits(t0, a0, a1, size);
-+  }
-+  __ or_(a0, t0, zero_reg);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F4>::FromCode(*code);
-+  uint64_t res = reinterpret_cast<uint64_t>(f.Call(source, pos, 0, 0, 0));
-+  return res;
-+}
-+
-+TEST(ExtractBits) {
-+  CcTest::InitializeVM();
-+
-+  struct TestCase {
-+    uint64_t source;
-+    int pos;
-+    int size;
-+    bool sign_extend;
-+    uint64_t res;
-+  };
-+
-+  // clang-format off
-+  struct TestCase tc[] = {
-+      //source,     pos, size, sign_extend,                res;
-+      {0x800,         4,    8,       false,               0x80},
-+      {0x800,         4,    8,        true, 0xFFFFFFFFFFFFFF80},
-+      {0x800,         5,    8,        true,               0x40},
-+      {0x40000,       3,   16,       false,             0x8000},
-+      {0x40000,       3,   16,        true, 0xFFFFFFFFFFFF8000},
-+      {0x40000,       4,   16,        true,             0x4000},
-+      {0x200000000,   2,   32,       false,         0x80000000},
-+      {0x200000000,   2,   32,        true, 0xFFFFFFFF80000000},
-+      {0x200000000,   3,   32,        true,         0x40000000},
-+  };
-+  // clang-format on
-+  size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
-+  for (size_t i = 0; i < nr_test_cases; ++i) {
-+    uint64_t result =
-+        run_ExtractBits(tc[i].source, tc[i].pos, tc[i].size, tc[i].sign_extend);
-+    CHECK_EQ(tc[i].res, result);
-+  }
-+}
-+
-+uint64_t run_InsertBits(uint64_t dest, uint64_t source, int pos, int size) {
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  __ InsertBits(a0, a1, a2, size);
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F4>::FromCode(*code);
-+  uint64_t res = reinterpret_cast<uint64_t>(f.Call(dest, source, pos, 0, 0));
-+  return res;
-+}
-+
-+TEST(InsertBits) {
-+  CcTest::InitializeVM();
-+
-+  struct TestCase {
-+    uint64_t dest;
-+    uint64_t source;
-+    int pos;
-+    int size;
-+    uint64_t res;
-+  };
-+
-+  // clang-format off
-+  struct TestCase tc[] = {
-+      //dest                source, pos, size,                res;
-+      {0x11111111,          0x1234,  32,   16,     0x123411111111},
-+      {0x111111111111,     0xFFFFF,  24,   10,     0x1113FF111111},
-+      {0x1111111111111111, 0xFEDCBA, 16,    4, 0x11111111111A1111},
-+  };
-+  // clang-format on
-+  size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
-+  for (size_t i = 0; i < nr_test_cases; ++i) {
-+    uint64_t result =
-+        run_InsertBits(tc[i].dest, tc[i].source, tc[i].pos, tc[i].size);
-+    CHECK_EQ(tc[i].res, result);
-+  }
-+}
-+
-+TEST(Popcnt) {
-+  CcTest::InitializeVM();
-+  Isolate* isolate = CcTest::i_isolate();
-+  HandleScope scope(isolate);
-+  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-+  MacroAssembler* masm = &assembler;
-+
-+  struct TestCase {
-+    uint32_t a;
-+    uint64_t b;
-+    int expected_a;
-+    int expected_b;
-+    int result_a;
-+    int result_b;
-+  };
-+  // clang-format off
-+  struct TestCase tc[] = {
-+    { 0x12345678, 0x1122334455667788, 13, 26, 0, 0},
-+    { 0x1234, 0x123456, 5, 9, 0, 0},
-+    { 0xFFF00000, 0xFFFF000000000000, 12, 16, 0, 0},
-+    { 0xFF000012, 0xFFFF000000001234, 10, 21, 0, 0}
-+  };
-+  // clang-format on
-+
-+  __ Ld_w(t0, MemOperand(a0, offsetof(TestCase, a)));
-+  __ Ld_d(t1, MemOperand(a0, offsetof(TestCase, b)));
-+  __ Popcnt_w(t2, t0);
-+  __ Popcnt_d(t3, t1);
-+  __ St_w(t2, MemOperand(a0, offsetof(TestCase, result_a)));
-+  __ St_w(t3, MemOperand(a0, offsetof(TestCase, result_b)));
-+  __ jirl(zero_reg, ra, 0);
-+
-+  CodeDesc desc;
-+  masm->GetCode(isolate, &desc);
-+  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
-+  auto f = GeneratedCode<F3>::FromCode(*code);
-+
-+  size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
-+  for (size_t i = 0; i < nr_test_cases; ++i) {
-+    f.Call(&tc[i], 0, 0, 0, 0);
-+    CHECK_EQ(tc[i].expected_a, tc[i].result_a);
-+    CHECK_EQ(tc[i].expected_b, tc[i].result_b);
-+  }
-+}
-+
-+#undef __
-+
-+}  // namespace internal
-+}  // namespace v8
-diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
-index 9d5cbf05..e4ff98c0 100755
---- a/deps/v8/tools/dev/gm.py
-+++ b/deps/v8/tools/dev/gm.py
-@@ -39,7 +39,7 @@ BUILD_TARGETS_ALL = ["all"]
- 
- # All arches that this script understands.
- ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
--          "s390", "s390x", "android_arm", "android_arm64"]
-+          "s390", "s390x", "android_arm", "android_arm64", "loong64"]
- # Arches that get built/run when you don't specify any.
- DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
- # Modes that this script understands.
-@@ -246,7 +246,7 @@ class Config(object):
-     if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
-     if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
-     if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
--                     "s390", "s390x"):
-+                     "s390", "s390x", "loong64"):
-       return "\nv8_target_cpu = \"%s\"" % self.arch
-     return ""
- 
-diff --git a/doc/api/process.md b/doc/api/process.md
-index 601fc892..2b61a4e2 100644
---- a/doc/api/process.md
-+++ b/doc/api/process.md
-@@ -636,8 +636,8 @@ added: v0.5.0
- * {string}
- 
- The operating system CPU architecture for which the Node.js binary was compiled.
--Possible values are: `'arm'`, `'arm64'`, `'ia32'`, `'mips'`,`'mipsel'`, `'ppc'`,
--`'ppc64'`, `'s390'`, `'s390x'`, `'x32'`, and `'x64'`.
-+Possible values are: `'arm'`, `'arm64'`, `'ia32'`, `'mips'`,`'mipsel'`, `'loong64'`,
-+`'ppc'`, `'ppc64'`, `'s390'`, `'s390x'`, `'x32'`, and `'x64'`.
- 
- ```js
- console.log(`This processor architecture is ${process.arch}`);
- ```
-diff --git a/test/fixtures/wasi/subdir/input_link.txt b/test/fixtures/wasi/subdir/input_link.txt
-new file mode 100644
-index 00000000..4c380537
---- /dev/null
-+++ b/test/fixtures/wasi/subdir/input_link.txt
-@@ -0,0 +1 @@
-+hello from input.txt
-diff --git a/test/fixtures/wasi/subdir/outside.txt b/test/fixtures/wasi/subdir/outside.txt
-new file mode 100644
-index 00000000..044c4b96
---- /dev/null
-+++ b/test/fixtures/wasi/subdir/outside.txt
-@@ -0,0 +1,2 @@
-+this file is part of the WASI tests. it exists outside of the sandbox, and
-+should be inaccessible from the WASI tests.
-diff --git a/tools/inspector_protocol/lib/Maybe_h.template b/tools/inspector_protocol/lib/Maybe_h.template
-index 22cfac6b..d1c19bed 100644
---- a/tools/inspector_protocol/lib/Maybe_h.template
-+++ b/tools/inspector_protocol/lib/Maybe_h.template
-@@ -32,10 +32,14 @@
- #define IP_TARGET_ARCH_MIPS 1
- #endif
- 
-+#if defined(__loongarch64)
-+#define IP_TARGET_ARCH_LOONG64 1
-+#endif
-+
- // Allowing the use of noexcept by removing the keyword on older compilers that
- // do not support adding noexcept to default members.
- #if ((IP_GNUC_PREREQ(4, 9, 0) && !defined(IP_TARGET_ARCH_MIPS) && \
--      !defined(IP_TARGET_ARCH_MIPS64)) || \
-+      !defined(IP_TARGET_ARCH_MIPS64) && !defined(IP_TARGET_ARCH_LOONG64)) || \
-      (defined(__clang__) && __cplusplus > 201300L))
- #define IP_NOEXCEPT noexcept
- #else
-@@ -134,6 +138,7 @@ public:
- #undef IP_GNUC_PREREQ
- #undef IP_TARGET_ARCH_MIPS64
- #undef IP_TARGET_ARCH_MIPS
-+#undef IP_TARGET_ARCH_LOONG64
- #undef IP_NOEXCEPT
- 
- #endif // !defined({{"_".join(config.protocol.namespace)}}_Maybe_h)
-diff --git a/tools/v8_gypfiles/toolchain.gypi b/tools/v8_gypfiles/toolchain.gypi
-index d4bad70d..53cabb67 100644
---- a/tools/v8_gypfiles/toolchain.gypi
-+++ b/tools/v8_gypfiles/toolchain.gypi
-@@ -933,6 +933,69 @@
-       }],  #'_toolset=="host"
-     ],
-   }],  # v8_target_arch=="mips64el"
-+  ['v8_target_arch=="loong64"', {
-+    'defines': [
-+      'V8_TARGET_ARCH_LOONG64',
-+    ],
-+    'conditions': [
-+      [ 'v8_can_use_fpu_instructions=="true"', {
-+        'defines': [
-+          'CAN_USE_FPU_INSTRUCTIONS',
-+        ],
-+      }],
-+    ],
-+    'target_conditions': [
-+      ['_toolset=="target"', {
-+        'conditions': [
-+          ['v8_target_arch==target_arch', {
-+            # Target built with a LoongArch CXX compiler.
-+            'variables': {
-+              'ldso_path%': '<(ldso_path)',
true : v.trim().replace(/^"|"$/g, ''); + } + + return cc; +@@ -556,4 +556,4 @@ module.exports = function () { + }; + + return CachePolicy; +-}(); +\ No newline at end of file ++}(); +-- +2.39.2 + diff --git a/0003-deps-qs-parse-ignore-__proto__-keys-CVE-2022-24999.patch b/0003-deps-qs-parse-ignore-__proto__-keys-CVE-2022-24999.patch deleted file mode 100644 index 81064b3..0000000 --- a/0003-deps-qs-parse-ignore-__proto__-keys-CVE-2022-24999.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 00da0b65c4c6bd75be2b91fba196be520e8ccf00 Mon Sep 17 00:00:00 2001 -From: Jordan Harband -Date: Mon, 27 Dec 2021 19:15:57 -0800 -Subject: [PATCH] deps(qs/parse): ignore `__proto__` keys (CVE-2022-24999) - -Signed-off-by: rpm-build ---- - deps/npm/node_modules/qs/lib/parse.js | 2 +- - deps/npm/node_modules/qs/test/parse.js | 60 ++++++++++++++++++++++++++ - 2 files changed, 61 insertions(+), 1 deletion(-) - -diff --git a/deps/npm/node_modules/qs/lib/parse.js b/deps/npm/node_modules/qs/lib/parse.js -index 8c9872e..08e623a 100644 ---- a/deps/npm/node_modules/qs/lib/parse.js -+++ b/deps/npm/node_modules/qs/lib/parse.js -@@ -69,7 +69,7 @@ var parseObject = function (chain, val, options) { - ) { - obj = []; - obj[index] = leaf; -- } else { -+ } else if (cleanRoot !== '__proto__') { - obj[cleanRoot] = leaf; - } - } -diff --git a/deps/npm/node_modules/qs/test/parse.js b/deps/npm/node_modules/qs/test/parse.js -index 0f8fe45..3e93784 100644 ---- a/deps/npm/node_modules/qs/test/parse.js -+++ b/deps/npm/node_modules/qs/test/parse.js -@@ -515,6 +515,66 @@ test('parse()', function (t) { - st.end(); - }); - -+ t.test('dunder proto is ignored', function (st) { -+ var payload = 'categories[__proto__]=login&categories[__proto__]&categories[length]=42'; -+ var result = qs.parse(payload, { allowPrototypes: true }); -+ -+ st.deepEqual( -+ result, -+ { -+ categories: { -+ length: '42' -+ } -+ }, -+ 'silent [[Prototype]] payload' -+ ); -+ -+ var plainResult = qs.parse(payload, { allowPrototypes: true, plainObjects: true }); -+ -+ st.deepEqual( -+ plainResult, -+ { -+ __proto__: null, -+ categories: { -+ __proto__: null, -+ length: '42' -+ } -+ }, -+ 'silent [[Prototype]] payload: plain objects' -+ ); -+ -+ var query = qs.parse('categories[__proto__]=cats&categories[__proto__]=dogs&categories[some][json]=toInject', { allowPrototypes: true }); -+ -+ st.notOk(Array.isArray(query.categories), 'is not an array'); -+ st.notOk(query.categories instanceof Array, 'is not instanceof an array'); -+ st.deepEqual(query.categories, { some: { json: 'toInject' } }); -+ st.equal(JSON.stringify(query.categories), '{"some":{"json":"toInject"}}', 'stringifies as a non-array'); -+ -+ st.deepEqual( -+ qs.parse('foo[__proto__][hidden]=value&foo[bar]=stuffs', { allowPrototypes: true }), -+ { -+ foo: { -+ bar: 'stuffs' -+ } -+ }, -+ 'hidden values' -+ ); -+ -+ st.deepEqual( -+ qs.parse('foo[__proto__][hidden]=value&foo[bar]=stuffs', { allowPrototypes: true, plainObjects: true }), -+ { -+ __proto__: null, -+ foo: { -+ __proto__: null, -+ bar: 'stuffs' -+ } -+ }, -+ 'hidden values: plain objects' -+ ); -+ -+ st.end(); -+ }); -+ - t.test('can return null objects', { skip: !Object.create }, function (st) { - var expected = Object.create(null); - expected.a = Object.create(null); --- -2.38.1 - diff --git a/0004-deps-cares-Add-str-len-check-in-config_sortlist-to-a.patch b/0004-deps-cares-Add-str-len-check-in-config_sortlist-to-a.patch new file mode 100644 index 0000000..c81988f --- /dev/null +++ b/0004-deps-cares-Add-str-len-check-in-config_sortlist-to-a.patch @@ 
-0,0 +1,52 @@ +From 58725d71e4306c83a474d6c3035e72580d0c4592 Mon Sep 17 00:00:00 2001 +From: hopper-vul <118949689+hopper-vul@users.noreply.github.com> +Date: Wed, 18 Jan 2023 22:14:26 +0800 +Subject: [PATCH] deps(cares): Add str len check in config_sortlist to avoid + stack overflow (#497) + +In ares_set_sortlist, it calls config_sortlist(..., sortstr) to parse +the input str and initialize a sortlist configuration. + +However, ares_set_sortlist has not any checks about the validity of the input str. +It is very easy to create an arbitrary length stack overflow with the unchecked +`memcpy(ipbuf, str, q-str);` and `memcpy(ipbufpfx, str, q-str);` +statements in the config_sortlist call, which could potentially cause severe +security impact in practical programs. + +This commit add necessary check for `ipbuf` and `ipbufpfx` which avoid the +potential stack overflows. + +fixes #496 + +Fix By: @hopper-vul + +Signed-off-by: rpm-build +--- + deps/cares/src/lib/ares_init.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/deps/cares/src/lib/ares_init.c b/deps/cares/src/lib/ares_init.c +index de5d86c..d5858f6 100644 +--- a/deps/cares/src/lib/ares_init.c ++++ b/deps/cares/src/lib/ares_init.c +@@ -2243,6 +2243,8 @@ static int config_sortlist(struct apattern **sortlist, int *nsort, + q = str; + while (*q && *q != '/' && *q != ';' && !ISSPACE(*q)) + q++; ++ if (q-str >= 16) ++ return ARES_EBADSTR; + memcpy(ipbuf, str, q-str); + ipbuf[q-str] = '\0'; + /* Find the prefix */ +@@ -2251,6 +2253,8 @@ static int config_sortlist(struct apattern **sortlist, int *nsort, + const char *str2 = q+1; + while (*q && *q != ';' && !ISSPACE(*q)) + q++; ++ if (q-str >= 32) ++ return ARES_EBADSTR; + memcpy(ipbufpfx, str, q-str); + ipbufpfx[q-str] = '\0'; + str = str2; +-- +2.39.2 + diff --git a/download b/download index 4324e3d..940330d 100644 --- a/download +++ b/download @@ -1,3 +1,3 @@ 8d30ae61833be02b1a9baa0f4c485fd2 cjs-module-lexer-1.2.2.tar.gz -1b0cbd32bc9176c66e1aa945fa14ba82 node-v14.21.1-stripped.tar.gz +8585fe25f84b2d2a3b97b5c8da05e95c node-v14.21.3-stripped.tar.gz 7b6ec4e1c3e39397bdd09087e2437bfd wasi-sdk-wasi-sdk-11.tar.gz diff --git a/nodejs.spec b/nodejs.spec index 5ab6dd6..da638c6 100644 --- a/nodejs.spec +++ b/nodejs.spec @@ -1,5 +1,4 @@ %bcond_with debug -%define anolis_release .0.1 # PowerPC, s390x and aarch64 segfault during Debug builds # https://github.com/nodejs/node/issues/20642 @@ -31,7 +30,7 @@ # This is used by both the nodejs package and the npm subpackage that # has a separate version - the name is special so that rpmdev-bumpspec # will bump this rather than adding .1 to the end. 
-%global baserelease 2 +%global baserelease 1 %{?!_pkgdocdir:%global _pkgdocdir %{_docdir}/%{name}-%{version}} @@ -43,7 +42,7 @@ %global nodejs_epoch 1 %global nodejs_major 14 %global nodejs_minor 21 -%global nodejs_patch 1 +%global nodejs_patch 3 %global nodejs_abi %{nodejs_major}.%{nodejs_minor} %global nodejs_version %{nodejs_major}.%{nodejs_minor}.%{nodejs_patch} %global nodejs_release %{baserelease} @@ -122,7 +121,7 @@ %global npm_epoch 1 %global npm_major 6 %global npm_minor 14 -%global npm_patch 17 +%global npm_patch 18 %global npm_version %{npm_major}.%{npm_minor}.%{npm_patch} # uvwasi - from deps/uvwasi/include/uvwasi.h @@ -147,7 +146,7 @@ Name: nodejs Epoch: %{nodejs_epoch} Version: %{nodejs_version} -Release: %{nodejs_release}%{anolis_release}%{?dist} +Release: %{nodejs_release}%{?dist} Summary: JavaScript runtime License: MIT and ASL 2.0 and ISC and BSD Group: Development/Languages @@ -185,10 +184,8 @@ Source102: https://github.com/WebAssembly/wasi-sdk/archive/wasi-sdk-11/wasi-sdk- Patch1: 0001-Disable-running-gyp-on-shared-deps.patch # Dependency vulnerabilities Patch2: 0002-deps-ansi-regex-fix-potential-ReDoS.patch -Patch3: 0003-deps-qs-parse-ignore-__proto__-keys-CVE-2022-24999.patch - -# add LoongArch support -Patch5: 0001-add-LoongArch-support.patch +Patch3: 0003-deps-http-cache-semantics-Don-t-use-regex-to-trim-wh.patch +Patch4: 0004-deps-cares-Add-str-len-check-in-config_sortlist-to-a.patch BuildRequires: make BuildRequires: python3-devel @@ -283,9 +280,9 @@ Provides: bundled(histogram) = %{histogram_version} # Make sure we keep NPM up to date when we update Node.js %if 0%{?rhel} < 8 # EPEL doesn't support Recommends, so make it strict -Requires: npm = %{npm_epoch}:%{npm_version}-%{npm_release}%{anolis_release}%{?dist} +Requires: npm >= %{npm_epoch}:%{npm_version}-%{npm_release}%{?dist} %else -Recommends: npm = %{npm_epoch}:%{npm_version}-%{npm_release}%{anolis_release}%{?dist} +Recommends: npm >= %{npm_epoch}:%{npm_version}-%{npm_release}%{?dist} %endif %description @@ -299,7 +296,7 @@ real-time applications that run across distributed devices. %package devel Summary: JavaScript runtime - development headers Group: Development/Languages -Requires: %{name}%{?_isa} = %{epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} +Requires: %{name}%{?_isa} = %{epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} Requires: openssl-devel%{?_isa} Requires: zlib-devel%{?_isa} Requires: brotli-devel%{?_isa} @@ -315,7 +312,7 @@ Development headers for the Node.js JavaScript runtime. %package full-i18n Summary: Non-English locale data for Node.js -Requires: %{name}%{?_isa} = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} +Requires: %{name}%{?_isa} = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} %description full-i18n Optional data files to provide full-icu support for Node.js. Remove this @@ -326,16 +323,16 @@ package to save space if non-English locales are not needed. Summary: Node.js Package Manager Epoch: %{npm_epoch} Version: %{npm_version} -Release: %{npm_release}%{anolis_release}%{?dist} +Release: %{npm_release}%{?dist} # We used to ship npm separately, but it is so tightly integrated with Node.js # (and expected to be present on all Node.js systems) that we ship it bundled # now. 
Obsoletes: npm < 0:3.5.4-6 Provides: npm = %{npm_epoch}:%{npm_version} -Requires: nodejs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} +Requires: nodejs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} %if 0%{?fedora} || 0%{?rhel} >= 8 -Recommends: nodejs-docs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} +Recommends: nodejs-docs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} %endif # Do not add epoch to the virtual NPM provides or it will break @@ -355,8 +352,8 @@ BuildArch: noarch # We don't require that the main package be installed to # use the docs, but if it is installed, make sure the # version always matches -Conflicts: %{name} > %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} -Conflicts: %{name} < %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist} +Conflicts: %{name} > %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} +Conflicts: %{name} < %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist} %description docs The API documentation for the Node.js JavaScript runtime. @@ -683,7 +680,6 @@ end %doc %{_mandir}/man5/shrinkwrap-json.5* %doc %{_mandir}/man7/config.7* %doc %{_mandir}/man7/developers.7* -%doc %{_mandir}/man7/disputes.7* %doc %{_mandir}/man7/orgs.7* %doc %{_mandir}/man7/registry.7* %doc %{_mandir}/man7/removal.7* @@ -700,8 +696,11 @@ end %changelog -* Mon Jan 30 2023 Shi Pujin - 1:14.21.1-2.0.1 -- add LoongArch support +* Mon Mar 06 2023 Jan StanÄ›k - 1:14.21.3-1 +- Rebase to 14.21.3 + Resolves: rhbz#2153712 + Resolves: CVE-2022-25881 CVE-2023-23918 CVE-2023-23920 CVE-2022-38900 + Resolves: CVE-2022-4904 * Thu Dec 08 2022 Jan StanÄ›k - 1:14.21.1-2 - Apply upstream fix for CVE-2022-24999 @@ -711,7 +710,7 @@ end * Wed Nov 16 2022 Jan StanÄ›k - 1:14.21.1-1 - Rebase to version 14.21.1 - Resolves: rhbz#2129805 CVE-2022-43548 CVE-2022-3517 + Resolves: rhbz#2129805 CVE-2022-43548 * Fri Oct 07 2022 Jan StanÄ›k - 1:14.20.1-2 - Record issues fixed in the current version -- Gitee From 4398e19f76daa6ad021747538a9820176807a76b Mon Sep 17 00:00:00 2001 From: Shi Pujin Date: Wed, 26 Oct 2022 18:21:05 +0800 Subject: [PATCH 2/3] add LoongArch support # Conflicts: # nodejs.spec --- 0001-add-LoongArch-support.patch | 52473 +++++++++++++++++++++++++++++ nodejs.spec | 27 +- 2 files changed, 52490 insertions(+), 10 deletions(-) create mode 100644 0001-add-LoongArch-support.patch diff --git a/0001-add-LoongArch-support.patch b/0001-add-LoongArch-support.patch new file mode 100644 index 0000000..828b05d --- /dev/null +++ b/0001-add-LoongArch-support.patch @@ -0,0 +1,52473 @@ +From 5bdab65c2e83c01c048915d0870c83efa4b51294 Mon Sep 17 00:00:00 2001 +From: Shi Pujin +Date: Wed, 26 Oct 2022 15:07:55 +0800 +Subject: [PATCH] add LoongArch support + + +diff --git a/configure.py b/configure.py +index 892e1d42..34ca13c6 100755 +--- a/configure.py ++++ b/configure.py +@@ -56,7 +56,7 @@ parser = optparse.OptionParser() + valid_os = ('win', 'mac', 'solaris', 'freebsd', 'openbsd', 'linux', + 'android', 'aix', 'cloudabi') + valid_arch = ('arm', 'arm64', 'ia32', 'mips', 'mipsel', 'mips64el', 'ppc', +- 'ppc64', 'x32','x64', 'x86', 'x86_64', 's390x') ++ 'ppc64', 'x32','x64', 'x86', 'x86_64', 's390x', 'loong64') + valid_arm_float_abi = ('soft', 'softfp', 'hard') + valid_arm_fpu = ('vfp', 'vfpv3', 'vfpv3-d16', 'neon') + valid_mips_arch = ('loongson', 'r1', 'r2', 'r6', 'rx') +@@ -987,6 +987,7 @@ def host_arch_cc(): + '__PPC__' : 'ppc64', + '__x86_64__' : 
'x64', + '__s390x__' : 's390x', ++ '__loongarch64' : 'loong64', + } + + rtn = 'ia32' # default +@@ -1013,6 +1014,7 @@ def host_arch_win(): + 'x86' : 'ia32', + 'arm' : 'arm', + 'mips' : 'mips', ++ 'loongarch' : 'loong64', + } + + return matchup.get(arch, 'ia32') +@@ -1061,6 +1063,14 @@ def clang_version_ge(version_checked): + return True + return False + ++def configure_loong64(o): ++ can_use_fpu_instructions = 'true' ++ o['variables']['v8_can_use_fpu_instructions'] = b(can_use_fpu_instructions) ++ o['variables']['loong64_fpu_mode'] = 'hard' ++ host_byteorder = 'little' ++ o['variables']['v8_host_byteorder'] = host_byteorder ++ ++ + def gcc_version_ge(version_checked): + for compiler in [(CC, 'c'), (CXX, 'c++')]: + ok, is_clang, clang_version, gcc_version = \ +@@ -1122,6 +1132,8 @@ def configure_node(o): + configure_arm(o) + elif target_arch in ('mips', 'mipsel', 'mips64el'): + configure_mips(o, target_arch) ++ elif target_arch == 'loong64': ++ configure_loong64(o) + + if flavor == 'aix': + o['variables']['node_target_type'] = 'static_library' +diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn +index a8100ad6..af723df4 100644 +--- a/deps/v8/BUILD.gn ++++ b/deps/v8/BUILD.gn +@@ -692,6 +692,16 @@ config("toolchain") { + cflags += [ "-march=z196" ] + } + } ++ ++ # loong64 simulators. ++ if (target_is_simulator && v8_current_cpu == "loong64") { ++ defines += [ "_LOONG64_TARGET_SIMULATOR" ] ++ } ++ ++ if (v8_current_cpu == "loong64") { ++ defines += [ "V8_TARGET_ARCH_LOONG64" ] ++ } ++ + if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { + if (v8_current_cpu == "ppc") { + defines += [ "V8_TARGET_ARCH_PPC" ] +@@ -1715,6 +1725,11 @@ v8_source_set("v8_initializers") { + ### gcmole(arch:mips64el) ### + "src/builtins/mips64/builtins-mips64.cc", + ] ++ } else if (v8_current_cpu == "loong64") { ++ sources += [ ++ ### gcmole(arch:loong64) ### ++ "src/builtins/loong64/builtins-loong64.cc", ++ ] + } else if (v8_current_cpu == "ppc") { + sources += [ + ### gcmole(arch:ppc) ### +@@ -3413,6 +3428,33 @@ v8_source_set("v8_base_without_compiler") { + "src/regexp/mips64/regexp-macro-assembler-mips64.h", + "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", + ] ++ } else if (v8_current_cpu == "loong64") { ++ sources += [ ### gcmole(arch:loong64) ### ++ "src/codegen/loong64/assembler-loong64-inl.h", ++ "src/codegen/loong64/assembler-loong64.cc", ++ "src/codegen/loong64/assembler-loong64.h", ++ "src/codegen/loong64/constants-loong64.cc", ++ "src/codegen/loong64/constants-loong64.h", ++ "src/codegen/loong64/cpu-loong64.cc", ++ "src/codegen/loong64/interface-descriptors-loong64.cc", ++ "src/codegen/loong64/macro-assembler-loong64.cc", ++ "src/codegen/loong64/macro-assembler-loong64.h", ++ "src/codegen/loong64/register-loong64.h", ++ "src/compiler/backend/loong64/code-generator-loong64.cc", ++ "src/compiler/backend/loong64/instruction-codes-loong64.h", ++ "src/compiler/backend/loong64/instruction-scheduler-loong64.cc", ++ "src/compiler/backend/loong64/instruction-selector-loong64.cc", ++ "src/debug/loong64/debug-loong64.cc", ++ "src/deoptimizer/loong64/deoptimizer-loong64.cc", ++ "src/diagnostics/loong64/disasm-loong64.cc", ++ "src/execution/loong64/frame-constants-loong64.cc", ++ "src/execution/loong64/frame-constants-loong64.h", ++ "src/execution/loong64/simulator-loong64.cc", ++ "src/execution/loong64/simulator-loong64.h", ++ "src/regexp/loong64/regexp-macro-assembler-loong64.cc", ++ "src/regexp/loong64/regexp-macro-assembler-loong64.h", ++ "src/wasm/baseline/loong64/liftoff-assembler-loong64.h", ++ ] + 
} else if (v8_current_cpu == "ppc") { + sources += [ ### gcmole(arch:ppc) ### + "src/codegen/ppc/assembler-ppc-inl.h", +@@ -4124,6 +4166,8 @@ v8_source_set("cppgc_base") { + sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ] + } else if (target_cpu == "mips64el") { + sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ] ++ } else if (target_cpu == "loong64") { ++ sources += [ "src/heap/cppgc/asm/loong64/push_registers_asm.cc" ] + } + } else if (is_win) { + if (target_cpu == "x64") { +diff --git a/deps/v8/BUILD.gn.orig b/deps/v8/BUILD.gn.orig +new file mode 100644 +index 00000000..a8100ad6 +--- /dev/null ++++ b/deps/v8/BUILD.gn.orig +@@ -0,0 +1,5075 @@ ++# Copyright 2014 The Chromium Authors. All rights reserved. ++# Use of this source code is governed by a BSD-style license that can be ++# found in the LICENSE file. ++ ++import("//build/config/android/config.gni") ++import("//build/config/arm.gni") ++import("//build/config/dcheck_always_on.gni") ++import("//build/config/host_byteorder.gni") ++import("//build/config/mips.gni") ++import("//build/config/sanitizers/sanitizers.gni") ++import("//build_overrides/build.gni") ++ ++if (is_android) { ++ import("//build/config/android/rules.gni") ++} ++ ++import("gni/snapshot_toolchain.gni") ++import("gni/v8.gni") ++ ++# Specifies if the target build is a simulator build. Comparing target cpu ++# with v8 target cpu to not affect simulator builds for making cross-compile ++# snapshots. ++target_is_simulator = (target_cpu != v8_target_cpu && !v8_multi_arch_build) || ++ (current_cpu != v8_current_cpu && v8_multi_arch_build) ++ ++# For faster Windows builds. See https://crbug.com/v8/8475. ++emit_builtins_as_inline_asm = is_win && is_clang ++ ++declare_args() { ++ # Print to stdout on Android. ++ v8_android_log_stdout = false ++ ++ # Dynamically set an additional dependency from v8/custom_deps. ++ v8_custom_deps = "" ++ ++ # Turns on all V8 debug features. Enables running V8 in a pseudo debug mode ++ # within a release Chrome. ++ v8_enable_debugging_features = is_debug ++ ++ # Sets -DV8_ENABLE_FUTURE. ++ v8_enable_future = false ++ ++ # Lite mode disables a number of performance optimizations to reduce memory ++ # at the cost of performance. ++ # Sets --DV8_LITE_MODE. ++ v8_enable_lite_mode = false ++ ++ # Sets -DVERIFY_HEAP. ++ v8_enable_verify_heap = "" ++ ++ # Sets -DVERIFY_PREDICTABLE ++ v8_enable_verify_predictable = false ++ ++ # Enable compiler warnings when using V8_DEPRECATED apis. ++ v8_deprecation_warnings = true ++ ++ # Enable compiler warnings when using V8_DEPRECATE_SOON apis. ++ v8_imminent_deprecation_warnings = true ++ ++ # Embeds the given script into the snapshot. ++ v8_embed_script = "" ++ ++ # Allows the embedder to add a custom suffix to the version string. ++ v8_embedder_string = "" ++ ++ # Sets -dENABLE_DISASSEMBLER. ++ v8_enable_disassembler = "" ++ ++ # Sets the number of internal fields on promise objects. ++ v8_promise_internal_field_count = 0 ++ ++ # Sets -dENABLE_GDB_JIT_INTERFACE. ++ v8_enable_gdbjit = "" ++ ++ # Sets -dENABLE_VTUNE_JIT_INTERFACE. ++ v8_enable_vtunejit = false ++ ++ # Sets -dENABLE_VTUNE_TRACEMARK. ++ v8_enable_vtunetracemark = false ++ ++ # Sets -dENABLE_HANDLE_ZAPPING. ++ v8_enable_handle_zapping = is_debug ++ ++ # Enable slow dchecks. ++ v8_enable_slow_dchecks = false ++ ++ # Enable fast mksnapshot runs. ++ v8_enable_fast_mksnapshot = false ++ ++ # Optimize code for Torque executable, even during a debug build. 
++ v8_enable_fast_torque = "" ++ ++ # Enable the registration of unwinding info for Windows x64 and ARM64. ++ v8_win64_unwinding_info = true ++ ++ # Enable code comments for builtins in the snapshot (impacts performance). ++ v8_enable_snapshot_code_comments = false ++ ++ # Enable native counters from the snapshot (impacts performance, sets ++ # -dV8_SNAPSHOT_NATIVE_CODE_COUNTERS). ++ # This option will generate extra code in the snapshot to increment counters, ++ # as per the --native-code-counters flag. ++ v8_enable_snapshot_native_code_counters = "" ++ ++ # Enable code-generation-time checking of types in the CodeStubAssembler. ++ v8_enable_verify_csa = false ++ ++ # Enable pointer compression (sets -dV8_COMPRESS_POINTERS). ++ v8_enable_pointer_compression = "" ++ v8_enable_31bit_smis_on_64bit_arch = false ++ ++ # Sets -dOBJECT_PRINT. ++ v8_enable_object_print = "" ++ ++ # Sets -dV8_TRACE_MAPS. ++ v8_enable_trace_maps = "" ++ ++ # Sets -dV8_ENABLE_CHECKS. ++ v8_enable_v8_checks = "" ++ ++ # Sets -dV8_TRACE_IGNITION. ++ v8_enable_trace_ignition = false ++ ++ # Sets -dV8_TRACE_FEEDBACK_UPDATES. ++ v8_enable_trace_feedback_updates = false ++ ++ # Sets -dV8_CONCURRENT_MARKING ++ v8_enable_concurrent_marking = true ++ ++ # Sets -dV8_ARRAY_BUFFER_EXTENSION ++ v8_enable_array_buffer_extension = true ++ ++ # Enables various testing features. ++ v8_enable_test_features = "" ++ ++ # With post mortem support enabled, metadata is embedded into libv8 that ++ # describes various parameters of the VM for use by debuggers. See ++ # tools/gen-postmortem-metadata.py for details. ++ v8_postmortem_support = false ++ ++ # Use Siphash as added protection against hash flooding attacks. ++ v8_use_siphash = false ++ ++ # Switches off inlining in V8. ++ v8_no_inline = false ++ ++ # Override OS page size when generating snapshot ++ v8_os_page_size = "0" ++ ++ # Similar to vfp but on MIPS. ++ v8_can_use_fpu_instructions = true ++ ++ # Similar to the ARM hard float ABI but on MIPS. ++ v8_use_mips_abi_hardfloat = true ++ ++ # Controls the threshold for on-heap/off-heap Typed Arrays. ++ v8_typed_array_max_size_in_heap = 64 ++ ++ v8_enable_gdbjit = ++ ((v8_current_cpu == "x86" || v8_current_cpu == "x64") && ++ (is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux) ++ ++ # Temporary flag to allow embedders to update their microtasks scopes ++ # while rolling in a new version of V8. ++ v8_check_microtasks_scopes_consistency = "" ++ ++ # Enable mitigations for executing untrusted code. ++ # Disabled by default on ia32 due to conflicting requirements with embedded ++ # builtins. Enabled by default on Android since it doesn't support ++ # site-isolation in Chrome and on simulator builds which test code generation ++ # on these platforms. ++ v8_untrusted_code_mitigations = ++ v8_current_cpu != "x86" && (is_android || target_is_simulator) ++ ++ # Enable minor mark compact. ++ v8_enable_minor_mc = true ++ ++ # Check that each header can be included in isolation (requires also ++ # setting the "check_v8_header_includes" gclient variable to run a ++ # specific hook). ++ v8_check_header_includes = false ++ ++ # Enable sharing read-only space across isolates. ++ # Sets -DV8_SHARED_RO_HEAP. ++ v8_enable_shared_ro_heap = "" ++ ++ # Enable lazy source positions by default. 
++ v8_enable_lazy_source_positions = true ++ ++ # Enable third party HEAP library ++ v8_enable_third_party_heap = false ++ ++ # Libaries used by third party heap ++ v8_third_party_heap_libs = [] ++ ++ # Source code used by third party heap ++ v8_third_party_heap_files = [] ++ ++ # Disable write barriers when GCs are non-incremental and ++ # heap has single generation. ++ v8_disable_write_barriers = false ++ ++ # Redirect allocation in young generation so that there will be ++ # only one single generation. ++ v8_enable_single_generation = "" ++ ++ # Use token threaded dispatch for the regular expression interpreter. ++ # Use switch-based dispatch if this is false ++ v8_enable_regexp_interpreter_threaded_dispatch = true ++ ++ # Enable additional targets necessary for verification of torque ++ # file generation ++ v8_verify_torque_generation_invariance = false ++ ++ # Disable all snapshot compression. ++ v8_enable_snapshot_compression = true ++ ++ # Enable control-flow integrity features, such as pointer authentication for ++ # ARM64. ++ v8_control_flow_integrity = false ++ ++ # Enable object names in cppgc for debug purposes. ++ cppgc_enable_object_names = false ++ ++ # Enable V8 heap sandbox experimental feature. ++ # Sets -DV8_HEAP_SANDBOX. ++ v8_enable_heap_sandbox = "" ++ ++ # Experimental support for native context independent code. ++ # https://crbug.com/v8/8888 ++ v8_enable_nci_code = false ++} ++ ++# Derived defaults. ++if (v8_enable_verify_heap == "") { ++ v8_enable_verify_heap = v8_enable_debugging_features ++} ++if (v8_enable_object_print == "") { ++ v8_enable_object_print = v8_enable_debugging_features ++} ++if (v8_enable_disassembler == "") { ++ v8_enable_disassembler = v8_enable_debugging_features ++} ++if (v8_enable_trace_maps == "") { ++ v8_enable_trace_maps = v8_enable_debugging_features ++} ++if (v8_enable_test_features == "") { ++ v8_enable_test_features = v8_enable_debugging_features || dcheck_always_on ++} ++if (v8_enable_v8_checks == "") { ++ v8_enable_v8_checks = v8_enable_debugging_features ++} ++if (v8_check_microtasks_scopes_consistency == "") { ++ v8_check_microtasks_scopes_consistency = ++ v8_enable_debugging_features || dcheck_always_on ++} ++if (v8_enable_snapshot_native_code_counters == "") { ++ v8_enable_snapshot_native_code_counters = v8_enable_debugging_features ++} ++if (v8_enable_pointer_compression == "") { ++ # TODO(v8:v7703): temporarily enable pointer compression on arm64 and on x64 ++ v8_enable_pointer_compression = ++ v8_current_cpu == "arm64" || v8_current_cpu == "x64" ++} ++if (v8_enable_fast_torque == "") { ++ v8_enable_fast_torque = v8_enable_fast_mksnapshot ++} ++if (v8_enable_heap_sandbox == "") { ++ v8_enable_heap_sandbox = false ++} ++if (v8_enable_single_generation == "") { ++ v8_enable_single_generation = v8_disable_write_barriers ++} ++ ++# Toggle pointer compression for correctness fuzzing when building the ++# clang_x64_pointer_compression toolchain. We'll correctness-compare the ++# default build with the clang_x64_pointer_compression build. 
++if (v8_multi_arch_build && ++ rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) == ++ "clang_x64_pointer_compression") { ++ v8_enable_pointer_compression = !v8_enable_pointer_compression ++} ++if (v8_enable_shared_ro_heap == "") { ++ v8_enable_shared_ro_heap = !v8_enable_pointer_compression ++} ++ ++assert(!v8_disable_write_barriers || v8_enable_single_generation, ++ "Disabling write barriers works only with single generation") ++ ++assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, ++ "Untrusted code mitigations are unsupported on ia32") ++ ++assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, ++ "Control-flow integrity is only supported on arm64") ++ ++assert( ++ !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, ++ "Pointer compression is not supported with shared read-only heap enabled") ++ ++assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression, ++ "V8 Heap Sandbox requires pointer compression") ++ ++v8_random_seed = "314159265" ++v8_toolset_for_shell = "host" ++ ++############################################################################### ++# Configurations ++# ++ ++config("internal_config_base") { ++ # Only targets in this file and its subdirs can depend on this. ++ visibility = [ "./*" ] ++ ++ configs = [ ":v8_tracing_config" ] ++ ++ include_dirs = [ ++ ".", ++ "include", ++ "$target_gen_dir", ++ ] ++} ++ ++config("internal_config") { ++ defines = [] ++ # Only targets in this file and its subdirs can depend on this. ++ visibility = [ "./*" ] ++ ++ configs = [ ++ "//build/config/compiler:wexit_time_destructors", ++ ":internal_config_base", ++ ":v8_header_features", ++ ] ++ ++ if (is_component_build) { ++ defines += [ "BUILDING_V8_SHARED" ] ++ } ++} ++ ++# Should be applied to all targets that write trace events. ++config("v8_tracing_config") { ++ if (v8_use_perfetto) { ++ include_dirs = [ ++ "third_party/perfetto/include", ++ "$root_gen_dir/third_party/perfetto", ++ "$root_gen_dir/third_party/perfetto/build_config", ++ ] ++ } ++} ++ ++# This config should be applied to code using the libplatform. ++config("libplatform_config") { ++ include_dirs = [ "include" ] ++ if (is_component_build) { ++ defines = [ "USING_V8_PLATFORM_SHARED" ] ++ } ++} ++ ++# This config should be applied to code using the libbase. ++config("libbase_config") { ++ if (is_component_build) { ++ defines = [ "USING_V8_BASE_SHARED" ] ++ } ++ libs = [] ++ if (is_android && current_toolchain != host_toolchain) { ++ libs += [ "log" ] ++ } ++} ++ ++# This config should be applied to code using the cppgc_base. ++config("cppgc_base_config") { ++ defines = [] ++ if (cppgc_enable_object_names) { ++ defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ] ++ } ++} ++ ++# This config should be applied to code using the libsampler. ++config("libsampler_config") { ++ include_dirs = [ "include" ] ++} ++ ++# This config should only be applied to code using V8 and not any V8 code ++# itself. ++config("external_config") { ++ defines = [] ++ configs = [ ":v8_header_features" ] ++ if (is_component_build) { ++ defines += [ "USING_V8_SHARED" ] ++ } ++ include_dirs = [ ++ "include", ++ "$target_gen_dir/include", ++ ] ++} ++ ++# This config should only be applied to code that needs to be explicitly ++# aware of whether we are using startup data or not. 
++config("external_startup_data") { ++ if (v8_use_external_startup_data) { ++ defines = [ "V8_USE_EXTERNAL_STARTUP_DATA" ] ++ } ++} ++ ++# Put defines that are used in public headers here; public headers are ++# defined in "v8_headers" and are included by embedders of V8. ++config("v8_header_features") { ++ visibility = [ ":*" ] ++ ++ defines = [] ++ ++ if (v8_enable_v8_checks) { ++ defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h". ++ } ++ if (v8_enable_pointer_compression) { ++ defines += [ "V8_COMPRESS_POINTERS" ] ++ } ++ if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) { ++ defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ] ++ } ++ if (v8_enable_heap_sandbox) { ++ defines += [ "V8_HEAP_SANDBOX" ] ++ } ++ if (v8_deprecation_warnings) { ++ defines += [ "V8_DEPRECATION_WARNINGS" ] ++ } ++ if (v8_imminent_deprecation_warnings) { ++ defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ] ++ } ++} ++ ++# Put defines here that are only used in our internal files and NEVER in ++# external headers that embedders (such as chromium and node) might include. ++config("features") { ++ # Only targets in this file and its subdirs can depend on this. ++ visibility = [ "./*" ] ++ ++ defines = [] ++ ++ configs = [ ":v8_header_features" ] ++ ++ if (v8_embedder_string != "") { ++ defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ] ++ } ++ if (v8_enable_disassembler) { ++ defines += [ "ENABLE_DISASSEMBLER" ] ++ } ++ if (v8_promise_internal_field_count != 0) { ++ defines += ++ [ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ] ++ } ++ defines += ++ [ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ] ++ ++ assert( ++ !v8_enable_raw_heap_snapshots, ++ "This flag is deprecated and is now available through the inspector interface as an argument to profiler's method `takeHeapSnapshot`. 
Consider using blink's flag `enable_additional_blink_object_names` to get better naming of internal objects.") ++ ++ if (v8_enable_future) { ++ defines += [ "V8_ENABLE_FUTURE" ] ++ } ++ if (v8_enable_lite_mode) { ++ defines += [ "V8_LITE_MODE" ] ++ } ++ if (v8_enable_gdbjit) { ++ defines += [ "ENABLE_GDB_JIT_INTERFACE" ] ++ } ++ if (v8_enable_vtunejit) { ++ defines += [ "ENABLE_VTUNE_JIT_INTERFACE" ] ++ } ++ if (v8_enable_vtunetracemark) { ++ defines += [ "ENABLE_VTUNE_TRACEMARK" ] ++ } ++ if (v8_enable_minor_mc) { ++ defines += [ "ENABLE_MINOR_MC" ] ++ } ++ if (v8_enable_object_print) { ++ defines += [ "OBJECT_PRINT" ] ++ } ++ if (v8_enable_verify_heap) { ++ defines += [ "VERIFY_HEAP" ] ++ } ++ if (v8_enable_verify_predictable) { ++ defines += [ "VERIFY_PREDICTABLE" ] ++ } ++ if (v8_enable_trace_maps) { ++ defines += [ "V8_TRACE_MAPS" ] ++ } ++ if (v8_enable_trace_ignition) { ++ defines += [ "V8_TRACE_IGNITION" ] ++ } ++ if (v8_enable_trace_feedback_updates) { ++ defines += [ "V8_TRACE_FEEDBACK_UPDATES" ] ++ } ++ if (v8_enable_test_features) { ++ defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ] ++ defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ] ++ defines += [ "V8_ENABLE_DOUBLE_CONST_STORE_CHECK" ] ++ } ++ if (v8_enable_i18n_support) { ++ defines += [ "V8_INTL_SUPPORT" ] ++ } ++ if (v8_enable_handle_zapping) { ++ defines += [ "ENABLE_HANDLE_ZAPPING" ] ++ } ++ if (v8_enable_snapshot_native_code_counters) { ++ defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] ++ } ++ if (v8_enable_single_generation) { ++ defines += [ "V8_ENABLE_SINGLE_GENERATION" ] ++ } ++ if (v8_disable_write_barriers) { ++ defines += [ "V8_DISABLE_WRITE_BARRIERS" ] ++ } ++ if (v8_enable_third_party_heap) { ++ defines += [ "V8_ENABLE_THIRD_PARTY_HEAP" ] ++ } ++ if (v8_use_external_startup_data) { ++ defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] ++ } ++ if (v8_enable_concurrent_marking) { ++ defines += [ "V8_CONCURRENT_MARKING" ] ++ } ++ if (v8_enable_array_buffer_extension) { ++ defines += [ "V8_ARRAY_BUFFER_EXTENSION" ] ++ } ++ if (v8_enable_lazy_source_positions) { ++ defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] ++ } ++ if (v8_check_microtasks_scopes_consistency) { ++ defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] ++ } ++ if (v8_use_multi_snapshots) { ++ defines += [ "V8_MULTI_SNAPSHOTS" ] ++ } ++ if (v8_use_siphash) { ++ defines += [ "V8_USE_SIPHASH" ] ++ } ++ if (v8_enable_shared_ro_heap) { ++ defines += [ "V8_SHARED_RO_HEAP" ] ++ } ++ if (v8_use_perfetto) { ++ defines += [ "V8_USE_PERFETTO" ] ++ } ++ if (v8_win64_unwinding_info) { ++ defines += [ "V8_WIN64_UNWINDING_INFO" ] ++ } ++ if (v8_enable_regexp_interpreter_threaded_dispatch) { ++ defines += [ "V8_ENABLE_REGEXP_INTERPRETER_THREADED_DISPATCH" ] ++ } ++ if (v8_enable_snapshot_compression) { ++ defines += [ "V8_SNAPSHOT_COMPRESSION" ] ++ } ++ if (v8_control_flow_integrity) { ++ defines += [ "V8_ENABLE_CONTROL_FLOW_INTEGRITY" ] ++ } ++ if (v8_enable_wasm_gdb_remote_debugging) { ++ defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ] ++ } ++ if (v8_enable_nci_code) { ++ defines += [ "V8_ENABLE_NCI_CODE" ] ++ } ++} ++ ++config("toolchain") { ++ # Only targets in this file and its subdirs can depend on this. 
++  visibility = [ "./*" ]
++
++  defines = []
++  cflags = []
++  ldflags = []
++
++  if (v8_current_cpu == "arm") {
++    defines += [ "V8_TARGET_ARCH_ARM" ]
++    if (arm_version >= 7) {
++      defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
++    }
++    if (arm_fpu == "vfpv3-d16") {
++      defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
++    } else if (arm_fpu == "vfpv3") {
++      defines += [
++        "CAN_USE_VFP3_INSTRUCTIONS",
++        "CAN_USE_VFP32DREGS",
++      ]
++    } else if (arm_fpu == "neon") {
++      defines += [
++        "CAN_USE_VFP3_INSTRUCTIONS",
++        "CAN_USE_VFP32DREGS",
++        "CAN_USE_NEON",
++      ]
++    }
++
++    # TODO(jochen): Add support for arm_test_noprobe.
++
++    if (current_cpu != "arm") {
++      # These defines are used for the ARM simulator.
++      if (arm_float_abi == "hard") {
++        defines += [ "USE_EABI_HARDFLOAT=1" ]
++      } else if (arm_float_abi == "softfp") {
++        defines += [ "USE_EABI_HARDFLOAT=0" ]
++      }
++    }
++  }
++  if (v8_current_cpu == "arm64") {
++    defines += [ "V8_TARGET_ARCH_ARM64" ]
++    if (v8_control_flow_integrity) {
++      # TODO(v8:10026): Enable this in src/build.
++      if (current_cpu == "arm64") {
++        cflags += [ "-mbranch-protection=standard" ]
++      }
++    }
++  }
++
++  # Mips64el/mipsel simulators.
++  if (target_is_simulator &&
++      (v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) {
++    defines += [ "_MIPS_TARGET_SIMULATOR" ]
++  }
++
++  if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") {
++    defines += [ "V8_TARGET_ARCH_MIPS" ]
++    if (v8_can_use_fpu_instructions) {
++      defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
++    }
++    if (v8_use_mips_abi_hardfloat) {
++      defines += [
++        "__mips_hard_float=1",
++        "CAN_USE_FPU_INSTRUCTIONS",
++      ]
++    } else {
++      defines += [ "__mips_soft_float=1" ]
++    }
++    if (mips_arch_variant == "r6") {
++      defines += [
++        "_MIPS_ARCH_MIPS32R6",
++        "FPU_MODE_FP64",
++      ]
++      if (mips_use_msa) {
++        defines += [ "_MIPS_MSA" ]
++      }
++    } else if (mips_arch_variant == "r2") {
++      defines += [ "_MIPS_ARCH_MIPS32R2" ]
++      if (mips_fpu_mode == "fp64") {
++        defines += [ "FPU_MODE_FP64" ]
++      } else if (mips_fpu_mode == "fpxx") {
++        defines += [ "FPU_MODE_FPXX" ]
++      } else if (mips_fpu_mode == "fp32") {
++        defines += [ "FPU_MODE_FP32" ]
++      }
++    } else if (mips_arch_variant == "r1") {
++      defines += [ "FPU_MODE_FP32" ]
++    }
++
++    # TODO(jochen): Add support for mips_arch_variant rx and loongson.
++  }
++
++  if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") {
++    defines += [ "V8_TARGET_ARCH_MIPS64" ]
++    if (v8_can_use_fpu_instructions) {
++      defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
++    }
++    if (mips_use_msa) {
++      defines += [ "_MIPS_MSA" ]
++    }
++    if (host_byteorder == "little") {
++      defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
++    } else if (host_byteorder == "big") {
++      defines += [ "V8_TARGET_ARCH_MIPS64_BE" ]
++    }
++    if (v8_use_mips_abi_hardfloat) {
++      defines += [
++        "__mips_hard_float=1",
++        "CAN_USE_FPU_INSTRUCTIONS",
++      ]
++    } else {
++      defines += [ "__mips_soft_float=1" ]
++    }
++    if (mips_arch_variant == "r6") {
++      defines += [ "_MIPS_ARCH_MIPS64R6" ]
++    } else if (mips_arch_variant == "r2") {
++      defines += [ "_MIPS_ARCH_MIPS64R2" ]
++    }
++  }
++  if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
++    defines += [ "V8_TARGET_ARCH_S390" ]
++    cflags += [ "-ffp-contract=off" ]
++    if (v8_current_cpu == "s390x") {
++      defines += [ "V8_TARGET_ARCH_S390X" ]
++    }
++    if (host_byteorder == "little") {
++      defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ]
++    } else {
++      cflags += [ "-march=z196" ]
++    }
++  }
++  if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
++    if (v8_current_cpu == "ppc") {
++      defines += [ "V8_TARGET_ARCH_PPC" ]
++    } else if (v8_current_cpu == "ppc64") {
++      defines += [ "V8_TARGET_ARCH_PPC64" ]
++    }
++    if (host_byteorder == "little") {
++      defines += [ "V8_TARGET_ARCH_PPC_LE" ]
++    } else if (host_byteorder == "big") {
++      defines += [ "V8_TARGET_ARCH_PPC_BE" ]
++      if (current_os == "aix") {
++        cflags += [
++          # Work around AIX ceil, trunc and round oddities.
++          "-mcpu=power5+",
++          "-mfprnd",
++
++          # Work around AIX assembler popcntb bug.
++          "-mno-popcntb",
++        ]
++      }
++    }
++  }
++
++  if (v8_current_cpu == "x86") {
++    defines += [ "V8_TARGET_ARCH_IA32" ]
++    if (is_win) {
++      # Ensure no surprising artifacts from 80bit double math with x86.
++      cflags += [ "/arch:SSE2" ]
++    }
++  }
++  if (v8_current_cpu == "x64") {
++    defines += [ "V8_TARGET_ARCH_X64" ]
++    if (is_win) {
++      # Increase the initial stack size. The default is 1MB, this is 2MB. This
++      # applies only to executables and shared libraries produced by V8 since
++      # ldflags are not pushed to dependants.
++      ldflags += [ "/STACK:2097152" ]
++    }
++  }
++  if (is_android && v8_android_log_stdout) {
++    defines += [ "V8_ANDROID_LOG_STDOUT" ]
++  }
++
++  # V8_TARGET_OS_ defines. The target OS may differ from host OS e.g. in
++  # mksnapshot. We additionally set V8_HAVE_TARGET_OS to determine that a
++  # target OS has in fact been set; otherwise we internally assume that target
++  # OS == host OS (see v8config.h).
++  if (target_os == "android") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_ANDROID" ]
++  } else if (target_os == "fuchsia") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_FUCHSIA" ]
++  } else if (target_os == "ios") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_IOS" ]
++  } else if (target_os == "linux") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_LINUX" ]
++  } else if (target_os == "mac") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_MACOSX" ]
++  } else if (target_os == "win") {
++    defines += [ "V8_HAVE_TARGET_OS" ]
++    defines += [ "V8_TARGET_OS_WIN" ]
++  }
++
++  # TODO(jochen): Support v8_enable_prof on Windows.
++  # TODO(jochen): Add support for compiling with simulators.
++
++  if (v8_enable_debugging_features) {
++    if (is_linux && v8_enable_backtrace) {
++      ldflags += [ "-rdynamic" ]
++    }
++
++    defines += [ "DEBUG" ]
++    if (v8_enable_slow_dchecks) {
++      defines += [ "ENABLE_SLOW_DCHECKS" ]
++    }
++  } else if (dcheck_always_on) {
++    defines += [ "DEBUG" ]
++  }
++
++  if (v8_enable_verify_csa) {
++    defines += [ "ENABLE_VERIFY_CSA" ]
++  }
++
++  if (!v8_untrusted_code_mitigations) {
++    defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
++  }
++
++  if (v8_no_inline) {
++    if (is_win) {
++      cflags += [ "/Ob0" ]
++    } else {
++      cflags += [
++        "-fno-inline-functions",
++        "-fno-inline",
++      ]
++    }
++  }
++
++  if (is_clang) {
++    cflags += [ "-Wmissing-field-initializers" ]
++
++    if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
++      # We exclude MIPS because the IsMipsArchVariant macro causes trouble.
++      cflags += [ "-Wunreachable-code" ]
++    }
++
++    if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
++        v8_current_cpu == "mips64el") {
++      cflags += [ "-Wshorten-64-to-32" ]
++    }
++  }
++
++  if (is_win) {
++    cflags += [
++      "/wd4245",  # Conversion with signed/unsigned mismatch.
++      "/wd4267",  # Conversion with possible loss of data.
++      "/wd4324",  # Padding structure due to alignment.
++      "/wd4701",  # Potentially uninitialized local variable.
++      "/wd4702",  # Unreachable code.
++      "/wd4703",  # Potentially uninitialized local pointer variable.
++      "/wd4709",  # Comma operator within array index expr (bugged).
++      "/wd4714",  # Function marked forceinline not inlined.
++
++      # MSVC assumes that control can get past an exhaustive switch and then
++      # warns if there's no return there (see https://crbug.com/v8/7658)
++      "/wd4715",  # Not all control paths return a value.
++
++      "/wd4718",  # Recursive call has no side-effect.
++      "/wd4723",  # https://crbug.com/v8/7771
++      "/wd4724",  # https://crbug.com/v8/7771
++      "/wd4800",  # Forcing value to bool.
++    ]
++  }
++
++  if (!is_clang && is_win) {
++    cflags += [ "/wd4506" ]  # Benign "no definition for inline function"
++  }
++
++  if (!is_clang && !is_win) {
++    cflags += [
++      # Disable gcc warnings for optimizations based on the assumption that
++      # signed overflow does not occur. Generates false positives (see
++      # http://crbug.com/v8/6341).
++      "-Wno-strict-overflow",
++
++      # GCC assumes that control can get past an exhaustive switch and then
++      # warns if there's no return there (see https://crbug.com/v8/7658).
++      "-Wno-return-type",
++    ]
++  }
++
++  # Chromium uses a hand-picked subset of UBSan coverage. We want everything.
++  if (is_ubsan) {
++    cflags += [ "-fsanitize=undefined" ]
++  }
++}
++
++# For code that is hot during mksnapshot. In fast-mksnapshot builds, we
++# optimize some files even in debug builds to speed up mksnapshot times.
++config("always_optimize") {
++  configs = [ ":internal_config" ]
++
++  # TODO(crbug.com/621335) Rework this so that we don't have the confusion
++  # between "optimize_speed" and "optimize_max".
++  if (((is_posix && !is_android) || is_fuchsia) && !using_sanitizer) {
++    configs += [ "//build/config/compiler:optimize_speed" ]
++  } else {
++    configs += [ "//build/config/compiler:optimize_max" ]
++  }
++}
++
++# Configs for code coverage with gcov. Separate configs for cflags and ldflags
++# to selectively include cflags in non-test targets only.
++config("v8_gcov_coverage_cflags") { ++ cflags = [ ++ "-fprofile-arcs", ++ "-ftest-coverage", ++ ] ++} ++ ++config("v8_gcov_coverage_ldflags") { ++ ldflags = [ "-fprofile-arcs" ] ++} ++ ++############################################################################### ++# Actions ++# ++ ++# Only for Windows clang builds. Converts the embedded.S file produced by ++# mksnapshot into an embedded.cc file with corresponding inline assembly. ++template("asm_to_inline_asm") { ++ name = target_name ++ if (name == "default") { ++ suffix = "" ++ } else { ++ suffix = "_$name" ++ } ++ ++ action("asm_to_inline_asm_" + name) { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. ++ ++ assert(emit_builtins_as_inline_asm) ++ ++ script = "tools/snapshot/asm_to_inline_asm.py" ++ deps = [ ":run_mksnapshot_" + name ] ++ sources = [ "$target_gen_dir/embedded${suffix}.S" ] ++ outputs = [ "$target_gen_dir/embedded${suffix}.cc" ] ++ args = invoker.args ++ args += [ ++ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir), ++ rebase_path("$target_gen_dir/embedded${suffix}.cc", root_build_dir), ++ ] ++ } ++} ++ ++if (is_android && enable_java_templates) { ++ android_assets("v8_external_startup_data_assets") { ++ if (v8_use_external_startup_data) { ++ # We don't support side-by-side snapshots on Android within Chromium. ++ assert(!v8_use_multi_snapshots) ++ deps = [ "//v8" ] ++ renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] ++ if (current_cpu == "arm" || current_cpu == "x86" || ++ current_cpu == "mipsel") { ++ renaming_destinations = [ "snapshot_blob_32.bin" ] ++ } else { ++ renaming_destinations = [ "snapshot_blob_64.bin" ] ++ } ++ disable_compression = true ++ } ++ } ++} ++ ++action("postmortem-metadata") { ++ # Only targets in this file and the top-level visibility target can ++ # depend on this. 
++  visibility = [
++    ":*",
++    "//:gn_visibility",
++  ]
++
++  script = "tools/gen-postmortem-metadata.py"
++
++  # NOSORT
++  sources = [
++    "src/objects/objects.h",
++    "src/objects/objects-inl.h",
++    "src/objects/allocation-site-inl.h",
++    "src/objects/allocation-site.h",
++    "src/objects/cell-inl.h",
++    "src/objects/cell.h",
++    "src/objects/code-inl.h",
++    "src/objects/code.h",
++    "src/objects/data-handler.h",
++    "src/objects/data-handler-inl.h",
++    "src/objects/descriptor-array.h",
++    "src/objects/descriptor-array-inl.h",
++    "src/objects/feedback-cell.h",
++    "src/objects/feedback-cell-inl.h",
++    "src/objects/fixed-array-inl.h",
++    "src/objects/fixed-array.h",
++    "src/objects/heap-number-inl.h",
++    "src/objects/heap-number.h",
++    "src/objects/heap-object-inl.h",
++    "src/objects/heap-object.h",
++    "src/objects/instance-type.h",
++    "src/objects/js-array-inl.h",
++    "src/objects/js-array.h",
++    "src/objects/js-array-buffer-inl.h",
++    "src/objects/js-array-buffer.h",
++    "src/objects/js-objects-inl.h",
++    "src/objects/js-objects.h",
++    "src/objects/js-promise-inl.h",
++    "src/objects/js-promise.h",
++    "src/objects/js-regexp-inl.h",
++    "src/objects/js-regexp.cc",
++    "src/objects/js-regexp.h",
++    "src/objects/js-regexp-string-iterator-inl.h",
++    "src/objects/js-regexp-string-iterator.h",
++    "src/objects/map.h",
++    "src/objects/map.cc",
++    "src/objects/map-inl.h",
++    "src/objects/js-objects.cc",
++    "src/objects/name.h",
++    "src/objects/name-inl.h",
++    "src/objects/oddball-inl.h",
++    "src/objects/oddball.h",
++    "src/objects/primitive-heap-object.h",
++    "src/objects/primitive-heap-object-inl.h",
++    "src/objects/scope-info.h",
++    "src/objects/script.h",
++    "src/objects/script-inl.h",
++    "src/objects/shared-function-info.h",
++    "src/objects/shared-function-info-inl.h",
++    "src/objects/string.cc",
++    "src/objects/string.h",
++    "src/objects/string-comparator.cc",
++    "src/objects/string-comparator.h",
++    "src/objects/string-inl.h",
++    "src/objects/struct.h",
++    "src/objects/struct-inl.h",
++    "$target_gen_dir/torque-generated/instance-types-tq.h",
++  ]
++
++  outputs = [ "$target_gen_dir/debug-support.cc" ]
++
++  args = rebase_path(outputs, root_build_dir) +
++         rebase_path(sources, root_build_dir)
++
++  deps = [ ":run_torque" ]
++}
++
++torque_files = [
++  "src/builtins/array-copywithin.tq",
++  "src/builtins/array-every.tq",
++  "src/builtins/array-filter.tq",
++  "src/builtins/array-find.tq",
++  "src/builtins/array-findindex.tq",
++  "src/builtins/array-foreach.tq",
++  "src/builtins/array-from.tq",
++  "src/builtins/array-isarray.tq",
++  "src/builtins/array-join.tq",
++  "src/builtins/array-lastindexof.tq",
++  "src/builtins/array-map.tq",
++  "src/builtins/array-of.tq",
++  "src/builtins/array-reduce-right.tq",
++  "src/builtins/array-reduce.tq",
++  "src/builtins/array-reverse.tq",
++  "src/builtins/array-shift.tq",
++  "src/builtins/array-slice.tq",
++  "src/builtins/array-some.tq",
++  "src/builtins/array-splice.tq",
++  "src/builtins/array-unshift.tq",
++  "src/builtins/array.tq",
++  "src/builtins/base.tq",
++  "src/builtins/bigint.tq",
++  "src/builtins/boolean.tq",
++  "src/builtins/builtins-string.tq",
++  "src/builtins/collections.tq",
++  "src/builtins/cast.tq",
++  "src/builtins/convert.tq",
++  "src/builtins/console.tq",
++  "src/builtins/data-view.tq",
++  "src/builtins/finalization-registry.tq",
++  "src/builtins/frames.tq",
++  "src/builtins/frame-arguments.tq",
++  "src/builtins/growable-fixed-array.tq",
++  "src/builtins/ic-callable.tq",
++  "src/builtins/ic.tq",
++  "src/builtins/internal-coverage.tq",
++  "src/builtins/iterator.tq",
++  "src/builtins/math.tq",
++  "src/builtins/number.tq",
++  "src/builtins/object-fromentries.tq",
++  "src/builtins/object.tq",
++  "src/builtins/promise-abstract-operations.tq",
++  "src/builtins/promise-all.tq",
++  "src/builtins/promise-all-element-closure.tq",
++  "src/builtins/promise-any.tq",
++  "src/builtins/promise-constructor.tq",
++  "src/builtins/promise-finally.tq",
++  "src/builtins/promise-misc.tq",
++  "src/builtins/promise-race.tq",
++  "src/builtins/promise-reaction-job.tq",
++  "src/builtins/promise-resolve.tq",
++  "src/builtins/promise-then.tq",
++  "src/builtins/promise-jobs.tq",
++  "src/builtins/proxy-constructor.tq",
++  "src/builtins/proxy-delete-property.tq",
++  "src/builtins/proxy-get-property.tq",
++  "src/builtins/proxy-get-prototype-of.tq",
++  "src/builtins/proxy-has-property.tq",
++  "src/builtins/proxy-is-extensible.tq",
++  "src/builtins/proxy-prevent-extensions.tq",
++  "src/builtins/proxy-revocable.tq",
++  "src/builtins/proxy-revoke.tq",
++  "src/builtins/proxy-set-property.tq",
++  "src/builtins/proxy-set-prototype-of.tq",
++  "src/builtins/proxy.tq",
++  "src/builtins/reflect.tq",
++  "src/builtins/regexp-exec.tq",
++  "src/builtins/regexp-match-all.tq",
++  "src/builtins/regexp-match.tq",
++  "src/builtins/regexp-replace.tq",
++  "src/builtins/regexp-search.tq",
++  "src/builtins/regexp-source.tq",
++  "src/builtins/regexp-split.tq",
++  "src/builtins/regexp-test.tq",
++  "src/builtins/regexp.tq",
++  "src/builtins/string-endswith.tq",
++  "src/builtins/string-html.tq",
++  "src/builtins/string-iterator.tq",
++  "src/builtins/string-pad.tq",
++  "src/builtins/string-repeat.tq",
++  "src/builtins/string-replaceall.tq",
++  "src/builtins/string-slice.tq",
++  "src/builtins/string-startswith.tq",
++  "src/builtins/string-substring.tq",
++  "src/builtins/string-substr.tq",
++  "src/builtins/symbol.tq",
++  "src/builtins/torque-internal.tq",
++  "src/builtins/typed-array-createtypedarray.tq",
++  "src/builtins/typed-array-every.tq",
++  "src/builtins/typed-array-filter.tq",
++  "src/builtins/typed-array-find.tq",
++  "src/builtins/typed-array-findindex.tq",
++  "src/builtins/typed-array-foreach.tq",
++  "src/builtins/typed-array-from.tq",
++  "src/builtins/typed-array-of.tq",
++  "src/builtins/typed-array-reduce.tq",
++  "src/builtins/typed-array-reduceright.tq",
++  "src/builtins/typed-array-set.tq",
++  "src/builtins/typed-array-slice.tq",
++  "src/builtins/typed-array-some.tq",
++  "src/builtins/typed-array-sort.tq",
++  "src/builtins/typed-array-subarray.tq",
++  "src/builtins/typed-array.tq",
++  "src/builtins/wasm.tq",
++  "src/ic/handler-configuration.tq",
++  "src/objects/allocation-site.tq",
++  "src/objects/api-callbacks.tq",
++  "src/objects/arguments.tq",
++  "src/objects/cell.tq",
++  "src/objects/code.tq",
++  "src/objects/contexts.tq",
++  "src/objects/data-handler.tq",
++  "src/objects/debug-objects.tq",
++  "src/objects/descriptor-array.tq",
++  "src/objects/embedder-data-array.tq",
++  "src/objects/feedback-cell.tq",
++  "src/objects/feedback-vector.tq",
++  "src/objects/fixed-array.tq",
++  "src/objects/foreign.tq",
++  "src/objects/free-space.tq",
++  "src/objects/heap-number.tq",
++  "src/objects/heap-object.tq",
++  "src/objects/intl-objects.tq",
++  "src/objects/js-aggregate-error.tq",
++  "src/objects/js-array-buffer.tq",
++  "src/objects/js-array.tq",
++  "src/objects/js-collection-iterator.tq",
++  "src/objects/js-collection.tq",
++  "src/objects/js-generator.tq",
++  "src/objects/js-objects.tq",
++  "src/objects/js-promise.tq",
++  "src/objects/js-proxy.tq",
++  "src/objects/js-regexp-string-iterator.tq",
++  "src/objects/js-regexp.tq",
++  "src/objects/js-weak-refs.tq",
++  "src/objects/literal-objects.tq",
++  "src/objects/map.tq",
++  "src/objects/microtask.tq",
++  "src/objects/module.tq",
++  "src/objects/name.tq",
++  "src/objects/oddball.tq",
++  "src/objects/ordered-hash-table.tq",
++  "src/objects/primitive-heap-object.tq",
++  "src/objects/promise.tq",
++  "src/objects/property-array.tq",
++  "src/objects/property-cell.tq",
++  "src/objects/property-descriptor-object.tq",
++  "src/objects/prototype-info.tq",
++  "src/objects/regexp-match-info.tq",
++  "src/objects/scope-info.tq",
++  "src/objects/script.tq",
++  "src/objects/shared-function-info.tq",
++  "src/objects/source-text-module.tq",
++  "src/objects/stack-frame-info.tq",
++  "src/objects/string.tq",
++  "src/objects/struct.tq",
++  "src/objects/synthetic-module.tq",
++  "src/objects/template-objects.tq",
++  "src/objects/template.tq",
++  "src/wasm/wasm-objects.tq",
++  "test/torque/test-torque.tq",
++  "third_party/v8/builtins/array-sort.tq",
++]
++
++if (!v8_enable_i18n_support) {
++  torque_files -= [ "src/objects/intl-objects.tq" ]
++}
++
++# Template for running torque
++# When building with v8_verify_torque_generation_invariance=true we need
++# to be able to run torque for both 32 and 64 bits in the same build
++template("run_torque") {
++  if (target_name == "") {
++    suffix = ""
++  } else {
++    suffix = "_$target_name"
++  }
++
++  toolchain = invoker.toolchain
++
++  action("run_torque" + suffix) {
++    visibility = [
++      ":*",
++      "tools/debug_helper/:*",
++      "tools/gcmole/:*",
++      "test/cctest/:*",
++    ]
++
++    deps = [ ":torque($toolchain)" ]
++
++    script = "tools/run.py"
++
++    sources = torque_files
++
++    destination_folder = "$target_gen_dir/torque-generated$suffix"
++
++    files = [
++      "bit-fields-tq.h",
++      "builtin-definitions-tq.h",
++      "interface-descriptors-tq.inc",
++      "factory-tq.cc",
++      "factory-tq.inc",
++      "field-offsets-tq.h",
++      "class-verifiers-tq.cc",
++      "class-verifiers-tq.h",
++      "enum-verifiers-tq.cc",
++      "objects-printer-tq.cc",
++      "objects-body-descriptors-tq-inl.inc",
++      "class-definitions-tq.cc",
++      "class-definitions-tq-inl.h",
++      "class-definitions-tq.h",
++      "class-debug-readers-tq.cc",
++      "class-debug-readers-tq.h",
++      "exported-macros-assembler-tq.cc",
++      "exported-macros-assembler-tq.h",
++      "csa-types-tq.h",
++      "instance-types-tq.h",
++      "internal-class-definitions-tq.h",
++      "internal-class-definitions-tq-inl.h",
++      "exported-class-definitions-tq.h",
++      "exported-class-definitions-tq-inl.h",
++    ]
++
++    outputs = []
++    foreach(file, files) {
++      outputs += [ "$destination_folder/$file" ]
++    }
++
++    foreach(file, torque_files) {
++      filetq = string_replace(file, ".tq", "-tq-csa")
++      outputs += [
++        "$destination_folder/$filetq.cc",
++        "$destination_folder/$filetq.h",
++      ]
++    }
++
++    args = [
++      "./" + rebase_path(
++              get_label_info(":torque($toolchain)", "root_out_dir") + "/torque",
++              root_build_dir),
++      "-o",
++      rebase_path("$destination_folder", root_build_dir),
++      "-v8-root",
++      rebase_path(".", root_build_dir),
++    ]
++    if (defined(invoker.args)) {
++      args += invoker.args
++    }
++    args += torque_files
++  }
++}
++
++# Default run_torque action
++run_torque("") {
++  toolchain = v8_generator_toolchain
++}
++
++if (v8_verify_torque_generation_invariance) {
++  run_torque("x86") {
++    toolchain = "//build/toolchain/linux:clang_x86"
++  }
++
++  run_torque("x64") {
++    args = [ "-m32" ]
++    toolchain = "//build/toolchain/linux:clang_x64"
++  }
++
++  action("compare_torque_runs") {
++    deps = [
++      ":run_torque_x64",
++      ":run_torque_x86",
++    ]
++    report_file = "$target_gen_dir/torque_comparison_results.txt"
++    script = "tools/compare_torque_output.py"
++    args = [
++      rebase_path("$target_gen_dir/torque-generated_x64", root_build_dir),
++      rebase_path("$target_gen_dir/torque-generated_x86", root_build_dir),
++      rebase_path(report_file, root_build_dir),
++    ]
++    outputs = [ report_file ]
++  }
++}
++
++group("v8_maybe_icu") {
++  if (v8_enable_i18n_support) {
++    public_deps = [ "//third_party/icu" ]
++  }
++}
++
++v8_source_set("torque_generated_initializers") {
++  visibility = [ ":*" ]  # Only targets in this file can depend on this.
++
++  deps = [
++    ":generate_bytecode_builtins_list",
++    ":run_torque",
++    ":v8_tracing",
++  ]
++
++  public_deps = [ ":v8_maybe_icu" ]
++
++  sources = [
++    "$target_gen_dir/torque-generated/csa-types-tq.h",
++    "$target_gen_dir/torque-generated/enum-verifiers-tq.cc",
++    "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
++    "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
++    "src/torque/runtime-support.h",
++  ]
++  foreach(file, torque_files) {
++    filetq = string_replace(file, ".tq", "-tq-csa")
++    sources += [
++      "$target_gen_dir/torque-generated/$filetq.cc",
++      "$target_gen_dir/torque-generated/$filetq.h",
++    ]
++  }
++
++  configs = [ ":internal_config" ]
++}
++
++v8_source_set("torque_generated_definitions") {
++  visibility = [ ":*" ]  # Only targets in this file can depend on this.
++
++  deps = [
++    ":generate_bytecode_builtins_list",
++    ":run_torque",
++    ":v8_tracing",
++  ]
++
++  public_deps = [ ":v8_maybe_icu" ]
++
++  sources = [
++    "$target_gen_dir/torque-generated/class-definitions-tq.cc",
++    "$target_gen_dir/torque-generated/class-verifiers-tq.cc",
++    "$target_gen_dir/torque-generated/class-verifiers-tq.h",
++    "$target_gen_dir/torque-generated/factory-tq.cc",
++    "$target_gen_dir/torque-generated/objects-printer-tq.cc",
++  ]
++
++  configs = [ ":internal_config" ]
++}
++
++action("generate_bytecode_builtins_list") {
++  script = "tools/run.py"
++  outputs = [ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h" ]
++  deps = [ ":bytecode_builtins_list_generator($v8_generator_toolchain)" ]
++  args = [
++    "./" + rebase_path(
++            get_label_info(
++                ":bytecode_builtins_list_generator($v8_generator_toolchain)",
++                "root_out_dir") + "/bytecode_builtins_list_generator",
++            root_build_dir),
++    rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
++                root_build_dir),
++  ]
++}
++
++# Template to generate different V8 snapshots based on different runtime flags.
++# Can be invoked with run_mksnapshot(<name>). The target will resolve to
++# run_mksnapshot_<name>. If <name> is "default", no file suffixes will be used.
++# Otherwise files are suffixed, e.g. embedded_<name>.S and
++# snapshot_blob_<name>.bin.
++#
++# The template exposes the variables:
++#   args: additional flags for mksnapshots
++#   embedded_suffix: a camel case suffix for method names in the embedded
++#   snapshot.
++template("run_mksnapshot") {
++  name = target_name
++  if (name == "default") {
++    suffix = ""
++  } else {
++    suffix = "_$name"
++  }
++  action("run_mksnapshot_" + name) {
++    visibility = [ ":*" ]  # Only targets in this file can depend on this.
++ ++ deps = [ ":mksnapshot($v8_snapshot_toolchain)" ] ++ ++ script = "tools/run.py" ++ ++ sources = [] ++ ++ outputs = [] ++ ++ data = [] ++ ++ args = [ ++ "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)", ++ "root_out_dir") + "/mksnapshot", ++ root_build_dir), ++ "--turbo_instruction_scheduling", ++ ++ # In cross builds, the snapshot may be generated for both the host and ++ # target toolchains. The same host binary is used to generate both, so ++ # mksnapshot needs to know which target OS to use at runtime. It's weird, ++ # but the target OS is really |current_os|. ++ "--target_os=$current_os", ++ "--target_arch=$current_cpu", ++ ++ "--embedded_src", ++ rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir), ++ ] ++ ++ # This is needed to distinguish between generating code for the simulator ++ # and cross-compiling. The latter may need to run code on the host with the ++ # simulator but cannot use simulator-specific instructions. ++ if (target_is_simulator) { ++ args += [ "--target_is_simulator" ] ++ } ++ ++ args += invoker.args ++ ++ outputs += [ "$target_gen_dir/embedded${suffix}.S" ] ++ if (invoker.embedded_variant != "") { ++ args += [ ++ "--embedded_variant", ++ invoker.embedded_variant, ++ ] ++ } ++ ++ if (v8_random_seed != "0") { ++ args += [ ++ "--random-seed", ++ v8_random_seed, ++ ] ++ } ++ ++ if (v8_os_page_size != "0") { ++ args += [ ++ "--v8_os_page_size", ++ v8_os_page_size, ++ ] ++ } ++ ++ if (v8_use_external_startup_data) { ++ outputs += [ "$root_out_dir/snapshot_blob${suffix}.bin" ] ++ data += [ "$root_out_dir/snapshot_blob${suffix}.bin" ] ++ args += [ ++ "--startup_blob", ++ rebase_path("$root_out_dir/snapshot_blob${suffix}.bin", root_build_dir), ++ ] ++ } else { ++ outputs += [ "$target_gen_dir/snapshot${suffix}.cc" ] ++ args += [ ++ "--startup_src", ++ rebase_path("$target_gen_dir/snapshot${suffix}.cc", root_build_dir), ++ ] ++ } ++ ++ if (v8_embed_script != "") { ++ sources += [ v8_embed_script ] ++ args += [ rebase_path(v8_embed_script, root_build_dir) ] ++ } ++ ++ if (v8_enable_snapshot_code_comments) { ++ args += [ "--code-comments" ] ++ } ++ ++ if (v8_enable_snapshot_native_code_counters) { ++ args += [ "--native-code-counters" ] ++ } else { ++ # --native-code-counters is the default in debug mode so make sure we can ++ # unset it. ++ args += [ "--no-native-code-counters" ] ++ } ++ ++ if (v8_enable_fast_mksnapshot) { ++ args += [ ++ "--no-turbo-rewrite-far-jumps", ++ "--no-turbo-verify-allocation", ++ ] ++ ++ if (v8_enable_debugging_features && v8_enable_slow_dchecks) { ++ # mksnapshot only accepts this flag if ENABLE_SLOW_DCHECKS is defined. 
++ args += [ "--no-enable-slow-asserts" ] ++ } ++ } ++ ++ if (v8_enable_verify_heap) { ++ args += [ "--verify-heap" ] ++ } ++ } ++} ++ ++run_mksnapshot("default") { ++ args = [] ++ embedded_variant = "Default" ++} ++if (emit_builtins_as_inline_asm) { ++ asm_to_inline_asm("default") { ++ args = [] ++ } ++} ++if (v8_use_multi_snapshots) { ++ run_mksnapshot("trusted") { ++ args = [ "--no-untrusted-code-mitigations" ] ++ embedded_variant = "Trusted" ++ } ++ if (emit_builtins_as_inline_asm) { ++ asm_to_inline_asm("trusted") { ++ args = [] ++ } ++ } ++} ++ ++action("v8_dump_build_config") { ++ script = "tools/testrunner/utils/dump_build_config.py" ++ outputs = [ "$root_out_dir/v8_build_config.json" ] ++ is_gcov_coverage = v8_code_coverage && !is_clang ++ is_full_debug = v8_enable_debugging_features && !v8_optimized_debug ++ args = [ ++ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), ++ "current_cpu=\"$current_cpu\"", ++ "dcheck_always_on=$dcheck_always_on", ++ "is_android=$is_android", ++ "is_asan=$is_asan", ++ "is_cfi=$is_cfi", ++ "is_clang=$is_clang", ++ "is_component_build=$is_component_build", ++ "is_debug=$v8_enable_debugging_features", ++ "is_full_debug=$is_full_debug", ++ "is_gcov_coverage=$is_gcov_coverage", ++ "is_msan=$is_msan", ++ "is_tsan=$is_tsan", ++ "is_ubsan_vptr=$is_ubsan_vptr", ++ "target_cpu=\"$target_cpu\"", ++ "v8_current_cpu=\"$v8_current_cpu\"", ++ "v8_enable_i18n_support=$v8_enable_i18n_support", ++ "v8_enable_verify_predictable=$v8_enable_verify_predictable", ++ "v8_target_cpu=\"$v8_target_cpu\"", ++ "v8_enable_verify_csa=$v8_enable_verify_csa", ++ "v8_enable_lite_mode=$v8_enable_lite_mode", ++ "v8_enable_pointer_compression=$v8_enable_pointer_compression", ++ ] ++ ++ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || ++ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { ++ args += [ ++ "mips_arch_variant=\"$mips_arch_variant\"", ++ "mips_use_msa=$mips_use_msa", ++ ] ++ } ++} ++ ++############################################################################### ++# Source Sets (aka static libraries) ++# ++ ++v8_source_set("v8_snapshot") { ++ visibility = [ ":*" ] # Targets in this file can depend on this. ++ ++ deps = [] ++ public_deps = [ ++ # This should be public so downstream targets can declare the snapshot ++ # output file as their inputs. ++ ":run_mksnapshot_default", ++ ] ++ ++ # Do not publicize any header to remove build dependency. ++ public = [] ++ ++ sources = [ "src/init/setup-isolate-deserialize.cc" ] ++ if (emit_builtins_as_inline_asm) { ++ deps += [ ":asm_to_inline_asm_default" ] ++ sources += [ "$target_gen_dir/embedded.cc" ] ++ } else { ++ sources += [ "$target_gen_dir/embedded.S" ] ++ } ++ ++ configs = [ ":internal_config" ] ++ ++ if (v8_use_external_startup_data) { ++ deps += [ ":v8_base" ] ++ ++ sources += [ "src/snapshot/snapshot-external.cc" ] ++ ++ if (v8_use_multi_snapshots) { ++ public_deps += [ ":run_mksnapshot_trusted" ] ++ if (emit_builtins_as_inline_asm) { ++ deps += [ ":asm_to_inline_asm_trusted" ] ++ sources += [ "$target_gen_dir/embedded_trusted.cc" ] ++ } else { ++ sources += [ "$target_gen_dir/embedded_trusted.S" ] ++ } ++ } ++ } else { ++ # Also top-level visibility targets can depend on this. 
++    visibility += [ "//:gn_visibility" ]
++
++    public_deps += [ ":v8_maybe_icu" ]
++
++    sources += [ "$target_gen_dir/snapshot.cc" ]
++  }
++}
++
++v8_source_set("v8_initializers") {
++  visibility = [
++    ":*",
++    "test/cctest:*",
++  ]
++
++  deps = [
++    ":torque_generated_initializers",
++    ":v8_tracing",
++  ]
++
++  sources = [
++    ### gcmole(all) ###
++    "src/builtins/builtins-array-gen.cc",
++    "src/builtins/builtins-array-gen.h",
++    "src/builtins/builtins-async-function-gen.cc",
++    "src/builtins/builtins-async-gen.cc",
++    "src/builtins/builtins-async-gen.h",
++    "src/builtins/builtins-async-generator-gen.cc",
++    "src/builtins/builtins-async-iterator-gen.cc",
++    "src/builtins/builtins-bigint-gen.cc",
++    "src/builtins/builtins-bigint-gen.h",
++    "src/builtins/builtins-call-gen.cc",
++    "src/builtins/builtins-call-gen.h",
++    "src/builtins/builtins-collections-gen.cc",
++    "src/builtins/builtins-constructor-gen.cc",
++    "src/builtins/builtins-constructor-gen.h",
++    "src/builtins/builtins-constructor.h",
++    "src/builtins/builtins-conversion-gen.cc",
++    "src/builtins/builtins-data-view-gen.h",
++    "src/builtins/builtins-date-gen.cc",
++    "src/builtins/builtins-debug-gen.cc",
++    "src/builtins/builtins-function-gen.cc",
++    "src/builtins/builtins-generator-gen.cc",
++    "src/builtins/builtins-global-gen.cc",
++    "src/builtins/builtins-handler-gen.cc",
++    "src/builtins/builtins-ic-gen.cc",
++    "src/builtins/builtins-internal-gen.cc",
++    "src/builtins/builtins-interpreter-gen.cc",
++    "src/builtins/builtins-intl-gen.cc",
++    "src/builtins/builtins-iterator-gen.cc",
++    "src/builtins/builtins-iterator-gen.h",
++    "src/builtins/builtins-lazy-gen.cc",
++    "src/builtins/builtins-lazy-gen.h",
++    "src/builtins/builtins-microtask-queue-gen.cc",
++    "src/builtins/builtins-number-gen.cc",
++    "src/builtins/builtins-object-gen.cc",
++    "src/builtins/builtins-promise-gen.cc",
++    "src/builtins/builtins-promise-gen.h",
++    "src/builtins/builtins-proxy-gen.cc",
++    "src/builtins/builtins-proxy-gen.h",
++    "src/builtins/builtins-regexp-gen.cc",
++    "src/builtins/builtins-regexp-gen.h",
++    "src/builtins/builtins-sharedarraybuffer-gen.cc",
++    "src/builtins/builtins-string-gen.cc",
++    "src/builtins/builtins-string-gen.h",
++    "src/builtins/builtins-typed-array-gen.cc",
++    "src/builtins/builtins-typed-array-gen.h",
++    "src/builtins/builtins-utils-gen.h",
++    "src/builtins/builtins-wasm-gen.cc",
++    "src/builtins/builtins-wasm-gen.h",
++    "src/builtins/growable-fixed-array-gen.cc",
++    "src/builtins/growable-fixed-array-gen.h",
++    "src/builtins/setup-builtins-internal.cc",
++    "src/codegen/code-stub-assembler.cc",
++    "src/codegen/code-stub-assembler.h",
++    "src/heap/setup-heap-internal.cc",
++    "src/ic/accessor-assembler.cc",
++    "src/ic/accessor-assembler.h",
++    "src/ic/binary-op-assembler.cc",
++    "src/ic/binary-op-assembler.h",
++    "src/ic/keyed-store-generic.cc",
++    "src/ic/keyed-store-generic.h",
++    "src/interpreter/interpreter-assembler.cc",
++    "src/interpreter/interpreter-assembler.h",
++    "src/interpreter/interpreter-generator.cc",
++    "src/interpreter/interpreter-generator.h",
++    "src/interpreter/interpreter-intrinsics-generator.cc",
++    "src/interpreter/interpreter-intrinsics-generator.h",
++  ]
++
++  if (v8_current_cpu == "x86") {
++    sources += [
++      ### gcmole(arch:ia32) ###
++      "src/builtins/ia32/builtins-ia32.cc",
++    ]
++  } else if (v8_current_cpu == "x64") {
++    sources += [
++      ### gcmole(arch:x64) ###
++      "src/builtins/x64/builtins-x64.cc",
++    ]
++  } else if (v8_current_cpu == "arm") {
++    sources += [
++      ### gcmole(arch:arm) ###
++      "src/builtins/arm/builtins-arm.cc",
++    ]
++  } else if (v8_current_cpu == "arm64") {
++    sources += [
++      ### gcmole(arch:arm64) ###
++      "src/builtins/arm64/builtins-arm64.cc",
++    ]
++  } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
++    sources += [
++      ### gcmole(arch:mipsel) ###
++      "src/builtins/mips/builtins-mips.cc",
++    ]
++  } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
++    sources += [
++      ### gcmole(arch:mips64el) ###
++      "src/builtins/mips64/builtins-mips64.cc",
++    ]
++  } else if (v8_current_cpu == "ppc") {
++    sources += [
++      ### gcmole(arch:ppc) ###
++      "src/builtins/ppc/builtins-ppc.cc",
++    ]
++  } else if (v8_current_cpu == "ppc64") {
++    sources += [
++      ### gcmole(arch:ppc64) ###
++      "src/builtins/ppc/builtins-ppc.cc",
++    ]
++  } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
++    sources += [
++      ### gcmole(arch:s390) ###
++      "src/builtins/s390/builtins-s390.cc",
++    ]
++  }
++
++  if (!v8_enable_i18n_support) {
++    sources -= [ "src/builtins/builtins-intl-gen.cc" ]
++  }
++
++  configs = [ ":internal_config" ]
++}
++
++v8_source_set("v8_init") {
++  visibility = [ ":*" ]  # Only targets in this file can depend on this.
++
++  deps = [
++    ":v8_initializers",
++    ":v8_tracing",
++  ]
++
++  sources = [
++    ### gcmole(all) ###
++    "src/init/setup-isolate-full.cc",
++  ]
++
++  public_deps = [ ":v8_maybe_icu" ]
++
++  configs = [ ":internal_config" ]
++}
++
++# This is split out to be a non-code containing target that the Chromium browser
++# DLL can depend upon to get only a version string.
++v8_header_set("v8_version") {
++  configs = [ ":internal_config" ]
++
++  sources = [
++    "include/v8-value-serializer-version.h",
++    "include/v8-version-string.h",
++    "include/v8-version.h",
++  ]
++}
++
++# This is split out to be a non-code containing target that the Chromium browser
++# can depend upon to get basic v8 types.
++v8_header_set("v8_headers") {
++  configs = [ ":internal_config" ]
++  public_configs = [ ":v8_header_features" ]
++
++  sources = [
++    "include/v8-fast-api-calls.h",
++    "include/v8-internal.h",
++    "include/v8.h",
++    "include/v8config.h",
++  ]
++
++  sources += [
++    # The following headers cannot be platform-specific. The include validation
++    # of `gn gen $dir --check` requires all header files to be available on all
++    # platforms.
++    "include/v8-wasm-trap-handler-posix.h",
++    "include/v8-wasm-trap-handler-win.h",
++  ]
++
++  deps = [ ":v8_version" ]
++}
++
++# This is split out to share basic headers with Torque.
++v8_header_set("v8_shared_internal_headers") {
++  visibility = [ ":*" ]  # Only targets in this file can depend on this.
++ configs = [ ":internal_config" ] ++ ++ sources = [ "src/common/globals.h" ] ++ ++ deps = [ ":v8_headers" ] ++} ++ ++v8_compiler_sources = [ ++ ### gcmole(all) ### ++ "src/compiler/access-builder.cc", ++ "src/compiler/access-builder.h", ++ "src/compiler/access-info.cc", ++ "src/compiler/access-info.h", ++ "src/compiler/add-type-assertions-reducer.cc", ++ "src/compiler/add-type-assertions-reducer.h", ++ "src/compiler/all-nodes.cc", ++ "src/compiler/all-nodes.h", ++ "src/compiler/allocation-builder-inl.h", ++ "src/compiler/allocation-builder.h", ++ "src/compiler/backend/code-generator-impl.h", ++ "src/compiler/backend/code-generator.cc", ++ "src/compiler/backend/code-generator.h", ++ "src/compiler/backend/frame-elider.cc", ++ "src/compiler/backend/frame-elider.h", ++ "src/compiler/backend/gap-resolver.cc", ++ "src/compiler/backend/gap-resolver.h", ++ "src/compiler/backend/instruction-codes.h", ++ "src/compiler/backend/instruction-scheduler.cc", ++ "src/compiler/backend/instruction-scheduler.h", ++ "src/compiler/backend/instruction-selector-impl.h", ++ "src/compiler/backend/instruction-selector.cc", ++ "src/compiler/backend/instruction-selector.h", ++ "src/compiler/backend/instruction.cc", ++ "src/compiler/backend/instruction.h", ++ "src/compiler/backend/jump-threading.cc", ++ "src/compiler/backend/jump-threading.h", ++ "src/compiler/backend/live-range-separator.cc", ++ "src/compiler/backend/live-range-separator.h", ++ "src/compiler/backend/move-optimizer.cc", ++ "src/compiler/backend/move-optimizer.h", ++ "src/compiler/backend/register-allocator-verifier.cc", ++ "src/compiler/backend/register-allocator-verifier.h", ++ "src/compiler/backend/register-allocator.cc", ++ "src/compiler/backend/register-allocator.h", ++ "src/compiler/backend/unwinding-info-writer.h", ++ "src/compiler/basic-block-instrumentor.cc", ++ "src/compiler/basic-block-instrumentor.h", ++ "src/compiler/branch-elimination.cc", ++ "src/compiler/branch-elimination.h", ++ "src/compiler/bytecode-analysis.cc", ++ "src/compiler/bytecode-analysis.h", ++ "src/compiler/bytecode-graph-builder.cc", ++ "src/compiler/bytecode-graph-builder.h", ++ "src/compiler/bytecode-liveness-map.cc", ++ "src/compiler/bytecode-liveness-map.h", ++ "src/compiler/c-linkage.cc", ++ "src/compiler/checkpoint-elimination.cc", ++ "src/compiler/checkpoint-elimination.h", ++ "src/compiler/code-assembler.cc", ++ "src/compiler/code-assembler.h", ++ "src/compiler/common-node-cache.cc", ++ "src/compiler/common-node-cache.h", ++ "src/compiler/common-operator-reducer.cc", ++ "src/compiler/common-operator-reducer.h", ++ "src/compiler/common-operator.cc", ++ "src/compiler/common-operator.h", ++ "src/compiler/compilation-dependencies.cc", ++ "src/compiler/compilation-dependencies.h", ++ "src/compiler/compiler-source-position-table.cc", ++ "src/compiler/compiler-source-position-table.h", ++ "src/compiler/constant-folding-reducer.cc", ++ "src/compiler/constant-folding-reducer.h", ++ "src/compiler/control-equivalence.cc", ++ "src/compiler/control-equivalence.h", ++ "src/compiler/control-flow-optimizer.cc", ++ "src/compiler/control-flow-optimizer.h", ++ "src/compiler/csa-load-elimination.cc", ++ "src/compiler/csa-load-elimination.h", ++ "src/compiler/dead-code-elimination.cc", ++ "src/compiler/dead-code-elimination.h", ++ "src/compiler/decompression-optimizer.cc", ++ "src/compiler/decompression-optimizer.h", ++ "src/compiler/diamond.h", ++ "src/compiler/effect-control-linearizer.cc", ++ "src/compiler/effect-control-linearizer.h", ++ "src/compiler/escape-analysis-reducer.cc", 
++ "src/compiler/escape-analysis-reducer.h", ++ "src/compiler/escape-analysis.cc", ++ "src/compiler/escape-analysis.h", ++ "src/compiler/feedback-source.cc", ++ "src/compiler/feedback-source.h", ++ "src/compiler/frame-states.cc", ++ "src/compiler/frame-states.h", ++ "src/compiler/frame.cc", ++ "src/compiler/frame.h", ++ "src/compiler/functional-list.h", ++ "src/compiler/globals.h", ++ "src/compiler/graph-assembler.cc", ++ "src/compiler/graph-assembler.h", ++ "src/compiler/graph-reducer.cc", ++ "src/compiler/graph-reducer.h", ++ "src/compiler/graph-trimmer.cc", ++ "src/compiler/graph-trimmer.h", ++ "src/compiler/graph-visualizer.cc", ++ "src/compiler/graph-visualizer.h", ++ "src/compiler/graph.cc", ++ "src/compiler/graph.h", ++ "src/compiler/int64-lowering.cc", ++ "src/compiler/int64-lowering.h", ++ "src/compiler/js-call-reducer.cc", ++ "src/compiler/js-call-reducer.h", ++ "src/compiler/js-context-specialization.cc", ++ "src/compiler/js-context-specialization.h", ++ "src/compiler/js-create-lowering.cc", ++ "src/compiler/js-create-lowering.h", ++ "src/compiler/js-generic-lowering.cc", ++ "src/compiler/js-generic-lowering.h", ++ "src/compiler/js-graph.cc", ++ "src/compiler/js-graph.h", ++ "src/compiler/js-heap-broker.cc", ++ "src/compiler/js-heap-broker.h", ++ "src/compiler/js-heap-copy-reducer.cc", ++ "src/compiler/js-heap-copy-reducer.h", ++ "src/compiler/js-inlining-heuristic.cc", ++ "src/compiler/js-inlining-heuristic.h", ++ "src/compiler/js-inlining.cc", ++ "src/compiler/js-inlining.h", ++ "src/compiler/js-intrinsic-lowering.cc", ++ "src/compiler/js-intrinsic-lowering.h", ++ "src/compiler/js-native-context-specialization.cc", ++ "src/compiler/js-native-context-specialization.h", ++ "src/compiler/js-operator.cc", ++ "src/compiler/js-operator.h", ++ "src/compiler/js-type-hint-lowering.cc", ++ "src/compiler/js-type-hint-lowering.h", ++ "src/compiler/js-typed-lowering.cc", ++ "src/compiler/js-typed-lowering.h", ++ "src/compiler/linkage.cc", ++ "src/compiler/linkage.h", ++ "src/compiler/load-elimination.cc", ++ "src/compiler/load-elimination.h", ++ "src/compiler/loop-analysis.cc", ++ "src/compiler/loop-analysis.h", ++ "src/compiler/loop-peeling.cc", ++ "src/compiler/loop-peeling.h", ++ "src/compiler/loop-variable-optimizer.cc", ++ "src/compiler/loop-variable-optimizer.h", ++ "src/compiler/machine-graph-verifier.cc", ++ "src/compiler/machine-graph-verifier.h", ++ "src/compiler/machine-graph.cc", ++ "src/compiler/machine-graph.h", ++ "src/compiler/machine-operator-reducer.cc", ++ "src/compiler/machine-operator-reducer.h", ++ "src/compiler/machine-operator.cc", ++ "src/compiler/machine-operator.h", ++ "src/compiler/map-inference.cc", ++ "src/compiler/map-inference.h", ++ "src/compiler/memory-lowering.cc", ++ "src/compiler/memory-lowering.h", ++ "src/compiler/memory-optimizer.cc", ++ "src/compiler/memory-optimizer.h", ++ "src/compiler/node-aux-data.h", ++ "src/compiler/node-cache.h", ++ "src/compiler/node-marker.cc", ++ "src/compiler/node-marker.h", ++ "src/compiler/node-matchers.cc", ++ "src/compiler/node-matchers.h", ++ "src/compiler/node-origin-table.cc", ++ "src/compiler/node-origin-table.h", ++ "src/compiler/node-properties.cc", ++ "src/compiler/node-properties.h", ++ "src/compiler/node.cc", ++ "src/compiler/node.h", ++ "src/compiler/opcodes.cc", ++ "src/compiler/opcodes.h", ++ "src/compiler/operation-typer.cc", ++ "src/compiler/operation-typer.h", ++ "src/compiler/operator-properties.cc", ++ "src/compiler/operator-properties.h", ++ "src/compiler/operator.cc", ++ "src/compiler/operator.h", 
++ "src/compiler/osr.cc", ++ "src/compiler/osr.h", ++ "src/compiler/per-isolate-compiler-cache.h", ++ "src/compiler/persistent-map.h", ++ "src/compiler/pipeline-statistics.cc", ++ "src/compiler/pipeline-statistics.h", ++ "src/compiler/pipeline.cc", ++ "src/compiler/pipeline.h", ++ "src/compiler/property-access-builder.cc", ++ "src/compiler/property-access-builder.h", ++ "src/compiler/raw-machine-assembler.cc", ++ "src/compiler/raw-machine-assembler.h", ++ "src/compiler/redundancy-elimination.cc", ++ "src/compiler/redundancy-elimination.h", ++ "src/compiler/refs-map.cc", ++ "src/compiler/refs-map.h", ++ "src/compiler/representation-change.cc", ++ "src/compiler/representation-change.h", ++ "src/compiler/schedule.cc", ++ "src/compiler/schedule.h", ++ "src/compiler/scheduled-machine-lowering.cc", ++ "src/compiler/scheduled-machine-lowering.h", ++ "src/compiler/scheduler.cc", ++ "src/compiler/scheduler.h", ++ "src/compiler/select-lowering.cc", ++ "src/compiler/select-lowering.h", ++ "src/compiler/serializer-for-background-compilation.cc", ++ "src/compiler/serializer-for-background-compilation.h", ++ "src/compiler/serializer-hints.h", ++ "src/compiler/simd-scalar-lowering.cc", ++ "src/compiler/simd-scalar-lowering.h", ++ "src/compiler/simplified-lowering.cc", ++ "src/compiler/simplified-lowering.h", ++ "src/compiler/simplified-operator-reducer.cc", ++ "src/compiler/simplified-operator-reducer.h", ++ "src/compiler/simplified-operator.cc", ++ "src/compiler/simplified-operator.h", ++ "src/compiler/state-values-utils.cc", ++ "src/compiler/state-values-utils.h", ++ "src/compiler/store-store-elimination.cc", ++ "src/compiler/store-store-elimination.h", ++ "src/compiler/type-cache.cc", ++ "src/compiler/type-cache.h", ++ "src/compiler/type-narrowing-reducer.cc", ++ "src/compiler/type-narrowing-reducer.h", ++ "src/compiler/typed-optimization.cc", ++ "src/compiler/typed-optimization.h", ++ "src/compiler/typer.cc", ++ "src/compiler/typer.h", ++ "src/compiler/types.cc", ++ "src/compiler/types.h", ++ "src/compiler/value-numbering-reducer.cc", ++ "src/compiler/value-numbering-reducer.h", ++ "src/compiler/verifier.cc", ++ "src/compiler/verifier.h", ++ "src/compiler/wasm-compiler.cc", ++ "src/compiler/wasm-compiler.h", ++ "src/compiler/write-barrier-kind.h", ++ "src/compiler/zone-stats.cc", ++ "src/compiler/zone-stats.h", ++] ++ ++# The src/compiler files with optimizations. ++v8_source_set("v8_compiler_opt") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. ++ ++ sources = v8_compiler_sources ++ ++ public_deps = [ ++ ":generate_bytecode_builtins_list", ++ ":run_torque", ++ ":v8_maybe_icu", ++ ":v8_tracing", ++ ] ++ ++ if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) { ++ # The :no_optimize config is added to v8_add_configs in v8.gni. ++ remove_configs = [ "//build/config/compiler:no_optimize" ] ++ configs = [ ":always_optimize" ] ++ } else { ++ # Without this else branch, gn fails to generate build files for non-debug ++ # builds (because we try to remove a config that is not present). ++ # So we include it, even if this config is not used outside of debug builds. ++ configs = [ ":internal_config" ] ++ } ++} ++ ++# The src/compiler files with default optimization behavior. ++v8_source_set("v8_compiler") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. 
++
++  sources = v8_compiler_sources
++
++  public_deps = [
++    ":generate_bytecode_builtins_list",
++    ":run_torque",
++    ":v8_maybe_icu",
++    ":v8_tracing",
++  ]
++
++  configs = [ ":internal_config" ]
++}
++
++group("v8_compiler_for_mksnapshot") {
++  if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
++    deps = [ ":v8_compiler_opt" ]
++  } else {
++    deps = [ ":v8_compiler" ]
++  }
++}
++
++# Any target using trace events must directly or indirectly depend on
++# v8_tracing.
++group("v8_tracing") {
++  if (v8_use_perfetto) {
++    if (build_with_chromium) {
++      public_deps = [ "//third_party/perfetto:libperfetto" ]
++    } else {
++      public_deps = [ ":v8_libperfetto" ]
++    }
++  }
++}
++
++v8_source_set("v8_base_without_compiler") {
++  visibility = [ ":*" ]  # Only targets in this file can depend on this.
++
++  # Split static libraries on windows into two.
++  split_count = 2
++
++  sources = [
++    "//base/trace_event/common/trace_event_common.h",
++
++    ### gcmole(all) ###
++    "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
++    "include/cppgc/common.h",
++    "include/v8-fast-api-calls.h",
++    "include/v8-inspector-protocol.h",
++    "include/v8-inspector.h",
++    "include/v8-internal.h",
++    "include/v8-platform.h",
++    "include/v8-profiler.h",
++    "include/v8-util.h",
++    "include/v8-wasm-trap-handler-posix.h",
++    "include/v8.h",
++    "include/v8config.h",
++    "src/api/api-arguments-inl.h",
++    "src/api/api-arguments.cc",
++    "src/api/api-arguments.h",
++    "src/api/api-natives.cc",
++    "src/api/api-natives.h",
++    "src/api/api.cc",
++    "src/api/api.h",
++    "src/asmjs/asm-js.cc",
++    "src/asmjs/asm-js.h",
++    "src/asmjs/asm-names.h",
++    "src/asmjs/asm-parser.cc",
++    "src/asmjs/asm-parser.h",
++    "src/asmjs/asm-scanner.cc",
++    "src/asmjs/asm-scanner.h",
++    "src/asmjs/asm-types.cc",
++    "src/asmjs/asm-types.h",
++    "src/ast/ast-function-literal-id-reindexer.cc",
++    "src/ast/ast-function-literal-id-reindexer.h",
++    "src/ast/ast-source-ranges.h",
++    "src/ast/ast-traversal-visitor.h",
++    "src/ast/ast-value-factory.cc",
++    "src/ast/ast-value-factory.h",
++    "src/ast/ast.cc",
++    "src/ast/ast.h",
++    "src/ast/modules.cc",
++    "src/ast/modules.h",
++    "src/ast/prettyprinter.cc",
++    "src/ast/prettyprinter.h",
++    "src/ast/scopes.cc",
++    "src/ast/scopes.h",
++    "src/ast/source-range-ast-visitor.cc",
++    "src/ast/source-range-ast-visitor.h",
++    "src/ast/variables.cc",
++    "src/ast/variables.h",
++    "src/builtins/accessors.cc",
++    "src/builtins/accessors.h",
++    "src/builtins/builtins-api.cc",
++    "src/builtins/builtins-array.cc",
++    "src/builtins/builtins-arraybuffer.cc",
++    "src/builtins/builtins-async-module.cc",
++    "src/builtins/builtins-bigint.cc",
++    "src/builtins/builtins-call.cc",
++    "src/builtins/builtins-callsite.cc",
++    "src/builtins/builtins-collections.cc",
++    "src/builtins/builtins-console.cc",
++    "src/builtins/builtins-constructor.h",
++    "src/builtins/builtins-dataview.cc",
++    "src/builtins/builtins-date.cc",
++    "src/builtins/builtins-definitions.h",
++    "src/builtins/builtins-descriptors.h",
++    "src/builtins/builtins-error.cc",
++    "src/builtins/builtins-function.cc",
++    "src/builtins/builtins-global.cc",
++    "src/builtins/builtins-internal.cc",
++    "src/builtins/builtins-intl.cc",
++    "src/builtins/builtins-json.cc",
++    "src/builtins/builtins-number.cc",
++    "src/builtins/builtins-object.cc",
++    "src/builtins/builtins-promise.h",
++    "src/builtins/builtins-reflect.cc",
++    "src/builtins/builtins-regexp.cc",
++    "src/builtins/builtins-sharedarraybuffer.cc",
++    "src/builtins/builtins-string.cc",
++    "src/builtins/builtins-symbol.cc",
++    "src/builtins/builtins-trace.cc",
++    "src/builtins/builtins-typed-array.cc",
++    "src/builtins/builtins-utils-inl.h",
++    "src/builtins/builtins-utils.h",
++    "src/builtins/builtins-weak-refs.cc",
++    "src/builtins/builtins.cc",
++    "src/builtins/builtins.h",
++    "src/builtins/constants-table-builder.cc",
++    "src/builtins/constants-table-builder.h",
++    "src/codegen/assembler-arch.h",
++    "src/codegen/assembler-inl.h",
++    "src/codegen/assembler.cc",
++    "src/codegen/assembler.h",
++    "src/codegen/bailout-reason.cc",
++    "src/codegen/bailout-reason.h",
++    "src/codegen/callable.h",
++    "src/codegen/code-comments.cc",
++    "src/codegen/code-comments.h",
++    "src/codegen/code-desc.cc",
++    "src/codegen/code-desc.h",
++    "src/codegen/code-factory.cc",
++    "src/codegen/code-factory.h",
++    "src/codegen/code-reference.cc",
++    "src/codegen/code-reference.h",
++    "src/codegen/compilation-cache.cc",
++    "src/codegen/compilation-cache.h",
++    "src/codegen/compiler.cc",
++    "src/codegen/compiler.h",
++    "src/codegen/constant-pool.cc",
++    "src/codegen/constant-pool.h",
++    "src/codegen/constants-arch.h",
++    "src/codegen/cpu-features.h",
++    "src/codegen/external-reference-encoder.cc",
++    "src/codegen/external-reference-encoder.h",
++    "src/codegen/external-reference-table.cc",
++    "src/codegen/external-reference-table.h",
++    "src/codegen/external-reference.cc",
++    "src/codegen/external-reference.h",
++    "src/codegen/flush-instruction-cache.cc",
++    "src/codegen/flush-instruction-cache.h",
++    "src/codegen/handler-table.cc",
++    "src/codegen/handler-table.h",
++    "src/codegen/interface-descriptors.cc",
++    "src/codegen/interface-descriptors.h",
++    "src/codegen/label.h",
++    "src/codegen/machine-type.cc",
++    "src/codegen/machine-type.h",
++    "src/codegen/macro-assembler-inl.h",
++    "src/codegen/macro-assembler.h",
++    "src/codegen/optimized-compilation-info.cc",
++    "src/codegen/optimized-compilation-info.h",
++    "src/codegen/pending-optimization-table.cc",
++    "src/codegen/pending-optimization-table.h",
++    "src/codegen/register-arch.h",
++    "src/codegen/register-configuration.cc",
++    "src/codegen/register-configuration.h",
++    "src/codegen/register.cc",
++    "src/codegen/register.h",
++    "src/codegen/reglist.h",
++    "src/codegen/reloc-info.cc",
++    "src/codegen/reloc-info.h",
++    "src/codegen/safepoint-table.cc",
++    "src/codegen/safepoint-table.h",
++    "src/codegen/signature.h",
++    "src/codegen/source-position-table.cc",
++    "src/codegen/source-position-table.h",
++    "src/codegen/source-position.cc",
++    "src/codegen/source-position.h",
++    "src/codegen/string-constants.cc",
++    "src/codegen/string-constants.h",
++    "src/codegen/tick-counter.cc",
++    "src/codegen/tick-counter.h",
++    "src/codegen/tnode.cc",
++    "src/codegen/tnode.h",
++    "src/codegen/turbo-assembler.cc",
++    "src/codegen/turbo-assembler.h",
++    "src/codegen/unoptimized-compilation-info.cc",
++    "src/codegen/unoptimized-compilation-info.h",
++    "src/common/assert-scope.cc",
++    "src/common/assert-scope.h",
++    "src/common/checks.h",
++    "src/common/external-pointer-inl.h",
++    "src/common/external-pointer.h",
++    "src/common/message-template.h",
++    "src/common/ptr-compr-inl.h",
++    "src/common/ptr-compr.h",
++    "src/compiler-dispatcher/compiler-dispatcher.cc",
++    "src/compiler-dispatcher/compiler-dispatcher.h",
++    "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
++    "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
++    "src/date/date.cc",
++    "src/date/date.h",
++    "src/date/dateparser-inl.h",
++    "src/date/dateparser.cc",
++    "src/date/dateparser.h",
++    "src/debug/debug-coverage.cc",
++    "src/debug/debug-coverage.h",
++    "src/debug/debug-evaluate.cc",
++    "src/debug/debug-evaluate.h",
++    "src/debug/debug-frames.cc",
++    "src/debug/debug-frames.h",
++    "src/debug/debug-interface.h",
++    "src/debug/debug-property-iterator.cc",
++    "src/debug/debug-property-iterator.h",
++    "src/debug/debug-scope-iterator.cc",
++    "src/debug/debug-scope-iterator.h",
++    "src/debug/debug-scopes.cc",
++    "src/debug/debug-scopes.h",
++    "src/debug/debug-stack-trace-iterator.cc",
++    "src/debug/debug-stack-trace-iterator.h",
++    "src/debug/debug-type-profile.cc",
++    "src/debug/debug-type-profile.h",
++    "src/debug/debug.cc",
++    "src/debug/debug.h",
++    "src/debug/interface-types.h",
++    "src/debug/liveedit.cc",
++    "src/debug/liveedit.h",
++    "src/deoptimizer/deoptimize-reason.cc",
++    "src/deoptimizer/deoptimize-reason.h",
++    "src/deoptimizer/deoptimizer.cc",
++    "src/deoptimizer/deoptimizer.h",
++    "src/diagnostics/basic-block-profiler.cc",
++    "src/diagnostics/basic-block-profiler.h",
++    "src/diagnostics/code-tracer.h",
++    "src/diagnostics/compilation-statistics.cc",
++    "src/diagnostics/compilation-statistics.h",
++    "src/diagnostics/disasm.h",
++    "src/diagnostics/disassembler.cc",
++    "src/diagnostics/disassembler.h",
++    "src/diagnostics/eh-frame.cc",
++    "src/diagnostics/eh-frame.h",
++    "src/diagnostics/gdb-jit.cc",
++    "src/diagnostics/gdb-jit.h",
++    "src/diagnostics/objects-debug.cc",
++    "src/diagnostics/objects-printer.cc",
++    "src/diagnostics/perf-jit.cc",
++    "src/diagnostics/perf-jit.h",
++    "src/diagnostics/unwinder.cc",
++    "src/execution/arguments-inl.h",
++    "src/execution/arguments.cc",
++    "src/execution/arguments.h",
++    "src/execution/execution.cc",
++    "src/execution/execution.h",
++    "src/execution/frame-constants.h",
++    "src/execution/frames-inl.h",
++    "src/execution/frames.cc",
++    "src/execution/frames.h",
++    "src/execution/futex-emulation.cc",
++    "src/execution/futex-emulation.h",
++    "src/execution/interrupts-scope.cc",
++    "src/execution/interrupts-scope.h",
++    "src/execution/isolate-data.h",
++    "src/execution/isolate-inl.h",
++    "src/execution/isolate-utils.h",
++    "src/execution/isolate.cc",
++    "src/execution/isolate.h",
++    "src/execution/messages.cc",
++    "src/execution/messages.h",
++    "src/execution/microtask-queue.cc",
++    "src/execution/microtask-queue.h",
++    "src/execution/off-thread-isolate-inl.h",
++    "src/execution/off-thread-isolate.cc",
++    "src/execution/off-thread-isolate.h",
++    "src/execution/pointer-authentication.h",
++    "src/execution/protectors-inl.h",
++    "src/execution/protectors.cc",
++    "src/execution/protectors.h",
++    "src/execution/runtime-profiler.cc",
++    "src/execution/runtime-profiler.h",
++    "src/execution/simulator-base.cc",
++    "src/execution/simulator-base.h",
++    "src/execution/simulator.h",
++    "src/execution/stack-guard.cc",
++    "src/execution/stack-guard.h",
++    "src/execution/thread-id.cc",
++    "src/execution/thread-id.h",
++    "src/execution/thread-local-top.cc",
++    "src/execution/thread-local-top.h",
++    "src/execution/v8threads.cc",
++    "src/execution/v8threads.h",
++    "src/execution/vm-state-inl.h",
++    "src/execution/vm-state.h",
++    "src/extensions/cputracemark-extension.cc",
++    "src/extensions/cputracemark-extension.h",
++    "src/extensions/externalize-string-extension.cc",
++    "src/extensions/externalize-string-extension.h",
++    "src/extensions/gc-extension.cc",
++    "src/extensions/gc-extension.h",
++    "src/extensions/ignition-statistics-extension.cc",
++    "src/extensions/ignition-statistics-extension.h",
++    "src/extensions/statistics-extension.cc",
++    "src/extensions/statistics-extension.h",
++    "src/extensions/trigger-failure-extension.cc",
++    "src/extensions/trigger-failure-extension.h",
++    "src/flags/flag-definitions.h",
++    "src/flags/flags.cc",
++    "src/flags/flags.h",
++    "src/handles/global-handles.cc",
++    "src/handles/global-handles.h",
++    "src/handles/handles-inl.h",
++    "src/handles/handles.cc",
++    "src/handles/handles.h",
++    "src/handles/local-handles-inl.h",
++    "src/handles/local-handles.cc",
++    "src/handles/local-handles.h",
++    "src/handles/maybe-handles-inl.h",
++    "src/handles/maybe-handles.h",
++    "src/handles/persistent-handles.cc",
++    "src/handles/persistent-handles.h",
++    "src/heap/array-buffer-collector.cc",
++    "src/heap/array-buffer-collector.h",
++    "src/heap/array-buffer-sweeper.cc",
++    "src/heap/array-buffer-sweeper.h",
++    "src/heap/array-buffer-tracker-inl.h",
++    "src/heap/array-buffer-tracker.cc",
++    "src/heap/array-buffer-tracker.h",
++    "src/heap/barrier.h",
++    "src/heap/basic-memory-chunk.cc",
++    "src/heap/basic-memory-chunk.h",
++    "src/heap/code-stats.cc",
++    "src/heap/code-stats.h",
++    "src/heap/combined-heap.cc",
++    "src/heap/combined-heap.h",
++    "src/heap/concurrent-allocator-inl.h",
++    "src/heap/concurrent-allocator.cc",
++    "src/heap/concurrent-allocator.h",
++    "src/heap/concurrent-marking.cc",
++    "src/heap/concurrent-marking.h",
++    "src/heap/embedder-tracing.cc",
++    "src/heap/embedder-tracing.h",
++    "src/heap/factory-base.cc",
++    "src/heap/factory-base.h",
++    "src/heap/factory-inl.h",
++    "src/heap/factory.cc",
++    "src/heap/factory.h",
++    "src/heap/finalization-registry-cleanup-task.cc",
++    "src/heap/finalization-registry-cleanup-task.h",
++    "src/heap/gc-idle-time-handler.cc",
++    "src/heap/gc-idle-time-handler.h",
++    "src/heap/gc-tracer.cc",
++    "src/heap/gc-tracer.h",
++    "src/heap/heap-controller.cc",
++    "src/heap/heap-controller.h",
++    "src/heap/heap-inl.h",
++    "src/heap/heap-write-barrier-inl.h",
++    "src/heap/heap-write-barrier.h",
++    "src/heap/heap.cc",
++    "src/heap/heap.h",
++    "src/heap/incremental-marking-inl.h",
++    "src/heap/incremental-marking-job.cc",
++    "src/heap/incremental-marking-job.h",
++    "src/heap/incremental-marking.cc",
++    "src/heap/incremental-marking.h",
++    "src/heap/invalidated-slots-inl.h",
++    "src/heap/invalidated-slots.cc",
++    "src/heap/invalidated-slots.h",
++    "src/heap/item-parallel-job.cc",
++    "src/heap/item-parallel-job.h",
++    "src/heap/large-spaces.cc",
++    "src/heap/large-spaces.h",
++    "src/heap/list.h",
++    "src/heap/local-allocator-inl.h",
++    "src/heap/local-allocator.h",
++    "src/heap/local-heap.cc",
++    "src/heap/local-heap.h",
++    "src/heap/mark-compact-inl.h",
++    "src/heap/mark-compact.cc",
++    "src/heap/mark-compact.h",
++    "src/heap/marking-visitor-inl.h",
++    "src/heap/marking-visitor.h",
++    "src/heap/marking-worklist.cc",
++    "src/heap/marking-worklist.h",
++    "src/heap/marking.cc",
++    "src/heap/marking.h",
++    "src/heap/memory-chunk-inl.h",
++    "src/heap/memory-chunk.cc",
++    "src/heap/memory-chunk.h",
++    "src/heap/memory-measurement-inl.h",
++    "src/heap/memory-measurement.cc",
++    "src/heap/memory-measurement.h",
++    "src/heap/memory-reducer.cc",
++    "src/heap/memory-reducer.h",
++    "src/heap/object-stats.cc",
++    "src/heap/object-stats.h",
++    "src/heap/objects-visiting-inl.h",
++    "src/heap/objects-visiting.cc",
++    "src/heap/objects-visiting.h",
++    "src/heap/off-thread-factory.cc",
++    "src/heap/off-thread-factory.h",
++    "src/heap/off-thread-heap.cc",
++    "src/heap/off-thread-heap.h",
++    "src/heap/read-only-heap-inl.h",
++    "src/heap/read-only-heap.cc",
++    "src/heap/read-only-heap.h",
++
"src/heap/read-only-spaces.cc", ++ "src/heap/read-only-spaces.h", ++ "src/heap/remembered-set.h", ++ "src/heap/safepoint.cc", ++ "src/heap/safepoint.h", ++ "src/heap/scavenge-job.cc", ++ "src/heap/scavenge-job.h", ++ "src/heap/scavenger-inl.h", ++ "src/heap/scavenger.cc", ++ "src/heap/scavenger.h", ++ "src/heap/slot-set.cc", ++ "src/heap/slot-set.h", ++ "src/heap/spaces-inl.h", ++ "src/heap/spaces.cc", ++ "src/heap/spaces.h", ++ "src/heap/stress-marking-observer.cc", ++ "src/heap/stress-marking-observer.h", ++ "src/heap/stress-scavenge-observer.cc", ++ "src/heap/stress-scavenge-observer.h", ++ "src/heap/sweeper.cc", ++ "src/heap/sweeper.h", ++ "src/heap/worklist.h", ++ "src/ic/call-optimization.cc", ++ "src/ic/call-optimization.h", ++ "src/ic/handler-configuration-inl.h", ++ "src/ic/handler-configuration.cc", ++ "src/ic/handler-configuration.h", ++ "src/ic/ic-inl.h", ++ "src/ic/ic-stats.cc", ++ "src/ic/ic-stats.h", ++ "src/ic/ic.cc", ++ "src/ic/ic.h", ++ "src/ic/stub-cache.cc", ++ "src/ic/stub-cache.h", ++ "src/init/bootstrapper.cc", ++ "src/init/bootstrapper.h", ++ "src/init/heap-symbols.h", ++ "src/init/icu_util.cc", ++ "src/init/icu_util.h", ++ "src/init/isolate-allocator.cc", ++ "src/init/isolate-allocator.h", ++ "src/init/setup-isolate.h", ++ "src/init/startup-data-util.cc", ++ "src/init/startup-data-util.h", ++ "src/init/v8.cc", ++ "src/init/v8.h", ++ "src/interpreter/block-coverage-builder.h", ++ "src/interpreter/bytecode-array-accessor.cc", ++ "src/interpreter/bytecode-array-accessor.h", ++ "src/interpreter/bytecode-array-builder.cc", ++ "src/interpreter/bytecode-array-builder.h", ++ "src/interpreter/bytecode-array-iterator.cc", ++ "src/interpreter/bytecode-array-iterator.h", ++ "src/interpreter/bytecode-array-random-iterator.cc", ++ "src/interpreter/bytecode-array-random-iterator.h", ++ "src/interpreter/bytecode-array-writer.cc", ++ "src/interpreter/bytecode-array-writer.h", ++ "src/interpreter/bytecode-decoder.cc", ++ "src/interpreter/bytecode-decoder.h", ++ "src/interpreter/bytecode-flags.cc", ++ "src/interpreter/bytecode-flags.h", ++ "src/interpreter/bytecode-generator.cc", ++ "src/interpreter/bytecode-generator.h", ++ "src/interpreter/bytecode-jump-table.h", ++ "src/interpreter/bytecode-label.cc", ++ "src/interpreter/bytecode-label.h", ++ "src/interpreter/bytecode-node.cc", ++ "src/interpreter/bytecode-node.h", ++ "src/interpreter/bytecode-operands.cc", ++ "src/interpreter/bytecode-operands.h", ++ "src/interpreter/bytecode-register-allocator.h", ++ "src/interpreter/bytecode-register-optimizer.cc", ++ "src/interpreter/bytecode-register-optimizer.h", ++ "src/interpreter/bytecode-register.cc", ++ "src/interpreter/bytecode-register.h", ++ "src/interpreter/bytecode-source-info.cc", ++ "src/interpreter/bytecode-source-info.h", ++ "src/interpreter/bytecode-traits.h", ++ "src/interpreter/bytecodes.cc", ++ "src/interpreter/bytecodes.h", ++ "src/interpreter/constant-array-builder.cc", ++ "src/interpreter/constant-array-builder.h", ++ "src/interpreter/control-flow-builders.cc", ++ "src/interpreter/control-flow-builders.h", ++ "src/interpreter/handler-table-builder.cc", ++ "src/interpreter/handler-table-builder.h", ++ "src/interpreter/interpreter-generator.h", ++ "src/interpreter/interpreter-intrinsics.cc", ++ "src/interpreter/interpreter-intrinsics.h", ++ "src/interpreter/interpreter.cc", ++ "src/interpreter/interpreter.h", ++ "src/json/json-parser.cc", ++ "src/json/json-parser.h", ++ "src/json/json-stringifier.cc", ++ "src/json/json-stringifier.h", ++ "src/logging/code-events.h", ++ 
"src/logging/counters-definitions.h", ++ "src/logging/counters-inl.h", ++ "src/logging/counters.cc", ++ "src/logging/counters.h", ++ "src/logging/log-inl.h", ++ "src/logging/log-utils.cc", ++ "src/logging/log-utils.h", ++ "src/logging/log.cc", ++ "src/logging/log.h", ++ "src/logging/off-thread-logger.h", ++ "src/numbers/bignum-dtoa.cc", ++ "src/numbers/bignum-dtoa.h", ++ "src/numbers/bignum.cc", ++ "src/numbers/bignum.h", ++ "src/numbers/cached-powers.cc", ++ "src/numbers/cached-powers.h", ++ "src/numbers/conversions-inl.h", ++ "src/numbers/conversions.cc", ++ "src/numbers/conversions.h", ++ "src/numbers/diy-fp.cc", ++ "src/numbers/diy-fp.h", ++ "src/numbers/double.h", ++ "src/numbers/dtoa.cc", ++ "src/numbers/dtoa.h", ++ "src/numbers/fast-dtoa.cc", ++ "src/numbers/fast-dtoa.h", ++ "src/numbers/fixed-dtoa.cc", ++ "src/numbers/fixed-dtoa.h", ++ "src/numbers/hash-seed-inl.h", ++ "src/numbers/math-random.cc", ++ "src/numbers/math-random.h", ++ "src/numbers/strtod.cc", ++ "src/numbers/strtod.h", ++ "src/objects/allocation-site-inl.h", ++ "src/objects/allocation-site-scopes-inl.h", ++ "src/objects/allocation-site-scopes.h", ++ "src/objects/allocation-site.h", ++ "src/objects/api-callbacks-inl.h", ++ "src/objects/api-callbacks.h", ++ "src/objects/arguments-inl.h", ++ "src/objects/arguments.h", ++ "src/objects/backing-store.cc", ++ "src/objects/backing-store.h", ++ "src/objects/bigint.cc", ++ "src/objects/bigint.h", ++ "src/objects/cell-inl.h", ++ "src/objects/cell.h", ++ "src/objects/code-inl.h", ++ "src/objects/code.cc", ++ "src/objects/code.h", ++ "src/objects/compilation-cache-inl.h", ++ "src/objects/compilation-cache.h", ++ "src/objects/compressed-slots-inl.h", ++ "src/objects/compressed-slots.h", ++ "src/objects/contexts-inl.h", ++ "src/objects/contexts.cc", ++ "src/objects/contexts.h", ++ "src/objects/data-handler.h", ++ "src/objects/debug-objects-inl.h", ++ "src/objects/debug-objects.cc", ++ "src/objects/debug-objects.h", ++ "src/objects/descriptor-array-inl.h", ++ "src/objects/descriptor-array.h", ++ "src/objects/dictionary-inl.h", ++ "src/objects/dictionary.h", ++ "src/objects/elements-inl.h", ++ "src/objects/elements-kind.cc", ++ "src/objects/elements-kind.h", ++ "src/objects/elements.cc", ++ "src/objects/elements.h", ++ "src/objects/embedder-data-array-inl.h", ++ "src/objects/embedder-data-array.cc", ++ "src/objects/embedder-data-array.h", ++ "src/objects/embedder-data-slot-inl.h", ++ "src/objects/embedder-data-slot.h", ++ "src/objects/feedback-cell-inl.h", ++ "src/objects/feedback-cell.h", ++ "src/objects/feedback-vector-inl.h", ++ "src/objects/feedback-vector.cc", ++ "src/objects/feedback-vector.h", ++ "src/objects/field-index-inl.h", ++ "src/objects/field-index.h", ++ "src/objects/field-type.cc", ++ "src/objects/field-type.h", ++ "src/objects/fixed-array-inl.h", ++ "src/objects/fixed-array.h", ++ "src/objects/frame-array-inl.h", ++ "src/objects/frame-array.h", ++ "src/objects/function-kind.h", ++ "src/objects/hash-table-inl.h", ++ "src/objects/hash-table.h", ++ "src/objects/heap-number-inl.h", ++ "src/objects/heap-number.h", ++ "src/objects/heap-object-inl.h", ++ "src/objects/heap-object.h", ++ "src/objects/instance-type-inl.h", ++ "src/objects/instance-type.h", ++ "src/objects/internal-index.h", ++ "src/objects/intl-objects.cc", ++ "src/objects/intl-objects.h", ++ "src/objects/js-aggregate-error-inl.h", ++ "src/objects/js-aggregate-error.h", ++ "src/objects/js-array-buffer-inl.h", ++ "src/objects/js-array-buffer.cc", ++ "src/objects/js-array-buffer.h", ++ 
"src/objects/js-array-inl.h", ++ "src/objects/js-array.h", ++ "src/objects/js-break-iterator-inl.h", ++ "src/objects/js-break-iterator.cc", ++ "src/objects/js-break-iterator.h", ++ "src/objects/js-collator-inl.h", ++ "src/objects/js-collator.cc", ++ "src/objects/js-collator.h", ++ "src/objects/js-collection-inl.h", ++ "src/objects/js-collection-iterator.h", ++ "src/objects/js-collection.h", ++ "src/objects/js-date-time-format-inl.h", ++ "src/objects/js-date-time-format.cc", ++ "src/objects/js-date-time-format.h", ++ "src/objects/js-display-names-inl.h", ++ "src/objects/js-display-names.cc", ++ "src/objects/js-display-names.h", ++ "src/objects/js-generator-inl.h", ++ "src/objects/js-generator.h", ++ "src/objects/js-list-format-inl.h", ++ "src/objects/js-list-format.cc", ++ "src/objects/js-list-format.h", ++ "src/objects/js-locale-inl.h", ++ "src/objects/js-locale.cc", ++ "src/objects/js-locale.h", ++ "src/objects/js-number-format-inl.h", ++ "src/objects/js-number-format.cc", ++ "src/objects/js-number-format.h", ++ "src/objects/js-objects-inl.h", ++ "src/objects/js-objects.cc", ++ "src/objects/js-objects.h", ++ "src/objects/js-plural-rules-inl.h", ++ "src/objects/js-plural-rules.cc", ++ "src/objects/js-plural-rules.h", ++ "src/objects/js-promise-inl.h", ++ "src/objects/js-promise.h", ++ "src/objects/js-proxy-inl.h", ++ "src/objects/js-proxy.h", ++ "src/objects/js-regexp-inl.h", ++ "src/objects/js-regexp-string-iterator-inl.h", ++ "src/objects/js-regexp-string-iterator.h", ++ "src/objects/js-regexp.cc", ++ "src/objects/js-regexp.h", ++ "src/objects/js-relative-time-format-inl.h", ++ "src/objects/js-relative-time-format.cc", ++ "src/objects/js-relative-time-format.h", ++ "src/objects/js-segment-iterator-inl.h", ++ "src/objects/js-segment-iterator.cc", ++ "src/objects/js-segment-iterator.h", ++ "src/objects/js-segmenter-inl.h", ++ "src/objects/js-segmenter.cc", ++ "src/objects/js-segmenter.h", ++ "src/objects/js-weak-refs-inl.h", ++ "src/objects/js-weak-refs.h", ++ "src/objects/keys.cc", ++ "src/objects/keys.h", ++ "src/objects/layout-descriptor-inl.h", ++ "src/objects/layout-descriptor.cc", ++ "src/objects/layout-descriptor.h", ++ "src/objects/literal-objects-inl.h", ++ "src/objects/literal-objects.cc", ++ "src/objects/literal-objects.h", ++ "src/objects/lookup-cache-inl.h", ++ "src/objects/lookup-cache.cc", ++ "src/objects/lookup-cache.h", ++ "src/objects/lookup-inl.h", ++ "src/objects/lookup.cc", ++ "src/objects/lookup.h", ++ "src/objects/managed.cc", ++ "src/objects/managed.h", ++ "src/objects/map-inl.h", ++ "src/objects/map-updater.cc", ++ "src/objects/map-updater.h", ++ "src/objects/map.cc", ++ "src/objects/map.h", ++ "src/objects/maybe-object-inl.h", ++ "src/objects/maybe-object.h", ++ "src/objects/microtask-inl.h", ++ "src/objects/microtask.h", ++ "src/objects/module-inl.h", ++ "src/objects/module.cc", ++ "src/objects/module.h", ++ "src/objects/name-inl.h", ++ "src/objects/name.h", ++ "src/objects/object-list-macros.h", ++ "src/objects/object-macros-undef.h", ++ "src/objects/object-macros.h", ++ "src/objects/objects-body-descriptors-inl.h", ++ "src/objects/objects-body-descriptors.h", ++ "src/objects/objects-inl.h", ++ "src/objects/objects.cc", ++ "src/objects/objects.h", ++ "src/objects/oddball-inl.h", ++ "src/objects/oddball.h", ++ "src/objects/ordered-hash-table-inl.h", ++ "src/objects/ordered-hash-table.cc", ++ "src/objects/ordered-hash-table.h", ++ "src/objects/osr-optimized-code-cache-inl.h", ++ "src/objects/osr-optimized-code-cache.cc", ++ 
"src/objects/osr-optimized-code-cache.h", ++ "src/objects/primitive-heap-object-inl.h", ++ "src/objects/primitive-heap-object.h", ++ "src/objects/promise-inl.h", ++ "src/objects/promise.h", ++ "src/objects/property-array-inl.h", ++ "src/objects/property-array.h", ++ "src/objects/property-cell-inl.h", ++ "src/objects/property-cell.h", ++ "src/objects/property-descriptor-object-inl.h", ++ "src/objects/property-descriptor-object.h", ++ "src/objects/property-descriptor.cc", ++ "src/objects/property-descriptor.h", ++ "src/objects/property-details.h", ++ "src/objects/property.cc", ++ "src/objects/property.h", ++ "src/objects/prototype-info-inl.h", ++ "src/objects/prototype-info.h", ++ "src/objects/prototype.h", ++ "src/objects/regexp-match-info.h", ++ "src/objects/scope-info.cc", ++ "src/objects/scope-info.h", ++ "src/objects/script-inl.h", ++ "src/objects/script.h", ++ "src/objects/shared-function-info-inl.h", ++ "src/objects/shared-function-info.h", ++ "src/objects/slots-atomic-inl.h", ++ "src/objects/slots-inl.h", ++ "src/objects/slots.h", ++ "src/objects/source-text-module.cc", ++ "src/objects/source-text-module.h", ++ "src/objects/stack-frame-info-inl.h", ++ "src/objects/stack-frame-info.cc", ++ "src/objects/stack-frame-info.h", ++ "src/objects/string-comparator.cc", ++ "src/objects/string-comparator.h", ++ "src/objects/string-inl.h", ++ "src/objects/string-table-inl.h", ++ "src/objects/string-table.h", ++ "src/objects/string.cc", ++ "src/objects/string.h", ++ "src/objects/struct-inl.h", ++ "src/objects/struct.h", ++ "src/objects/synthetic-module.cc", ++ "src/objects/synthetic-module.h", ++ "src/objects/tagged-field-inl.h", ++ "src/objects/tagged-field.h", ++ "src/objects/tagged-impl-inl.h", ++ "src/objects/tagged-impl.cc", ++ "src/objects/tagged-impl.h", ++ "src/objects/tagged-index.h", ++ "src/objects/tagged-value-inl.h", ++ "src/objects/tagged-value.h", ++ "src/objects/template-objects-inl.h", ++ "src/objects/template-objects.cc", ++ "src/objects/template-objects.h", ++ "src/objects/templates-inl.h", ++ "src/objects/templates.h", ++ "src/objects/transitions-inl.h", ++ "src/objects/transitions.cc", ++ "src/objects/transitions.h", ++ "src/objects/type-hints.cc", ++ "src/objects/type-hints.h", ++ "src/objects/value-serializer.cc", ++ "src/objects/value-serializer.h", ++ "src/objects/visitors.cc", ++ "src/objects/visitors.h", ++ "src/parsing/expression-scope.h", ++ "src/parsing/func-name-inferrer.cc", ++ "src/parsing/func-name-inferrer.h", ++ "src/parsing/literal-buffer.cc", ++ "src/parsing/literal-buffer.h", ++ "src/parsing/parse-info.cc", ++ "src/parsing/parse-info.h", ++ "src/parsing/parser-base.h", ++ "src/parsing/parser.cc", ++ "src/parsing/parser.h", ++ "src/parsing/parsing.cc", ++ "src/parsing/parsing.h", ++ "src/parsing/pending-compilation-error-handler.cc", ++ "src/parsing/pending-compilation-error-handler.h", ++ "src/parsing/preparse-data-impl.h", ++ "src/parsing/preparse-data.cc", ++ "src/parsing/preparse-data.h", ++ "src/parsing/preparser-logger.h", ++ "src/parsing/preparser.cc", ++ "src/parsing/preparser.h", ++ "src/parsing/rewriter.cc", ++ "src/parsing/rewriter.h", ++ "src/parsing/scanner-character-streams.cc", ++ "src/parsing/scanner-character-streams.h", ++ "src/parsing/scanner.cc", ++ "src/parsing/scanner.h", ++ "src/parsing/token.cc", ++ "src/parsing/token.h", ++ "src/profiler/allocation-tracker.cc", ++ "src/profiler/allocation-tracker.h", ++ "src/profiler/circular-queue-inl.h", ++ "src/profiler/circular-queue.h", ++ "src/profiler/cpu-profiler-inl.h", ++ 
"src/profiler/cpu-profiler.cc", ++ "src/profiler/cpu-profiler.h", ++ "src/profiler/heap-profiler.cc", ++ "src/profiler/heap-profiler.h", ++ "src/profiler/heap-snapshot-generator-inl.h", ++ "src/profiler/heap-snapshot-generator.cc", ++ "src/profiler/heap-snapshot-generator.h", ++ "src/profiler/profile-generator-inl.h", ++ "src/profiler/profile-generator.cc", ++ "src/profiler/profile-generator.h", ++ "src/profiler/profiler-listener.cc", ++ "src/profiler/profiler-listener.h", ++ "src/profiler/sampling-heap-profiler.cc", ++ "src/profiler/sampling-heap-profiler.h", ++ "src/profiler/strings-storage.cc", ++ "src/profiler/strings-storage.h", ++ "src/profiler/tick-sample.cc", ++ "src/profiler/tick-sample.h", ++ "src/profiler/tracing-cpu-profiler.cc", ++ "src/profiler/tracing-cpu-profiler.h", ++ "src/regexp/property-sequences.cc", ++ "src/regexp/property-sequences.h", ++ "src/regexp/regexp-ast.cc", ++ "src/regexp/regexp-ast.h", ++ "src/regexp/regexp-bytecode-generator-inl.h", ++ "src/regexp/regexp-bytecode-generator.cc", ++ "src/regexp/regexp-bytecode-generator.h", ++ "src/regexp/regexp-bytecode-peephole.cc", ++ "src/regexp/regexp-bytecode-peephole.h", ++ "src/regexp/regexp-bytecodes.cc", ++ "src/regexp/regexp-bytecodes.h", ++ "src/regexp/regexp-compiler-tonode.cc", ++ "src/regexp/regexp-compiler.cc", ++ "src/regexp/regexp-compiler.h", ++ "src/regexp/regexp-dotprinter.cc", ++ "src/regexp/regexp-dotprinter.h", ++ "src/regexp/regexp-error.cc", ++ "src/regexp/regexp-error.h", ++ "src/regexp/regexp-interpreter.cc", ++ "src/regexp/regexp-interpreter.h", ++ "src/regexp/regexp-macro-assembler-arch.h", ++ "src/regexp/regexp-macro-assembler-tracer.cc", ++ "src/regexp/regexp-macro-assembler-tracer.h", ++ "src/regexp/regexp-macro-assembler.cc", ++ "src/regexp/regexp-macro-assembler.h", ++ "src/regexp/regexp-nodes.h", ++ "src/regexp/regexp-parser.cc", ++ "src/regexp/regexp-parser.h", ++ "src/regexp/regexp-stack.cc", ++ "src/regexp/regexp-stack.h", ++ "src/regexp/regexp-utils.cc", ++ "src/regexp/regexp-utils.h", ++ "src/regexp/regexp.cc", ++ "src/regexp/regexp.h", ++ "src/regexp/special-case.h", ++ "src/roots/roots-inl.h", ++ "src/roots/roots.cc", ++ "src/roots/roots.h", ++ "src/runtime/runtime-array.cc", ++ "src/runtime/runtime-atomics.cc", ++ "src/runtime/runtime-bigint.cc", ++ "src/runtime/runtime-classes.cc", ++ "src/runtime/runtime-collections.cc", ++ "src/runtime/runtime-compiler.cc", ++ "src/runtime/runtime-date.cc", ++ "src/runtime/runtime-debug.cc", ++ "src/runtime/runtime-forin.cc", ++ "src/runtime/runtime-function.cc", ++ "src/runtime/runtime-futex.cc", ++ "src/runtime/runtime-generator.cc", ++ "src/runtime/runtime-internal.cc", ++ "src/runtime/runtime-interpreter.cc", ++ "src/runtime/runtime-intl.cc", ++ "src/runtime/runtime-literals.cc", ++ "src/runtime/runtime-module.cc", ++ "src/runtime/runtime-numbers.cc", ++ "src/runtime/runtime-object.cc", ++ "src/runtime/runtime-operators.cc", ++ "src/runtime/runtime-promise.cc", ++ "src/runtime/runtime-proxy.cc", ++ "src/runtime/runtime-regexp.cc", ++ "src/runtime/runtime-scopes.cc", ++ "src/runtime/runtime-strings.cc", ++ "src/runtime/runtime-symbol.cc", ++ "src/runtime/runtime-test.cc", ++ "src/runtime/runtime-typedarray.cc", ++ "src/runtime/runtime-utils.h", ++ "src/runtime/runtime-wasm.cc", ++ "src/runtime/runtime-weak-refs.cc", ++ "src/runtime/runtime.cc", ++ "src/runtime/runtime.h", ++ "src/sanitizer/asan.h", ++ "src/sanitizer/lsan-page-allocator.cc", ++ "src/sanitizer/lsan-page-allocator.h", ++ "src/sanitizer/msan.h", ++ "src/sanitizer/tsan.h", ++ 
"src/snapshot/code-serializer.cc", ++ "src/snapshot/code-serializer.h", ++ "src/snapshot/context-deserializer.cc", ++ "src/snapshot/context-deserializer.h", ++ "src/snapshot/context-serializer.cc", ++ "src/snapshot/context-serializer.h", ++ "src/snapshot/deserializer-allocator.cc", ++ "src/snapshot/deserializer-allocator.h", ++ "src/snapshot/deserializer.cc", ++ "src/snapshot/deserializer.h", ++ "src/snapshot/embedded/embedded-data.cc", ++ "src/snapshot/embedded/embedded-data.h", ++ "src/snapshot/object-deserializer.cc", ++ "src/snapshot/object-deserializer.h", ++ "src/snapshot/read-only-deserializer.cc", ++ "src/snapshot/read-only-deserializer.h", ++ "src/snapshot/read-only-serializer.cc", ++ "src/snapshot/read-only-serializer.h", ++ "src/snapshot/references.h", ++ "src/snapshot/roots-serializer.cc", ++ "src/snapshot/roots-serializer.h", ++ "src/snapshot/serializer-allocator.cc", ++ "src/snapshot/serializer-allocator.h", ++ "src/snapshot/serializer-deserializer.cc", ++ "src/snapshot/serializer-deserializer.h", ++ "src/snapshot/serializer.cc", ++ "src/snapshot/serializer.h", ++ "src/snapshot/snapshot-compression.cc", ++ "src/snapshot/snapshot-compression.h", ++ "src/snapshot/snapshot-data.cc", ++ "src/snapshot/snapshot-data.h", ++ "src/snapshot/snapshot-source-sink.cc", ++ "src/snapshot/snapshot-source-sink.h", ++ "src/snapshot/snapshot-utils.cc", ++ "src/snapshot/snapshot-utils.h", ++ "src/snapshot/snapshot.cc", ++ "src/snapshot/snapshot.h", ++ "src/snapshot/startup-deserializer.cc", ++ "src/snapshot/startup-deserializer.h", ++ "src/snapshot/startup-serializer.cc", ++ "src/snapshot/startup-serializer.h", ++ "src/strings/char-predicates-inl.h", ++ "src/strings/char-predicates.cc", ++ "src/strings/char-predicates.h", ++ "src/strings/string-builder-inl.h", ++ "src/strings/string-builder.cc", ++ "src/strings/string-case.cc", ++ "src/strings/string-case.h", ++ "src/strings/string-hasher-inl.h", ++ "src/strings/string-hasher.h", ++ "src/strings/string-search.h", ++ "src/strings/string-stream.cc", ++ "src/strings/string-stream.h", ++ "src/strings/unicode-decoder.cc", ++ "src/strings/unicode-decoder.h", ++ "src/strings/unicode-inl.h", ++ "src/strings/unicode.cc", ++ "src/strings/unicode.h", ++ "src/strings/uri.cc", ++ "src/strings/uri.h", ++ "src/tasks/cancelable-task.cc", ++ "src/tasks/cancelable-task.h", ++ "src/tasks/task-utils.cc", ++ "src/tasks/task-utils.h", ++ "src/third_party/siphash/halfsiphash.cc", ++ "src/third_party/siphash/halfsiphash.h", ++ "src/third_party/utf8-decoder/utf8-decoder.h", ++ "src/tracing/trace-event.cc", ++ "src/tracing/trace-event.h", ++ "src/tracing/traced-value.cc", ++ "src/tracing/traced-value.h", ++ "src/tracing/tracing-category-observer.cc", ++ "src/tracing/tracing-category-observer.h", ++ "src/trap-handler/handler-inside.cc", ++ "src/trap-handler/handler-outside.cc", ++ "src/trap-handler/handler-shared.cc", ++ "src/trap-handler/trap-handler-internal.h", ++ "src/trap-handler/trap-handler.h", ++ "src/utils/address-map.cc", ++ "src/utils/address-map.h", ++ "src/utils/allocation.cc", ++ "src/utils/allocation.h", ++ "src/utils/bit-vector.cc", ++ "src/utils/bit-vector.h", ++ "src/utils/boxed-float.h", ++ "src/utils/detachable-vector.cc", ++ "src/utils/detachable-vector.h", ++ "src/utils/identity-map.cc", ++ "src/utils/identity-map.h", ++ "src/utils/locked-queue-inl.h", ++ "src/utils/locked-queue.h", ++ "src/utils/memcopy.cc", ++ "src/utils/memcopy.h", ++ "src/utils/ostreams.cc", ++ "src/utils/ostreams.h", ++ "src/utils/pointer-with-payload.h", ++ 
"src/utils/utils-inl.h", ++ "src/utils/utils.cc", ++ "src/utils/utils.h", ++ "src/utils/vector.h", ++ "src/utils/version.cc", ++ "src/utils/version.h", ++ "src/wasm/baseline/liftoff-assembler-defs.h", ++ "src/wasm/baseline/liftoff-assembler.cc", ++ "src/wasm/baseline/liftoff-assembler.h", ++ "src/wasm/baseline/liftoff-compiler.cc", ++ "src/wasm/baseline/liftoff-compiler.h", ++ "src/wasm/baseline/liftoff-register.h", ++ "src/wasm/code-space-access.h", ++ "src/wasm/compilation-environment.h", ++ "src/wasm/decoder.h", ++ "src/wasm/function-body-decoder-impl.h", ++ "src/wasm/function-body-decoder.cc", ++ "src/wasm/function-body-decoder.h", ++ "src/wasm/function-compiler.cc", ++ "src/wasm/function-compiler.h", ++ "src/wasm/graph-builder-interface.cc", ++ "src/wasm/graph-builder-interface.h", ++ "src/wasm/jump-table-assembler.cc", ++ "src/wasm/jump-table-assembler.h", ++ "src/wasm/leb-helper.h", ++ "src/wasm/local-decl-encoder.cc", ++ "src/wasm/local-decl-encoder.h", ++ "src/wasm/memory-tracing.cc", ++ "src/wasm/memory-tracing.h", ++ "src/wasm/module-compiler.cc", ++ "src/wasm/module-compiler.h", ++ "src/wasm/module-decoder.cc", ++ "src/wasm/module-decoder.h", ++ "src/wasm/module-instantiate.cc", ++ "src/wasm/module-instantiate.h", ++ "src/wasm/object-access.h", ++ "src/wasm/signature-map.cc", ++ "src/wasm/signature-map.h", ++ "src/wasm/streaming-decoder.cc", ++ "src/wasm/streaming-decoder.h", ++ "src/wasm/struct-types.h", ++ "src/wasm/value-type.h", ++ "src/wasm/wasm-arguments.h", ++ "src/wasm/wasm-code-manager.cc", ++ "src/wasm/wasm-code-manager.h", ++ "src/wasm/wasm-constants.h", ++ "src/wasm/wasm-debug-evaluate.cc", ++ "src/wasm/wasm-debug-evaluate.h", ++ "src/wasm/wasm-debug.cc", ++ "src/wasm/wasm-engine.cc", ++ "src/wasm/wasm-engine.h", ++ "src/wasm/wasm-external-refs.cc", ++ "src/wasm/wasm-external-refs.h", ++ "src/wasm/wasm-feature-flags.h", ++ "src/wasm/wasm-features.cc", ++ "src/wasm/wasm-features.h", ++ "src/wasm/wasm-import-wrapper-cache.cc", ++ "src/wasm/wasm-import-wrapper-cache.h", ++ "src/wasm/wasm-interpreter.cc", ++ "src/wasm/wasm-interpreter.h", ++ "src/wasm/wasm-js.cc", ++ "src/wasm/wasm-js.h", ++ "src/wasm/wasm-limits.h", ++ "src/wasm/wasm-linkage.h", ++ "src/wasm/wasm-module-builder.cc", ++ "src/wasm/wasm-module-builder.h", ++ "src/wasm/wasm-module-sourcemap.cc", ++ "src/wasm/wasm-module-sourcemap.h", ++ "src/wasm/wasm-module.cc", ++ "src/wasm/wasm-module.h", ++ "src/wasm/wasm-objects-inl.h", ++ "src/wasm/wasm-objects.cc", ++ "src/wasm/wasm-objects.h", ++ "src/wasm/wasm-opcodes.cc", ++ "src/wasm/wasm-opcodes.h", ++ "src/wasm/wasm-result.cc", ++ "src/wasm/wasm-result.h", ++ "src/wasm/wasm-serialization.cc", ++ "src/wasm/wasm-serialization.h", ++ "src/wasm/wasm-tier.h", ++ "src/wasm/wasm-value.h", ++ "src/zone/accounting-allocator.cc", ++ "src/zone/accounting-allocator.h", ++ "src/zone/zone-allocator.h", ++ "src/zone/zone-chunk-list.h", ++ "src/zone/zone-containers.h", ++ "src/zone/zone-handle-set.h", ++ "src/zone/zone-list-inl.h", ++ "src/zone/zone-segment.cc", ++ "src/zone/zone-segment.h", ++ "src/zone/zone.cc", ++ "src/zone/zone.h", ++ ] ++ ++ if (!v8_control_flow_integrity) { ++ sources += [ "src/execution/pointer-authentication-dummy.h" ] ++ } ++ ++ if (v8_enable_third_party_heap) { ++ sources += v8_third_party_heap_files ++ } else { ++ sources += [ "src/heap/third-party/heap-api-stub.cc" ] ++ } ++ ++ if (v8_enable_wasm_gdb_remote_debugging) { ++ sources += [ ++ "src/debug/wasm/gdb-server/gdb-remote-util.cc", ++ "src/debug/wasm/gdb-server/gdb-remote-util.h", ++ 
"src/debug/wasm/gdb-server/gdb-server-thread.cc", ++ "src/debug/wasm/gdb-server/gdb-server-thread.h", ++ "src/debug/wasm/gdb-server/gdb-server.cc", ++ "src/debug/wasm/gdb-server/gdb-server.h", ++ "src/debug/wasm/gdb-server/packet.cc", ++ "src/debug/wasm/gdb-server/packet.h", ++ "src/debug/wasm/gdb-server/session.cc", ++ "src/debug/wasm/gdb-server/session.h", ++ "src/debug/wasm/gdb-server/target.cc", ++ "src/debug/wasm/gdb-server/target.h", ++ "src/debug/wasm/gdb-server/transport.cc", ++ "src/debug/wasm/gdb-server/transport.h", ++ "src/debug/wasm/gdb-server/wasm-module-debug.cc", ++ "src/debug/wasm/gdb-server/wasm-module-debug.h", ++ ] ++ } ++ ++ if (v8_check_header_includes) { ++ # This file will be generated by tools/generate-header-include-checks.py ++ # if the "check_v8_header_includes" gclient variable is set. ++ import("check-header-includes/sources.gni") ++ sources += check_header_includes_sources ++ } ++ ++ if (v8_current_cpu == "x86") { ++ sources += [ ### gcmole(arch:ia32) ### ++ "src/codegen/ia32/assembler-ia32-inl.h", ++ "src/codegen/ia32/assembler-ia32.cc", ++ "src/codegen/ia32/assembler-ia32.h", ++ "src/codegen/ia32/constants-ia32.h", ++ "src/codegen/ia32/cpu-ia32.cc", ++ "src/codegen/ia32/interface-descriptors-ia32.cc", ++ "src/codegen/ia32/macro-assembler-ia32.cc", ++ "src/codegen/ia32/macro-assembler-ia32.h", ++ "src/codegen/ia32/register-ia32.h", ++ "src/codegen/ia32/sse-instr.h", ++ "src/compiler/backend/ia32/code-generator-ia32.cc", ++ "src/compiler/backend/ia32/instruction-codes-ia32.h", ++ "src/compiler/backend/ia32/instruction-scheduler-ia32.cc", ++ "src/compiler/backend/ia32/instruction-selector-ia32.cc", ++ "src/debug/ia32/debug-ia32.cc", ++ "src/deoptimizer/ia32/deoptimizer-ia32.cc", ++ "src/diagnostics/ia32/disasm-ia32.cc", ++ "src/execution/ia32/frame-constants-ia32.cc", ++ "src/execution/ia32/frame-constants-ia32.h", ++ "src/regexp/ia32/regexp-macro-assembler-ia32.cc", ++ "src/regexp/ia32/regexp-macro-assembler-ia32.h", ++ "src/wasm/baseline/ia32/liftoff-assembler-ia32.h", ++ ] ++ } else if (v8_current_cpu == "x64") { ++ sources += [ ### gcmole(arch:x64) ### ++ "src/codegen/x64/assembler-x64-inl.h", ++ "src/codegen/x64/assembler-x64.cc", ++ "src/codegen/x64/assembler-x64.h", ++ "src/codegen/x64/constants-x64.h", ++ "src/codegen/x64/cpu-x64.cc", ++ "src/codegen/x64/fma-instr.h", ++ "src/codegen/x64/interface-descriptors-x64.cc", ++ "src/codegen/x64/macro-assembler-x64.cc", ++ "src/codegen/x64/macro-assembler-x64.h", ++ "src/codegen/x64/register-x64.h", ++ "src/codegen/x64/sse-instr.h", ++ "src/compiler/backend/x64/code-generator-x64.cc", ++ "src/compiler/backend/x64/instruction-codes-x64.h", ++ "src/compiler/backend/x64/instruction-scheduler-x64.cc", ++ "src/compiler/backend/x64/instruction-selector-x64.cc", ++ "src/compiler/backend/x64/unwinding-info-writer-x64.cc", ++ "src/compiler/backend/x64/unwinding-info-writer-x64.h", ++ "src/debug/x64/debug-x64.cc", ++ "src/deoptimizer/x64/deoptimizer-x64.cc", ++ "src/diagnostics/x64/disasm-x64.cc", ++ "src/diagnostics/x64/eh-frame-x64.cc", ++ "src/execution/x64/frame-constants-x64.cc", ++ "src/execution/x64/frame-constants-x64.h", ++ "src/regexp/x64/regexp-macro-assembler-x64.cc", ++ "src/regexp/x64/regexp-macro-assembler-x64.h", ++ "src/third_party/valgrind/valgrind.h", ++ "src/wasm/baseline/x64/liftoff-assembler-x64.h", ++ ] ++ ++ # iOS Xcode simulator builds run on an x64 target. iOS and macOS are both ++ # based on Darwin and thus POSIX-compliant to a similar degree. 
++ if (is_linux || is_mac || is_ios || target_os == "freebsd") { ++ sources += [ ++ "src/trap-handler/handler-inside-posix.cc", ++ "src/trap-handler/handler-inside-posix.h", ++ "src/trap-handler/handler-outside-posix.cc", ++ ] ++ } ++ if (is_win) { ++ sources += [ ++ "src/diagnostics/unwinding-info-win64.cc", ++ "src/diagnostics/unwinding-info-win64.h", ++ "src/trap-handler/handler-inside-win.cc", ++ "src/trap-handler/handler-inside-win.h", ++ "src/trap-handler/handler-outside-win.cc", ++ ] ++ } ++ } else if (v8_current_cpu == "arm") { ++ sources += [ ### gcmole(arch:arm) ### ++ "src/codegen/arm/assembler-arm-inl.h", ++ "src/codegen/arm/assembler-arm.cc", ++ "src/codegen/arm/assembler-arm.h", ++ "src/codegen/arm/constants-arm.cc", ++ "src/codegen/arm/constants-arm.h", ++ "src/codegen/arm/cpu-arm.cc", ++ "src/codegen/arm/interface-descriptors-arm.cc", ++ "src/codegen/arm/macro-assembler-arm.cc", ++ "src/codegen/arm/macro-assembler-arm.h", ++ "src/codegen/arm/register-arm.h", ++ "src/compiler/backend/arm/code-generator-arm.cc", ++ "src/compiler/backend/arm/instruction-codes-arm.h", ++ "src/compiler/backend/arm/instruction-scheduler-arm.cc", ++ "src/compiler/backend/arm/instruction-selector-arm.cc", ++ "src/compiler/backend/arm/unwinding-info-writer-arm.cc", ++ "src/compiler/backend/arm/unwinding-info-writer-arm.h", ++ "src/debug/arm/debug-arm.cc", ++ "src/deoptimizer/arm/deoptimizer-arm.cc", ++ "src/diagnostics/arm/disasm-arm.cc", ++ "src/diagnostics/arm/eh-frame-arm.cc", ++ "src/execution/arm/frame-constants-arm.cc", ++ "src/execution/arm/frame-constants-arm.h", ++ "src/execution/arm/simulator-arm.cc", ++ "src/execution/arm/simulator-arm.h", ++ "src/regexp/arm/regexp-macro-assembler-arm.cc", ++ "src/regexp/arm/regexp-macro-assembler-arm.h", ++ "src/wasm/baseline/arm/liftoff-assembler-arm.h", ++ ] ++ } else if (v8_current_cpu == "arm64") { ++ sources += [ ### gcmole(arch:arm64) ### ++ "src/codegen/arm64/assembler-arm64-inl.h", ++ "src/codegen/arm64/assembler-arm64.cc", ++ "src/codegen/arm64/assembler-arm64.h", ++ "src/codegen/arm64/constants-arm64.h", ++ "src/codegen/arm64/cpu-arm64.cc", ++ "src/codegen/arm64/decoder-arm64-inl.h", ++ "src/codegen/arm64/decoder-arm64.cc", ++ "src/codegen/arm64/decoder-arm64.h", ++ "src/codegen/arm64/instructions-arm64-constants.cc", ++ "src/codegen/arm64/instructions-arm64.cc", ++ "src/codegen/arm64/instructions-arm64.h", ++ "src/codegen/arm64/interface-descriptors-arm64.cc", ++ "src/codegen/arm64/macro-assembler-arm64-inl.h", ++ "src/codegen/arm64/macro-assembler-arm64.cc", ++ "src/codegen/arm64/macro-assembler-arm64.h", ++ "src/codegen/arm64/register-arm64.cc", ++ "src/codegen/arm64/register-arm64.h", ++ "src/codegen/arm64/utils-arm64.cc", ++ "src/codegen/arm64/utils-arm64.h", ++ "src/compiler/backend/arm64/code-generator-arm64.cc", ++ "src/compiler/backend/arm64/instruction-codes-arm64.h", ++ "src/compiler/backend/arm64/instruction-scheduler-arm64.cc", ++ "src/compiler/backend/arm64/instruction-selector-arm64.cc", ++ "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc", ++ "src/compiler/backend/arm64/unwinding-info-writer-arm64.h", ++ "src/debug/arm64/debug-arm64.cc", ++ "src/deoptimizer/arm64/deoptimizer-arm64.cc", ++ "src/diagnostics/arm64/disasm-arm64.cc", ++ "src/diagnostics/arm64/disasm-arm64.h", ++ "src/diagnostics/arm64/eh-frame-arm64.cc", ++ "src/execution/arm64/frame-constants-arm64.cc", ++ "src/execution/arm64/frame-constants-arm64.h", ++ "src/execution/arm64/pointer-auth-arm64.cc", ++ "src/execution/arm64/simulator-arm64.cc", ++ 
"src/execution/arm64/simulator-arm64.h", ++ "src/execution/arm64/simulator-logic-arm64.cc", ++ "src/regexp/arm64/regexp-macro-assembler-arm64.cc", ++ "src/regexp/arm64/regexp-macro-assembler-arm64.h", ++ "src/wasm/baseline/arm64/liftoff-assembler-arm64.h", ++ ] ++ if (v8_control_flow_integrity) { ++ sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ] ++ } ++ if (is_win) { ++ sources += [ ++ "src/diagnostics/unwinding-info-win64.cc", ++ "src/diagnostics/unwinding-info-win64.h", ++ ] ++ } ++ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { ++ sources += [ ### gcmole(arch:mipsel) ### ++ "src/codegen/mips/assembler-mips-inl.h", ++ "src/codegen/mips/assembler-mips.cc", ++ "src/codegen/mips/assembler-mips.h", ++ "src/codegen/mips/constants-mips.cc", ++ "src/codegen/mips/constants-mips.h", ++ "src/codegen/mips/cpu-mips.cc", ++ "src/codegen/mips/interface-descriptors-mips.cc", ++ "src/codegen/mips/macro-assembler-mips.cc", ++ "src/codegen/mips/macro-assembler-mips.h", ++ "src/codegen/mips/register-mips.h", ++ "src/compiler/backend/mips/code-generator-mips.cc", ++ "src/compiler/backend/mips/instruction-codes-mips.h", ++ "src/compiler/backend/mips/instruction-scheduler-mips.cc", ++ "src/compiler/backend/mips/instruction-selector-mips.cc", ++ "src/debug/mips/debug-mips.cc", ++ "src/deoptimizer/mips/deoptimizer-mips.cc", ++ "src/diagnostics/mips/disasm-mips.cc", ++ "src/execution/mips/frame-constants-mips.cc", ++ "src/execution/mips/frame-constants-mips.h", ++ "src/execution/mips/simulator-mips.cc", ++ "src/execution/mips/simulator-mips.h", ++ "src/regexp/mips/regexp-macro-assembler-mips.cc", ++ "src/regexp/mips/regexp-macro-assembler-mips.h", ++ "src/wasm/baseline/mips/liftoff-assembler-mips.h", ++ ] ++ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { ++ sources += [ ### gcmole(arch:mips64el) ### ++ "src/codegen/mips64/assembler-mips64-inl.h", ++ "src/codegen/mips64/assembler-mips64.cc", ++ "src/codegen/mips64/assembler-mips64.h", ++ "src/codegen/mips64/constants-mips64.cc", ++ "src/codegen/mips64/constants-mips64.h", ++ "src/codegen/mips64/cpu-mips64.cc", ++ "src/codegen/mips64/interface-descriptors-mips64.cc", ++ "src/codegen/mips64/macro-assembler-mips64.cc", ++ "src/codegen/mips64/macro-assembler-mips64.h", ++ "src/codegen/mips64/register-mips64.h", ++ "src/compiler/backend/mips64/code-generator-mips64.cc", ++ "src/compiler/backend/mips64/instruction-codes-mips64.h", ++ "src/compiler/backend/mips64/instruction-scheduler-mips64.cc", ++ "src/compiler/backend/mips64/instruction-selector-mips64.cc", ++ "src/debug/mips64/debug-mips64.cc", ++ "src/deoptimizer/mips64/deoptimizer-mips64.cc", ++ "src/diagnostics/mips64/disasm-mips64.cc", ++ "src/execution/mips64/frame-constants-mips64.cc", ++ "src/execution/mips64/frame-constants-mips64.h", ++ "src/execution/mips64/simulator-mips64.cc", ++ "src/execution/mips64/simulator-mips64.h", ++ "src/regexp/mips64/regexp-macro-assembler-mips64.cc", ++ "src/regexp/mips64/regexp-macro-assembler-mips64.h", ++ "src/wasm/baseline/mips64/liftoff-assembler-mips64.h", ++ ] ++ } else if (v8_current_cpu == "ppc") { ++ sources += [ ### gcmole(arch:ppc) ### ++ "src/codegen/ppc/assembler-ppc-inl.h", ++ "src/codegen/ppc/assembler-ppc.cc", ++ "src/codegen/ppc/assembler-ppc.h", ++ "src/codegen/ppc/constants-ppc.cc", ++ "src/codegen/ppc/constants-ppc.h", ++ "src/codegen/ppc/cpu-ppc.cc", ++ "src/codegen/ppc/interface-descriptors-ppc.cc", ++ "src/codegen/ppc/macro-assembler-ppc.cc", ++ "src/codegen/ppc/macro-assembler-ppc.h", 
++ "src/codegen/ppc/register-ppc.h", ++ "src/compiler/backend/ppc/code-generator-ppc.cc", ++ "src/compiler/backend/ppc/instruction-codes-ppc.h", ++ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", ++ "src/compiler/backend/ppc/instruction-selector-ppc.cc", ++ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", ++ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", ++ "src/debug/ppc/debug-ppc.cc", ++ "src/deoptimizer/ppc/deoptimizer-ppc.cc", ++ "src/diagnostics/ppc/disasm-ppc.cc", ++ "src/diagnostics/ppc/eh-frame-ppc.cc", ++ "src/execution/ppc/frame-constants-ppc.cc", ++ "src/execution/ppc/frame-constants-ppc.h", ++ "src/execution/ppc/simulator-ppc.cc", ++ "src/execution/ppc/simulator-ppc.h", ++ "src/regexp/ppc/regexp-macro-assembler-ppc.cc", ++ "src/regexp/ppc/regexp-macro-assembler-ppc.h", ++ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", ++ ] ++ } else if (v8_current_cpu == "ppc64") { ++ sources += [ ### gcmole(arch:ppc64) ### ++ "src/codegen/ppc/assembler-ppc-inl.h", ++ "src/codegen/ppc/assembler-ppc.cc", ++ "src/codegen/ppc/assembler-ppc.h", ++ "src/codegen/ppc/constants-ppc.cc", ++ "src/codegen/ppc/constants-ppc.h", ++ "src/codegen/ppc/cpu-ppc.cc", ++ "src/codegen/ppc/interface-descriptors-ppc.cc", ++ "src/codegen/ppc/macro-assembler-ppc.cc", ++ "src/codegen/ppc/macro-assembler-ppc.h", ++ "src/codegen/ppc/register-ppc.h", ++ "src/compiler/backend/ppc/code-generator-ppc.cc", ++ "src/compiler/backend/ppc/instruction-codes-ppc.h", ++ "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", ++ "src/compiler/backend/ppc/instruction-selector-ppc.cc", ++ "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", ++ "src/compiler/backend/ppc/unwinding-info-writer-ppc.h", ++ "src/debug/ppc/debug-ppc.cc", ++ "src/deoptimizer/ppc/deoptimizer-ppc.cc", ++ "src/diagnostics/ppc/disasm-ppc.cc", ++ "src/diagnostics/ppc/eh-frame-ppc.cc", ++ "src/execution/ppc/frame-constants-ppc.cc", ++ "src/execution/ppc/frame-constants-ppc.h", ++ "src/execution/ppc/simulator-ppc.cc", ++ "src/execution/ppc/simulator-ppc.h", ++ "src/regexp/ppc/regexp-macro-assembler-ppc.cc", ++ "src/regexp/ppc/regexp-macro-assembler-ppc.h", ++ "src/wasm/baseline/ppc/liftoff-assembler-ppc.h", ++ ] ++ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { ++ sources += [ ### gcmole(arch:s390) ### ++ "src/codegen/s390/assembler-s390-inl.h", ++ "src/codegen/s390/assembler-s390.cc", ++ "src/codegen/s390/assembler-s390.h", ++ "src/codegen/s390/constants-s390.cc", ++ "src/codegen/s390/constants-s390.h", ++ "src/codegen/s390/cpu-s390.cc", ++ "src/codegen/s390/interface-descriptors-s390.cc", ++ "src/codegen/s390/macro-assembler-s390.cc", ++ "src/codegen/s390/macro-assembler-s390.h", ++ "src/codegen/s390/register-s390.h", ++ "src/compiler/backend/s390/code-generator-s390.cc", ++ "src/compiler/backend/s390/instruction-codes-s390.h", ++ "src/compiler/backend/s390/instruction-scheduler-s390.cc", ++ "src/compiler/backend/s390/instruction-selector-s390.cc", ++ "src/compiler/backend/s390/unwinding-info-writer-s390.cc", ++ "src/compiler/backend/s390/unwinding-info-writer-s390.h", ++ "src/debug/s390/debug-s390.cc", ++ "src/deoptimizer/s390/deoptimizer-s390.cc", ++ "src/diagnostics/s390/disasm-s390.cc", ++ "src/diagnostics/s390/eh-frame-s390.cc", ++ "src/execution/s390/frame-constants-s390.cc", ++ "src/execution/s390/frame-constants-s390.h", ++ "src/execution/s390/simulator-s390.cc", ++ "src/execution/s390/simulator-s390.h", ++ "src/regexp/s390/regexp-macro-assembler-s390.cc", ++ 
"src/regexp/s390/regexp-macro-assembler-s390.h", ++ "src/wasm/baseline/s390/liftoff-assembler-s390.h", ++ ] ++ } ++ ++ configs = [ ":internal_config" ] ++ ++ defines = [] ++ deps = [ ++ ":torque_generated_definitions", ++ ":v8_headers", ++ ":v8_libbase", ++ ":v8_libsampler", ++ ":v8_shared_internal_headers", ++ ":v8_tracing", ++ ":v8_version", ++ "src/inspector:inspector", ++ ] ++ ++ public_deps = [ ++ ":generate_bytecode_builtins_list", ++ ":run_torque", ++ ":v8_maybe_icu", ++ ] ++ ++ if (v8_enable_i18n_support) { ++ deps += [ ":run_gen-regexp-special-case" ] ++ sources += [ "$target_gen_dir/src/regexp/special-case.cc" ] ++ if (is_win) { ++ deps += [ "//third_party/icu:icudata" ] ++ } ++ } else { ++ sources -= [ ++ "src/builtins/builtins-intl.cc", ++ "src/objects/intl-objects.cc", ++ "src/objects/intl-objects.h", ++ "src/objects/js-break-iterator-inl.h", ++ "src/objects/js-break-iterator.cc", ++ "src/objects/js-break-iterator.h", ++ "src/objects/js-collator-inl.h", ++ "src/objects/js-collator.cc", ++ "src/objects/js-collator.h", ++ "src/objects/js-date-time-format-inl.h", ++ "src/objects/js-date-time-format.cc", ++ "src/objects/js-date-time-format.h", ++ "src/objects/js-display-names-inl.h", ++ "src/objects/js-display-names.cc", ++ "src/objects/js-display-names.h", ++ "src/objects/js-list-format-inl.h", ++ "src/objects/js-list-format.cc", ++ "src/objects/js-list-format.h", ++ "src/objects/js-locale-inl.h", ++ "src/objects/js-locale.cc", ++ "src/objects/js-locale.h", ++ "src/objects/js-number-format-inl.h", ++ "src/objects/js-number-format.cc", ++ "src/objects/js-number-format.h", ++ "src/objects/js-plural-rules-inl.h", ++ "src/objects/js-plural-rules.cc", ++ "src/objects/js-plural-rules.h", ++ "src/objects/js-relative-time-format-inl.h", ++ "src/objects/js-relative-time-format.cc", ++ "src/objects/js-relative-time-format.h", ++ "src/objects/js-segment-iterator-inl.h", ++ "src/objects/js-segment-iterator.cc", ++ "src/objects/js-segment-iterator.h", ++ "src/objects/js-segmenter-inl.h", ++ "src/objects/js-segmenter.cc", ++ "src/objects/js-segmenter.h", ++ "src/runtime/runtime-intl.cc", ++ "src/strings/char-predicates.cc", ++ ] ++ } ++ ++ deps += [ ++ "//third_party/zlib", ++ "//third_party/zlib/google:compression_utils_portable", ++ ] ++ ++ if (v8_postmortem_support) { ++ sources += [ "$target_gen_dir/debug-support.cc" ] ++ deps += [ ":postmortem-metadata" ] ++ } ++ ++ libs = [] ++ ++ if (v8_enable_third_party_heap) { ++ libs += v8_third_party_heap_libs ++ } ++ ++ # Platforms that don't have CAS support need to link atomic library ++ # to implement atomic memory access ++ if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || ++ v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" || ++ v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || ++ v8_current_cpu == "s390" || v8_current_cpu == "s390x") { ++ libs += [ "atomic" ] ++ } ++ ++ if (v8_enable_vtunetracemark && (is_linux || is_win)) { ++ sources += [ ++ "src/extensions/vtunedomain-support-extension.cc", ++ "src/extensions/vtunedomain-support-extension.h", ++ ] ++ deps += [ "src/third_party/vtune:v8_vtune_trace_mark" ] ++ } ++ ++ if (v8_use_perfetto) { ++ sources -= [ "//base/trace_event/common/trace_event_common.h" ] ++ sources += [ ++ "src/tracing/trace-categories.cc", ++ "src/tracing/trace-categories.h", ++ ] ++ } ++} ++ ++group("v8_base") { ++ public_deps = [ ++ ":v8_base_without_compiler", ++ ":v8_compiler", ++ ] ++} ++ ++v8_source_set("torque_base") { ++ visibility = [ ":*" ] # Only targets in this file can depend on 
this. ++ ++ sources = [ ++ "src/torque/ast.h", ++ "src/torque/cfg.cc", ++ "src/torque/cfg.h", ++ "src/torque/class-debug-reader-generator.cc", ++ "src/torque/constants.h", ++ "src/torque/contextual.h", ++ "src/torque/csa-generator.cc", ++ "src/torque/csa-generator.h", ++ "src/torque/declarable.cc", ++ "src/torque/declarable.h", ++ "src/torque/declaration-visitor.cc", ++ "src/torque/declaration-visitor.h", ++ "src/torque/declarations.cc", ++ "src/torque/declarations.h", ++ "src/torque/earley-parser.cc", ++ "src/torque/earley-parser.h", ++ "src/torque/global-context.cc", ++ "src/torque/global-context.h", ++ "src/torque/implementation-visitor.cc", ++ "src/torque/implementation-visitor.h", ++ "src/torque/instance-type-generator.cc", ++ "src/torque/instructions.cc", ++ "src/torque/instructions.h", ++ "src/torque/parameter-difference.h", ++ "src/torque/server-data.cc", ++ "src/torque/server-data.h", ++ "src/torque/source-positions.cc", ++ "src/torque/source-positions.h", ++ "src/torque/torque-compiler.cc", ++ "src/torque/torque-compiler.h", ++ "src/torque/torque-parser.cc", ++ "src/torque/torque-parser.h", ++ "src/torque/type-inference.cc", ++ "src/torque/type-inference.h", ++ "src/torque/type-oracle.cc", ++ "src/torque/type-oracle.h", ++ "src/torque/type-visitor.cc", ++ "src/torque/type-visitor.h", ++ "src/torque/types.cc", ++ "src/torque/types.h", ++ "src/torque/utils.cc", ++ "src/torque/utils.h", ++ ] ++ ++ deps = [ ":v8_shared_internal_headers" ] ++ ++ public_deps = [ ":v8_libbase" ] ++ ++ # The use of exceptions for Torque in violation of the Chromium style-guide ++ # is justified by the fact that it is only used from the non-essential ++ # language server and can be removed anytime if it causes problems. ++ configs = [ ++ ":internal_config", ++ "//build/config/compiler:exceptions", ++ "//build/config/compiler:rtti", ++ ] ++ ++ remove_configs = [ ++ "//build/config/compiler:no_exceptions", ++ "//build/config/compiler:no_rtti", ++ ] ++ ++ if (is_win && is_asan) { ++ # Due to a bug in ASAN on Windows (chromium:893437), we disable ASAN for ++ # Torque on Windows. ++ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] ++ } ++ ++ if (is_debug && !v8_optimized_debug && v8_enable_fast_torque) { ++ # The :no_optimize config is added to v8_add_configs in v8.gni. ++ remove_configs += [ "//build/config/compiler:no_optimize" ] ++ configs += [ ":always_optimize" ] ++ } ++} ++ ++v8_source_set("torque_ls_base") { ++ sources = [ ++ "src/torque/ls/globals.h", ++ "src/torque/ls/json-parser.cc", ++ "src/torque/ls/json-parser.h", ++ "src/torque/ls/json.cc", ++ "src/torque/ls/json.h", ++ "src/torque/ls/message-handler.cc", ++ "src/torque/ls/message-handler.h", ++ "src/torque/ls/message-macros.h", ++ "src/torque/ls/message-pipe.h", ++ "src/torque/ls/message.h", ++ ] ++ ++ public_deps = [ ":torque_base" ] ++ ++ # The use of exceptions for Torque in violation of the Chromium style-guide ++ # is justified by the fact that it is only used from the non-essential ++ # language server and can be removed anytime if it causes problems. 
++ configs = [
++ ":internal_config",
++ "//build/config/compiler:exceptions",
++ "//build/config/compiler:rtti",
++ ]
++
++ remove_configs = [
++ "//build/config/compiler:no_exceptions",
++ "//build/config/compiler:no_rtti",
++ ]
++
++ if (is_win && is_asan) {
++ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ]
++ }
++}
++
++v8_component("v8_libbase") {
++ sources = [
++ "src/base/address-region.h",
++ "src/base/atomic-utils.h",
++ "src/base/atomicops.h",
++ "src/base/atomicops_internals_atomicword_compat.h",
++ "src/base/atomicops_internals_portable.h",
++ "src/base/atomicops_internals_std.h",
++ "src/base/base-export.h",
++ "src/base/bit-field.h",
++ "src/base/bits-iterator.h",
++ "src/base/bits.cc",
++ "src/base/bits.h",
++ "src/base/bounded-page-allocator.cc",
++ "src/base/bounded-page-allocator.h",
++ "src/base/bounds.h",
++ "src/base/build_config.h",
++ "src/base/compiler-specific.h",
++ "src/base/cpu.cc",
++ "src/base/cpu.h",
++ "src/base/debug/stack_trace.cc",
++ "src/base/debug/stack_trace.h",
++ "src/base/division-by-constant.cc",
++ "src/base/division-by-constant.h",
++ "src/base/enum-set.h",
++ "src/base/export-template.h",
++ "src/base/file-utils.cc",
++ "src/base/file-utils.h",
++ "src/base/flags.h",
++ "src/base/free_deleter.h",
++ "src/base/functional.cc",
++ "src/base/functional.h",
++ "src/base/hashmap-entry.h",
++ "src/base/hashmap.h",
++ "src/base/ieee754.cc",
++ "src/base/ieee754.h",
++ "src/base/iterator.h",
++ "src/base/lazy-instance.h",
++ "src/base/logging.cc",
++ "src/base/logging.h",
++ "src/base/lsan.h",
++ "src/base/macros.h",
++ "src/base/memory.h",
++ "src/base/once.cc",
++ "src/base/once.h",
++ "src/base/optional.h",
++ "src/base/overflowing-math.h",
++ "src/base/page-allocator.cc",
++ "src/base/page-allocator.h",
++ "src/base/platform/condition-variable.cc",
++ "src/base/platform/condition-variable.h",
++ "src/base/platform/elapsed-timer.h",
++ "src/base/platform/mutex.cc",
++ "src/base/platform/mutex.h",
++ "src/base/platform/platform.h",
++ "src/base/platform/semaphore.cc",
++ "src/base/platform/semaphore.h",
++ "src/base/platform/time.cc",
++ "src/base/platform/time.h",
++ "src/base/region-allocator.cc",
++ "src/base/region-allocator.h",
++ "src/base/ring-buffer.h",
++ "src/base/safe_conversions.h",
++ "src/base/safe_conversions_impl.h",
++ "src/base/small-vector.h",
++ "src/base/sys-info.cc",
++ "src/base/sys-info.h",
++ "src/base/template-utils.h",
++ "src/base/timezone-cache.h",
++ "src/base/type-traits.h",
++ "src/base/utils/random-number-generator.cc",
++ "src/base/utils/random-number-generator.h",
++ "src/base/vlq-base64.cc",
++ "src/base/vlq-base64.h",
++ ]
++
++ configs = [ ":internal_config_base" ]
++
++ public_configs = [ ":libbase_config" ]
++
++ deps = [ ":v8_headers" ]
++
++ public_deps = []
++
++ data = []
++
++ data_deps = []
++
++ defines = []
++
++ if (is_component_build) {
++ defines = [ "BUILDING_V8_BASE_SHARED" ]
++ }
++
++ if (is_posix || is_fuchsia) {
++ sources += [
++ "src/base/platform/platform-posix.cc",
++ "src/base/platform/platform-posix.h",
++ ]
++ if (current_os != "aix") {
++ sources += [
++ "src/base/platform/platform-posix-time.cc",
++ "src/base/platform/platform-posix-time.h",
++ ]
++ }
++ }
++
++ if (is_linux) {
++ sources += [
++ "src/base/debug/stack_trace_posix.cc",
++ "src/base/platform/platform-linux.cc",
++ ]
++
++ libs = [
++ "dl",
++ "rt",
++ ]
++ } else if (current_os == "aix") {
++ sources += [
++ "src/base/debug/stack_trace_posix.cc",
++ "src/base/platform/platform-aix.cc",
++ ]
++
++ libs = [
++ "dl",
++ "rt",
++ ]
++ } else if (is_android) {
++ if (current_toolchain == host_toolchain) {
++ libs = [
++ "dl",
++ "rt",
++ ]
++ if (host_os == "mac") {
++ sources += [
++ "src/base/debug/stack_trace_posix.cc",
++ "src/base/platform/platform-macos.cc",
++ ]
++ } else {
++ sources += [
++ "src/base/debug/stack_trace_posix.cc",
++ "src/base/platform/platform-linux.cc",
++ ]
++ }
++ } else {
++ sources += [
++ "src/base/debug/stack_trace_android.cc",
++ "src/base/platform/platform-linux.cc",
++ ]
++ }
++ } else if (is_fuchsia) {
++ sources += [
++ "src/base/debug/stack_trace_fuchsia.cc",
++ "src/base/platform/platform-fuchsia.cc",
++ ]
++ } else if (is_mac || is_ios) {
++ sources += [
++ "src/base/debug/stack_trace_posix.cc",
++ "src/base/platform/platform-macos.cc",
++ ]
++ } else if (is_win) {
++ # TODO(jochen): Add support for cygwin.
++ sources += [
++ "src/base/debug/stack_trace_win.cc",
++ "src/base/platform/platform-win32.cc",
++ "src/base/win32-headers.h",
++ ]
++
++ defines += [ "_CRT_RAND_S" ] # for rand_s()
++
++ libs = [
++ "dbghelp.lib",
++ "winmm.lib",
++ "ws2_32.lib",
++ ]
++
++ data_deps += [ "//build/win:runtime_libs" ]
++ }
++
++ if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") {
++ # Add runtime libs for mips.
++ data += [
++ "tools/mips_toolchain/sysroot/usr/lib/",
++ "tools/mips_toolchain/mips-mti-linux-gnu/lib",
++ ]
++ }
++
++ if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" ||
++ v8_current_cpu == "mips")) {
++ # Special UBSan 32-bit requirement.
++ sources += [ "src/base/ubsan.cc" ]
++ }
++
++ if (is_tsan && !build_with_chromium) {
++ data += [ "tools/sanitizers/tsan_suppressions.txt" ]
++ }
++
++ # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
++}
++
++v8_component("v8_libplatform") {
++ sources = [
++ "//base/trace_event/common/trace_event_common.h",
++ "include/libplatform/libplatform-export.h",
++ "include/libplatform/libplatform.h",
++ "include/libplatform/v8-tracing.h",
++ "src/libplatform/default-foreground-task-runner.cc",
++ "src/libplatform/default-foreground-task-runner.h",
++ "src/libplatform/default-job.cc",
++ "src/libplatform/default-job.h",
++ "src/libplatform/default-platform.cc",
++ "src/libplatform/default-platform.h",
++ "src/libplatform/default-worker-threads-task-runner.cc",
++ "src/libplatform/default-worker-threads-task-runner.h",
++ "src/libplatform/delayed-task-queue.cc",
++ "src/libplatform/delayed-task-queue.h",
++ "src/libplatform/task-queue.cc",
++ "src/libplatform/task-queue.h",
++ "src/libplatform/tracing/trace-buffer.cc",
++ "src/libplatform/tracing/trace-buffer.h",
++ "src/libplatform/tracing/trace-config.cc",
++ "src/libplatform/tracing/trace-object.cc",
++ "src/libplatform/tracing/trace-writer.cc",
++ "src/libplatform/tracing/trace-writer.h",
++ "src/libplatform/tracing/tracing-controller.cc",
++ "src/libplatform/worker-thread.cc",
++ "src/libplatform/worker-thread.h",
++ ]
++
++ configs = [ ":internal_config_base" ]
++
++ if (is_component_build) {
++ defines = [ "BUILDING_V8_PLATFORM_SHARED" ]
++ }
++
++ public_configs = [ ":libplatform_config" ]
++
++ deps = [
++ ":v8_headers",
++ ":v8_libbase",
++ ":v8_tracing",
++ ]
++
++ if (v8_use_perfetto) {
++ sources -= [
++ "//base/trace_event/common/trace_event_common.h",
++ "src/libplatform/tracing/trace-buffer.cc",
++ "src/libplatform/tracing/trace-buffer.h",
++ "src/libplatform/tracing/trace-object.cc",
++ "src/libplatform/tracing/trace-writer.cc",
++ "src/libplatform/tracing/trace-writer.h",
++ ]
++ sources += [
++ "src/libplatform/tracing/trace-event-listener.cc",
++ "src/libplatform/tracing/trace-event-listener.h",
++ ]
++ deps += [
++ # TODO(skyostil): Switch TraceEventListener to protozero.
++ "//third_party/perfetto/protos/perfetto/trace:lite",
++ ]
++ }
++}
++
++v8_source_set("v8_libsampler") {
++ sources = [
++ "src/libsampler/sampler.cc",
++ "src/libsampler/sampler.h",
++ ]
++
++ configs = [ ":internal_config" ]
++
++ public_configs = [ ":libsampler_config" ]
++
++ deps = [ ":v8_libbase" ]
++}
++
++v8_source_set("fuzzer_support") {
++ visibility = [ ":*" ] # Only targets in this file can depend on this.
++
++ sources = [
++ "test/fuzzer/fuzzer-support.cc",
++ "test/fuzzer/fuzzer-support.h",
++ ]
++
++ configs = [ ":internal_config_base" ]
++
++ public_deps = [
++ ":v8",
++ ":v8_libbase",
++ ":v8_libplatform",
++ ":v8_maybe_icu",
++ ]
++}
++
++v8_source_set("cppgc_base") {
++ visibility = [ ":*" ]
++
++ sources = [
++ "include/cppgc/allocation.h",
++ "include/cppgc/common.h",
++ "include/cppgc/custom-space.h",
++ "include/cppgc/garbage-collected.h",
++ "include/cppgc/heap.h",
++ "include/cppgc/internal/accessors.h",
++ "include/cppgc/internal/api-contants.h",
++ "include/cppgc/internal/compiler-specific.h",
++ "include/cppgc/internal/finalizer-traits.h",
++ "include/cppgc/internal/gc-info.h",
++ "include/cppgc/internal/persistent-node.h",
++ "include/cppgc/internal/pointer-policies.h",
++ "include/cppgc/internal/prefinalizer-handler.h",
++ "include/cppgc/liveness-broker.h",
++ "include/cppgc/liveness-broker.h",
++ "include/cppgc/macros.h",
++ "include/cppgc/member.h",
++ "include/cppgc/persistent.h",
++ "include/cppgc/platform.h",
++ "include/cppgc/prefinalizer.h",
++ "include/cppgc/source-location.h",
++ "include/cppgc/trace-trait.h",
++ "include/cppgc/type-traits.h",
++ "include/cppgc/visitor.h",
++ "include/v8config.h",
++ "src/heap/cppgc/allocation.cc",
++ "src/heap/cppgc/free-list.cc",
++ "src/heap/cppgc/free-list.h",
++ "src/heap/cppgc/gc-info-table.cc",
++ "src/heap/cppgc/gc-info-table.h",
++ "src/heap/cppgc/gc-info.cc",
++ "src/heap/cppgc/heap-inl.h",
++ "src/heap/cppgc/heap-object-header-inl.h",
++ "src/heap/cppgc/heap-object-header.cc",
++ "src/heap/cppgc/heap-object-header.h",
++ "src/heap/cppgc/heap-page.cc",
++ "src/heap/cppgc/heap-page.h",
++ "src/heap/cppgc/heap-space.cc",
++ "src/heap/cppgc/heap-space.h",
++ "src/heap/cppgc/heap-visitor.h",
++ "src/heap/cppgc/heap.cc",
++ "src/heap/cppgc/heap.h",
++ "src/heap/cppgc/liveness-broker.cc",
++ "src/heap/cppgc/logging.cc",
++ "src/heap/cppgc/marker.cc",
++ "src/heap/cppgc/marker.h",
++ "src/heap/cppgc/marking-visitor.cc",
++ "src/heap/cppgc/marking-visitor.h",
++ "src/heap/cppgc/object-allocator-inl.h",
++ "src/heap/cppgc/object-allocator.cc",
++ "src/heap/cppgc/object-allocator.h",
++ "src/heap/cppgc/object-start-bitmap-inl.h",
++ "src/heap/cppgc/object-start-bitmap.h",
++ "src/heap/cppgc/page-memory-inl.h",
++ "src/heap/cppgc/page-memory.cc",
++ "src/heap/cppgc/page-memory.h",
++ "src/heap/cppgc/persistent-node.cc",
++ "src/heap/cppgc/platform.cc",
++ "src/heap/cppgc/pointer-policies.cc",
++ "src/heap/cppgc/prefinalizer-handler.cc",
++ "src/heap/cppgc/prefinalizer-handler.h",
++ "src/heap/cppgc/raw-heap.cc",
++ "src/heap/cppgc/raw-heap.h",
++ "src/heap/cppgc/sanitizers.h",
++ "src/heap/cppgc/source-location.cc",
++ "src/heap/cppgc/stack.cc",
++ "src/heap/cppgc/stack.h",
++ "src/heap/cppgc/sweeper.cc",
++ "src/heap/cppgc/sweeper.h",
++ "src/heap/cppgc/worklist.h",
++ ]
++
++ if (is_clang || !is_win) {
++ if (target_cpu == "x64") {
++ sources += [ "src/heap/cppgc/asm/x64/push_registers_asm.cc" ]
++ } else if (target_cpu == "x86") {
++ sources += [ "src/heap/cppgc/asm/ia32/push_registers_asm.cc" ]
++ } else if (target_cpu == "arm") {
++ sources += [ "src/heap/cppgc/asm/arm/push_registers_asm.cc" ]
++ } else if (target_cpu == "arm64") {
++ sources += [ "src/heap/cppgc/asm/arm64/push_registers_asm.cc" ]
++ } else if (target_cpu == "ppc64") {
++ sources += [ "src/heap/cppgc/asm/ppc/push_registers_asm.cc" ]
++ } else if (target_cpu == "s390x") {
++ sources += [ "src/heap/cppgc/asm/s390/push_registers_asm.cc" ]
++ } else if (target_cpu == "mipsel") {
++ sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ]
++ } else if (target_cpu == "mips64el") {
++ sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ]
++ }
++ } else if (is_win) {
++ if (target_cpu == "x64") {
++ sources += [ "src/heap/cppgc/asm/x64/push_registers_masm.S" ]
++ } else if (target_cpu == "x86") {
++ sources += [ "src/heap/cppgc/asm/ia32/push_registers_masm.S" ]
++ } else if (target_cpu == "arm64") {
++ sources += [ "src/heap/cppgc/asm/arm64/push_registers_masm.S" ]
++ }
++ }
++
++ configs = [
++ ":internal_config",
++ ":cppgc_base_config",
++ ]
++
++ public_deps = [ ":v8_libbase" ]
++}
++
++###############################################################################
++# Produce a single static library for embedders
++#
++
++if (v8_monolithic) {
++ # A component build is not monolithic.
++ assert(!is_component_build)
++
++ # Using external startup data would produce separate files.
++ assert(!v8_use_external_startup_data)
++ v8_static_library("v8_monolith") {
++ deps = [
++ ":v8",
++ ":v8_libbase",
++ ":v8_libplatform",
++ ":v8_libsampler",
++ "//build/win:default_exe_manifest",
++ ]
++
++ configs = [ ":internal_config" ]
++ }
++}
++
++v8_static_library("wee8") {
++ deps = [
++ ":v8_base",
++ ":v8_libbase",
++ ":v8_libplatform",
++ ":v8_libsampler",
++ ":v8_snapshot",
++ "//build/win:default_exe_manifest",
++ ]
++
++ # TODO: v8dll-main.cc equivalent for shared library builds
++
++ configs = [ ":internal_config" ]
++
++ sources = [
++ ### gcmole(all) ###
++ "src/wasm/c-api.cc",
++ "src/wasm/c-api.h",
++ "third_party/wasm-api/wasm.h",
++ "third_party/wasm-api/wasm.hh",
++ ]
++}
++
++###############################################################################
++# Executables
++#
++
++if (current_toolchain == v8_generator_toolchain) {
++ v8_executable("bytecode_builtins_list_generator") {
++ visibility = [ ":*" ] # Only targets in this file can depend on this.
++
++ include_dirs = [ "." ]
++
++ sources = [
++ "src/builtins/generate-bytecodes-builtins-list.cc",
++ "src/interpreter/bytecode-operands.cc",
++ "src/interpreter/bytecode-operands.h",
++ "src/interpreter/bytecodes.cc",
++ "src/interpreter/bytecodes.h",
++ ]
++
++ configs = [ ":internal_config" ]
++
++ deps = [
++ ":v8_libbase",
++ "//build/win:default_exe_manifest",
++ ]
++ }
++}
++
++if (current_toolchain == v8_snapshot_toolchain) {
++ v8_executable("mksnapshot") {
++ visibility = [ ":*" ] # Only targets in this file can depend on this.
++ ++ sources = [ ++ "src/snapshot/embedded/embedded-empty.cc", ++ "src/snapshot/embedded/embedded-file-writer.cc", ++ "src/snapshot/embedded/embedded-file-writer.h", ++ "src/snapshot/embedded/platform-embedded-file-writer-aix.cc", ++ "src/snapshot/embedded/platform-embedded-file-writer-aix.h", ++ "src/snapshot/embedded/platform-embedded-file-writer-base.cc", ++ "src/snapshot/embedded/platform-embedded-file-writer-base.h", ++ "src/snapshot/embedded/platform-embedded-file-writer-generic.cc", ++ "src/snapshot/embedded/platform-embedded-file-writer-generic.h", ++ "src/snapshot/embedded/platform-embedded-file-writer-mac.cc", ++ "src/snapshot/embedded/platform-embedded-file-writer-mac.h", ++ "src/snapshot/embedded/platform-embedded-file-writer-win.cc", ++ "src/snapshot/embedded/platform-embedded-file-writer-win.h", ++ "src/snapshot/mksnapshot.cc", ++ "src/snapshot/snapshot-empty.cc", ++ ] ++ ++ configs = [ ":internal_config" ] ++ ++ deps = [ ++ ":v8_base_without_compiler", ++ ":v8_compiler_for_mksnapshot", ++ ":v8_init", ++ ":v8_libbase", ++ ":v8_libplatform", ++ ":v8_maybe_icu", ++ ":v8_tracing", ++ "//build/win:default_exe_manifest", ++ ] ++ } ++} ++ ++if (current_toolchain == v8_snapshot_toolchain) { ++ v8_executable("torque") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. ++ ++ sources = [ "src/torque/torque.cc" ] ++ ++ deps = [ ++ ":torque_base", ++ "//build/win:default_exe_manifest", ++ ] ++ ++ # The use of exceptions for Torque in violation of the Chromium style-guide ++ # is justified by the fact that it is only used from the non-essential ++ # language server and can be removed anytime if it causes problems. ++ configs = [ ++ ":internal_config", ++ "//build/config/compiler:exceptions", ++ "//build/config/compiler:rtti", ++ ] ++ ++ remove_configs = [ ++ "//build/config/compiler:no_exceptions", ++ "//build/config/compiler:no_rtti", ++ ] ++ ++ if (is_win && is_asan) { ++ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] ++ } ++ } ++} ++ ++v8_executable("torque-language-server") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. ++ ++ sources = [ "src/torque/ls/torque-language-server.cc" ] ++ ++ deps = [ ++ ":torque_base", ++ ":torque_ls_base", ++ "//build/win:default_exe_manifest", ++ ] ++ ++ # The use of exceptions for Torque in violation of the Chromium style-guide ++ # is justified by the fact that it is only used from the non-essential ++ # language server and can be removed anytime if it causes problems. ++ configs = [ ++ ":internal_config", ++ "//build/config/compiler:exceptions", ++ "//build/config/compiler:rtti", ++ ] ++ ++ remove_configs = [ ++ "//build/config/compiler:no_exceptions", ++ "//build/config/compiler:no_rtti", ++ ] ++ ++ if (is_win && is_asan) { ++ remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ] ++ } ++} ++ ++if (v8_enable_i18n_support) { ++ if (current_toolchain == v8_generator_toolchain) { ++ v8_executable("gen-regexp-special-case") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. ++ ++ sources = [ "src/regexp/gen-regexp-special-case.cc" ] ++ ++ deps = [ ++ ":v8_libbase", ++ "//build/win:default_exe_manifest", ++ "//third_party/icu", ++ ] ++ ++ configs = [ ":internal_config" ] ++ } ++ } ++ ++ action("run_gen-regexp-special-case") { ++ visibility = [ ":*" ] # Only targets in this file can depend on this. 
++ ++ script = "tools/run.py" ++ ++ deps = [ ":gen-regexp-special-case($v8_generator_toolchain)" ] ++ ++ output_file = "$target_gen_dir/src/regexp/special-case.cc" ++ ++ outputs = [ output_file ] ++ ++ args = [ ++ "./" + rebase_path( ++ get_label_info( ++ ":gen-regexp-special-case($v8_generator_toolchain)", ++ "root_out_dir") + "/gen-regexp-special-case", ++ root_build_dir), ++ rebase_path(output_file, root_build_dir), ++ ] ++ } ++} ++ ++############################################################################### ++# Public targets ++# ++ ++want_v8_shell = ++ (current_toolchain == host_toolchain && v8_toolset_for_shell == "host") || ++ (current_toolchain == v8_snapshot_toolchain && ++ v8_toolset_for_shell == "host") || ++ (current_toolchain != host_toolchain && v8_toolset_for_shell == "target") ++ ++group("gn_all") { ++ testonly = true ++ ++ deps = [ ++ ":d8", ++ ":v8_fuzzers", ++ ":v8_hello_world", ++ ":v8_sample_process", ++ "test:gn_all", ++ "tools:gn_all", ++ ] ++ ++ if (v8_custom_deps != "") { ++ # Custom dependency from directory under v8/custom_deps. ++ deps += [ v8_custom_deps ] ++ } ++ ++ if (want_v8_shell) { ++ deps += [ ":v8_shell" ] ++ } ++} ++ ++group("v8_python_base") { ++ data = [ ".vpython" ] ++} ++ ++group("v8_clusterfuzz") { ++ testonly = true ++ ++ deps = [ ":d8" ] ++ ++ if (v8_multi_arch_build) { ++ deps += [ ++ ":d8(//build/toolchain/linux:clang_x64)", ++ ":d8(//build/toolchain/linux:clang_x64_v8_arm64)", ++ ":d8(//build/toolchain/linux:clang_x86)", ++ ":d8(//build/toolchain/linux:clang_x86_v8_arm)", ++ ":d8(tools/clusterfuzz/toolchain:clang_x64_pointer_compression)", ++ ] ++ } ++} ++ ++group("v8_archive") { ++ testonly = true ++ ++ deps = [ ":d8" ] ++ ++ if (!is_win) { ++ # On windows, cctest doesn't link with v8_static_library. ++ deps += [ "test/cctest:cctest" ] ++ } ++} ++ ++# TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause ++# is removed from Chromium. 
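A note on the run_gen-regexp-special-case action above: tools/run.py executes its first argument with the remaining arguments as that program's argv, so the effective command is the generator binary (built in the generator toolchain's root_out_dir) followed by the output path, yielding a generated src/regexp/special-case.cc under target_gen_dir. For orientation, the generated file is plausibly shaped like the following (hypothetical sketch of the output, not the verbatim generated code):

    // Hypothetical shape of the generated special-case.cc: tables for
    // case-insensitive RegExp matching, precomputed from ICU at build time.
    #include "unicode/uniset.h"

    namespace v8 {
    namespace internal {

    icu::UnicodeSet BuildSpecialAddSet() {
      icu::UnicodeSet set;
      set.add(0x130);  // illustrative entry only; real entries come from ICU
      set.freeze();
      return set;
    }

    }  // namespace internal
    }  // namespace v8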
++if (is_fuchsia && !build_with_chromium) { ++ import("//build/config/fuchsia/rules.gni") ++ ++ cr_fuchsia_package("d8_fuchsia_pkg") { ++ testonly = true ++ binary = ":d8" ++ package_name_override = "d8" ++ } ++ ++ fuchsia_package_runner("d8_fuchsia") { ++ testonly = true ++ package = ":d8_fuchsia_pkg" ++ package_name_override = "d8" ++ } ++} ++ ++group("v8_fuzzers") { ++ testonly = true ++ data_deps = [ ++ ":v8_simple_json_fuzzer", ++ ":v8_simple_multi_return_fuzzer", ++ ":v8_simple_parser_fuzzer", ++ ":v8_simple_regexp_builtins_fuzzer", ++ ":v8_simple_regexp_fuzzer", ++ ":v8_simple_wasm_async_fuzzer", ++ ":v8_simple_wasm_code_fuzzer", ++ ":v8_simple_wasm_compile_fuzzer", ++ ":v8_simple_wasm_fuzzer", ++ ] ++} ++ ++if (is_component_build) { ++ v8_component("v8") { ++ sources = [ "src/utils/v8dll-main.cc" ] ++ ++ public_deps = [ ++ ":v8_base", ++ ":v8_snapshot", ++ ] ++ ++ configs = [ ":internal_config" ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ v8_component("v8_for_testing") { ++ testonly = true ++ ++ sources = [ "src/utils/v8dll-main.cc" ] ++ ++ public_deps = [ ++ ":torque_base", ++ ":torque_ls_base", ++ ":v8_base", ++ ":v8_headers", ++ ":v8_initializers", ++ ":v8_snapshot", ++ ] ++ ++ configs = [ ":internal_config" ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ v8_component("cppgc") { ++ public_deps = [ ":cppgc_base" ] ++ ++ configs = [ ":internal_config" ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ v8_component("cppgc_for_testing") { ++ testonly = true ++ ++ public_deps = [ ":cppgc_base" ] ++ ++ configs = [ ":internal_config" ] ++ public_configs = [ ":external_config" ] ++ } ++} else { ++ group("v8") { ++ public_deps = [ ++ ":v8_base", ++ ":v8_snapshot", ++ ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ group("v8_for_testing") { ++ testonly = true ++ ++ public_deps = [ ++ ":torque_base", ++ ":torque_ls_base", ++ ":v8_base", ++ ":v8_initializers", ++ ":v8_snapshot", ++ ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ group("cppgc") { ++ public_deps = [ ":cppgc_base" ] ++ ++ public_configs = [ ":external_config" ] ++ } ++ ++ group("cppgc_for_testing") { ++ testonly = true ++ ++ public_deps = [ ":cppgc_base" ] ++ ++ public_configs = [ ":external_config" ] ++ } ++} ++ ++v8_executable("d8") { ++ sources = [ ++ "src/d8/async-hooks-wrapper.cc", ++ "src/d8/async-hooks-wrapper.h", ++ "src/d8/d8-console.cc", ++ "src/d8/d8-console.h", ++ "src/d8/d8-js.cc", ++ "src/d8/d8-platforms.cc", ++ "src/d8/d8-platforms.h", ++ "src/d8/d8.cc", ++ "src/d8/d8.h", ++ ] ++ ++ configs = [ ++ # Note: don't use :internal_config here because this target will get ++ # the :external_config applied to it by virtue of depending on :v8, and ++ # you can't have both applied to the same target. 
++ ":internal_config_base", ++ ":v8_tracing_config", ++ ] ++ ++ deps = [ ++ ":v8", ++ ":v8_libbase", ++ ":v8_libplatform", ++ ":v8_tracing", ++ "//build/win:default_exe_manifest", ++ ] ++ ++ if (is_posix || is_fuchsia) { ++ sources += [ "src/d8/d8-posix.cc" ] ++ } else if (is_win) { ++ sources += [ "src/d8/d8-windows.cc" ] ++ } ++ ++ if (v8_correctness_fuzzer) { ++ deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ] ++ } ++ ++ defines = [] ++ ++ if (v8_enable_vtunejit) { ++ deps += [ "src/third_party/vtune:v8_vtune" ] ++ } ++} ++ ++v8_executable("v8_hello_world") { ++ sources = [ "samples/hello-world.cc" ] ++ ++ configs = [ ++ # Note: don't use :internal_config here because this target will get ++ # the :external_config applied to it by virtue of depending on :v8, and ++ # you can't have both applied to the same target. ++ ":internal_config_base", ++ ] ++ ++ deps = [ ++ ":v8", ++ ":v8_libbase", ++ ":v8_libplatform", ++ "//build/win:default_exe_manifest", ++ ] ++} ++ ++v8_executable("v8_sample_process") { ++ sources = [ "samples/process.cc" ] ++ ++ configs = [ ++ # Note: don't use :internal_config here because this target will get ++ # the :external_config applied to it by virtue of depending on :v8, and ++ # you can't have both applied to the same target. ++ ":internal_config_base", ++ ] ++ ++ deps = [ ++ ":v8", ++ ":v8_libbase", ++ ":v8_libplatform", ++ "//build/win:default_exe_manifest", ++ ] ++} ++ ++if (want_v8_shell) { ++ v8_executable("v8_shell") { ++ sources = [ "samples/shell.cc" ] ++ ++ configs = [ ++ # Note: don't use :internal_config here because this target will get ++ # the :external_config applied to it by virtue of depending on :v8, and ++ # you can't have both applied to the same target. ++ ":internal_config_base", ++ ] ++ ++ deps = [ ++ ":v8", ++ ":v8_libbase", ++ ":v8_libplatform", ++ "//build/win:default_exe_manifest", ++ ] ++ } ++} ++ ++template("v8_fuzzer") { ++ name = target_name ++ forward_variables_from(invoker, "*") ++ v8_executable("v8_simple_" + name) { ++ deps = [ ++ ":" + name, ++ "//build/win:default_exe_manifest", ++ ] ++ ++ sources = [ "test/fuzzer/fuzzer.cc" ] ++ ++ configs = [ ":external_config" ] ++ } ++} ++ ++v8_source_set("json_fuzzer") { ++ sources = [ "test/fuzzer/json.cc" ] ++ ++ deps = [ ":fuzzer_support" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("json_fuzzer") { ++} ++ ++v8_source_set("multi_return_fuzzer") { ++ sources = [ "test/fuzzer/multi-return.cc" ] ++ ++ deps = [ ":fuzzer_support" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("multi_return_fuzzer") { ++} ++ ++v8_source_set("parser_fuzzer") { ++ sources = [ "test/fuzzer/parser.cc" ] ++ ++ deps = [ ":fuzzer_support" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("parser_fuzzer") { ++} ++ ++v8_source_set("regexp_builtins_fuzzer") { ++ sources = [ ++ "test/fuzzer/regexp-builtins.cc", ++ "test/fuzzer/regexp_builtins/mjsunit.js.h", ++ ] ++ ++ deps = [ ":fuzzer_support" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("regexp_builtins_fuzzer") { ++} ++ ++v8_source_set("regexp_fuzzer") { ++ sources = [ "test/fuzzer/regexp.cc" ] ++ ++ deps = [ ":fuzzer_support" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("regexp_fuzzer") { ++} ++ ++v8_source_set("wasm_module_runner") { ++ sources = [ ++ "test/common/wasm/wasm-module-runner.cc", ++ 
"test/common/wasm/wasm-module-runner.h", ++ ] ++ ++ deps = [ ++ ":generate_bytecode_builtins_list", ++ ":run_torque", ++ ":v8_tracing", ++ ] ++ ++ public_deps = [ ":v8_maybe_icu" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_source_set("wasm_fuzzer") { ++ sources = [ "test/fuzzer/wasm.cc" ] ++ ++ deps = [ ++ ":fuzzer_support", ++ ":lib_wasm_fuzzer_common", ++ ":wasm_module_runner", ++ ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("wasm_fuzzer") { ++} ++ ++v8_source_set("wasm_async_fuzzer") { ++ sources = [ "test/fuzzer/wasm-async.cc" ] ++ ++ deps = [ ++ ":fuzzer_support", ++ ":lib_wasm_fuzzer_common", ++ ":wasm_module_runner", ++ ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("wasm_async_fuzzer") { ++} ++ ++v8_source_set("wasm_code_fuzzer") { ++ sources = [ ++ "test/common/wasm/test-signatures.h", ++ "test/fuzzer/wasm-code.cc", ++ ] ++ ++ deps = [ ++ ":fuzzer_support", ++ ":lib_wasm_fuzzer_common", ++ ":wasm_module_runner", ++ ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("wasm_code_fuzzer") { ++} ++ ++v8_source_set("lib_wasm_fuzzer_common") { ++ sources = [ ++ "test/fuzzer/wasm-fuzzer-common.cc", ++ "test/fuzzer/wasm-fuzzer-common.h", ++ ] ++ ++ deps = [ ++ ":generate_bytecode_builtins_list", ++ ":run_torque", ++ ":v8_tracing", ++ ] ++ ++ public_deps = [ ":v8_maybe_icu" ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_source_set("wasm_compile_fuzzer") { ++ sources = [ ++ "test/common/wasm/test-signatures.h", ++ "test/fuzzer/wasm-compile.cc", ++ ] ++ ++ deps = [ ++ ":fuzzer_support", ++ ":lib_wasm_fuzzer_common", ++ ":wasm_module_runner", ++ ] ++ ++ configs = [ ++ ":external_config", ++ ":internal_config_base", ++ ] ++} ++ ++v8_fuzzer("wasm_compile_fuzzer") { ++} ++ ++# Target to build all generated .cc files. ++group("v8_generated_cc_files") { ++ testonly = true ++ ++ deps = [ ++ ":generate_bytecode_builtins_list", ++ ":run_torque", ++ "src/inspector:v8_generated_cc_files", ++ ] ++} ++ ++# Protobuf targets, used only when building outside of chromium. ++ ++if (!build_with_chromium && v8_use_perfetto) { ++ # This config is applied to the autogenerated .pb.{cc,h} files in ++ # proto_library.gni. This config is propagated up to the source sets ++ # that depend on generated proto headers. ++ config("protobuf_gen_config") { ++ defines = [ ++ "GOOGLE_PROTOBUF_NO_RTTI", ++ "GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER", ++ ] ++ cflags = [ ++ "-Wno-unknown-warning-option", ++ "-Wno-deprecated", ++ "-Wno-undef", ++ "-Wno-zero-as-null-pointer-constant", ++ "-Wno-thread-safety-attributes", ++ ] ++ include_dirs = [ "third_party/protobuf/src" ] ++ } ++ ++ # Configuration used to build libprotobuf_* and the protoc compiler. ++ config("protobuf_config") { ++ # Apply the lighter supressions and macro definitions from above. 
++ configs = [ ":protobuf_gen_config" ] ++ ++ if (!is_win) { ++ defines = [ "HAVE_PTHREAD=1" ] ++ } ++ if (is_clang) { ++ cflags = [ ++ "-Wno-unused-private-field", ++ "-Wno-unused-function", ++ "-Wno-inconsistent-missing-override", ++ "-Wno-unknown-warning-option", ++ "-Wno-enum-compare-switch", ++ "-Wno-user-defined-warnings", ++ "-Wno-tautological-constant-compare", ++ ] ++ } ++ if (is_win && is_clang) { ++ cflags += [ "-Wno-microsoft-unqualified-friend" ] ++ } ++ } ++ ++ source_set("protobuf_lite") { ++ sources = [ ++ "third_party/protobuf/src/google/protobuf/any_lite.cc", ++ "third_party/protobuf/src/google/protobuf/arena.cc", ++ "third_party/protobuf/src/google/protobuf/extension_set.cc", ++ "third_party/protobuf/src/google/protobuf/generated_message_table_driven_lite.cc", ++ "third_party/protobuf/src/google/protobuf/generated_message_util.cc", ++ "third_party/protobuf/src/google/protobuf/implicit_weak_message.cc", ++ "third_party/protobuf/src/google/protobuf/io/coded_stream.cc", ++ "third_party/protobuf/src/google/protobuf/io/strtod.cc", ++ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream.cc", ++ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.cc", ++ "third_party/protobuf/src/google/protobuf/message_lite.cc", ++ "third_party/protobuf/src/google/protobuf/repeated_field.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/bytestream.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/common.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/int128.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/io_win32.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/status.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/statusor.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/stringpiece.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/structurally_valid.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/strutil.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/time.cc", ++ "third_party/protobuf/src/google/protobuf/wire_format_lite.cc", ++ ] ++ configs -= [ "//build/config/compiler:chromium_code" ] ++ configs += [ ++ "//build/config/compiler:no_chromium_code", ++ ":protobuf_config", ++ ] ++ if (is_win) { ++ configs -= [ "//build/config/win:lean_and_mean" ] ++ } ++ public_configs = [ ":protobuf_gen_config" ] ++ } ++ ++ # This target should be used only by the protoc compiler and by test targets. 
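The lite/full split here mirrors upstream protobuf's two runtimes: protobuf_lite carries only the MessageLite runtime (wire format, no descriptors or reflection), while protobuf_full layers descriptors, reflection, and text format on top, which is why only protoc and test targets should link it. A minimal illustration of what the extra runtime buys (a sketch against the public protobuf API, not code from this build):

    #include <string>
    #include <google/protobuf/message.h>       // full runtime only
    #include <google/protobuf/message_lite.h>  // available in both runtimes

    void Demo(google::protobuf::Message& msg) {
      std::string wire;
      msg.SerializeToString(&wire);          // MessageLite-level API: works
                                             // against :protobuf_lite alone
      std::string text = msg.DebugString();  // reflection/text format: needs
                                             // :protobuf_full linked in
      (void)text;
    }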
++ source_set("protobuf_full") { ++ deps = [ ":protobuf_lite" ] ++ sources = [ ++ "third_party/protobuf/src/google/protobuf/any.cc", ++ "third_party/protobuf/src/google/protobuf/any.pb.cc", ++ "third_party/protobuf/src/google/protobuf/api.pb.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/importer.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/parser.cc", ++ "third_party/protobuf/src/google/protobuf/descriptor.cc", ++ "third_party/protobuf/src/google/protobuf/descriptor.pb.cc", ++ "third_party/protobuf/src/google/protobuf/descriptor_database.cc", ++ "third_party/protobuf/src/google/protobuf/duration.pb.cc", ++ "third_party/protobuf/src/google/protobuf/dynamic_message.cc", ++ "third_party/protobuf/src/google/protobuf/empty.pb.cc", ++ "third_party/protobuf/src/google/protobuf/extension_set_heavy.cc", ++ "third_party/protobuf/src/google/protobuf/field_mask.pb.cc", ++ "third_party/protobuf/src/google/protobuf/generated_message_reflection.cc", ++ "third_party/protobuf/src/google/protobuf/generated_message_table_driven.cc", ++ "third_party/protobuf/src/google/protobuf/io/gzip_stream.cc", ++ "third_party/protobuf/src/google/protobuf/io/printer.cc", ++ "third_party/protobuf/src/google/protobuf/io/tokenizer.cc", ++ "third_party/protobuf/src/google/protobuf/io/zero_copy_stream_impl.cc", ++ "third_party/protobuf/src/google/protobuf/map_field.cc", ++ "third_party/protobuf/src/google/protobuf/message.cc", ++ "third_party/protobuf/src/google/protobuf/reflection_ops.cc", ++ "third_party/protobuf/src/google/protobuf/service.cc", ++ "third_party/protobuf/src/google/protobuf/source_context.pb.cc", ++ "third_party/protobuf/src/google/protobuf/struct.pb.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/mathlimits.cc", ++ "third_party/protobuf/src/google/protobuf/stubs/substitute.cc", ++ "third_party/protobuf/src/google/protobuf/text_format.cc", ++ "third_party/protobuf/src/google/protobuf/timestamp.pb.cc", ++ "third_party/protobuf/src/google/protobuf/type.pb.cc", ++ "third_party/protobuf/src/google/protobuf/unknown_field_set.cc", ++ "third_party/protobuf/src/google/protobuf/util/delimited_message_util.cc", ++ "third_party/protobuf/src/google/protobuf/util/field_comparator.cc", ++ "third_party/protobuf/src/google/protobuf/util/field_mask_util.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/error_listener.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/object_writer.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/type_info.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/type_info_test_helper.cc", ++ "third_party/protobuf/src/google/protobuf/util/internal/utility.cc", ++ "third_party/protobuf/src/google/protobuf/util/json_util.cc", ++ 
"third_party/protobuf/src/google/protobuf/util/message_differencer.cc", ++ "third_party/protobuf/src/google/protobuf/util/time_util.cc", ++ "third_party/protobuf/src/google/protobuf/util/type_resolver_util.cc", ++ "third_party/protobuf/src/google/protobuf/wire_format.cc", ++ "third_party/protobuf/src/google/protobuf/wrappers.pb.cc", ++ ] ++ configs -= [ "//build/config/compiler:chromium_code" ] ++ configs += [ ++ "//build/config/compiler:no_chromium_code", ++ ":protobuf_config", ++ ] ++ if (is_win) { ++ configs -= [ "//build/config/win:lean_and_mean" ] ++ } ++ public_configs = [ ":protobuf_gen_config" ] ++ } ++ ++ if (current_toolchain == host_toolchain) { ++ source_set("protoc_lib") { ++ deps = [ ":protobuf_full" ] ++ sources = [ ++ "third_party/protobuf/src/google/protobuf/compiler/code_generator.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/command_line_interface.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_file.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_generator.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_padding_optimizer.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_string_field.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/plugin.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/subprocess.cc", ++ "third_party/protobuf/src/google/protobuf/compiler/zip_writer.cc", ++ ] ++ configs -= [ "//build/config/compiler:chromium_code" ] ++ configs += [ ++ "//build/config/compiler:no_chromium_code", ++ ":protobuf_config", ++ ] ++ if (is_win) { ++ configs -= [ "//build/config/win:lean_and_mean" ] ++ } ++ public_configs = [ ":protobuf_gen_config" ] ++ } ++ ++ executable("protoc") { ++ deps = [ ++ ":protoc_lib", ++ "//build/win:default_exe_manifest", ++ ] ++ sources = [ "src/protobuf/protobuf-compiler-main.cc" ] ++ configs -= [ "//build/config/compiler:chromium_code" ] ++ configs += [ "//build/config/compiler:no_chromium_code" ] ++ } ++ } # host_toolchain ++ ++ v8_component("v8_libperfetto") { ++ configs = [ ":v8_tracing_config" ] ++ public_configs = [ "//third_party/perfetto/gn:public_config" ] ++ deps = [ ++ "//third_party/perfetto/src/trace_processor:export_json", ++ "//third_party/perfetto/src/trace_processor:storage_minimal", ++ "//third_party/perfetto/src/tracing:client_api", ++ "//third_party/perfetto/src/tracing/core", ++ ++ # TODO(skyostil): Support non-POSIX platforms. 
++ "//third_party/perfetto/protos/perfetto/config:cpp", ++ "//third_party/perfetto/protos/perfetto/trace/track_event:zero", ++ "//third_party/perfetto/src/tracing:in_process_backend", ++ "//third_party/perfetto/src/tracing:platform_posix", ++ ] ++ } ++} # if (!build_with_chromium && v8_use_perfetto) +diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni +index b5fb1823..5b8e6f77 100644 +--- a/deps/v8/gni/snapshot_toolchain.gni ++++ b/deps/v8/gni/snapshot_toolchain.gni +@@ -79,7 +79,8 @@ if (v8_snapshot_toolchain == "") { + + if (v8_current_cpu == "x64" || v8_current_cpu == "x86") { + _cpus = v8_current_cpu +- } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") { ++ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || ++ v8_current_cpu == "loong64") { + if (is_win && v8_current_cpu == "arm64") { + # set _cpus to blank for Windows ARM64 so host_toolchain could be + # selected as snapshot toolchain later. +diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h +index 327f9ab3..d895868c 100644 +--- a/deps/v8/src/base/build_config.h ++++ b/deps/v8/src/base/build_config.h +@@ -33,6 +33,9 @@ + #elif defined(__MIPSEB__) || defined(__MIPSEL__) + #define V8_HOST_ARCH_MIPS 1 + #define V8_HOST_ARCH_32_BIT 1 ++#elif defined(__loongarch64) ++#define V8_HOST_ARCH_LOONG64 1 ++#define V8_HOST_ARCH_64_BIT 1 + #elif defined(__PPC64__) || defined(_ARCH_PPC64) + #define V8_HOST_ARCH_PPC64 1 + #define V8_HOST_ARCH_64_BIT 1 +@@ -77,7 +80,8 @@ + // environment as presented by the compiler. + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ +- !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 ++ !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ ++ !V8_TARGET_ARCH_LOONG64 + #if defined(_M_X64) || defined(__x86_64__) + #define V8_TARGET_ARCH_X64 1 + #elif defined(_M_IX86) || defined(__i386__) +@@ -118,6 +122,8 @@ + #define V8_TARGET_ARCH_32_BIT 1 + #elif V8_TARGET_ARCH_MIPS64 + #define V8_TARGET_ARCH_64_BIT 1 ++#elif V8_TARGET_ARCH_LOONG64 ++#define V8_TARGET_ARCH_64_BIT 1 + #elif V8_TARGET_ARCH_PPC + #define V8_TARGET_ARCH_32_BIT 1 + #elif V8_TARGET_ARCH_PPC64 +@@ -156,6 +162,9 @@ + #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) + #error Target architecture mips64 is only supported on mips64 and x64 host + #endif ++#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64)) ++#error Target architecture loong64 is only supported on loong64 and x64 host ++#endif + + // Determine architecture endianness. + #if V8_TARGET_ARCH_IA32 +@@ -166,6 +175,8 @@ + #define V8_TARGET_LITTLE_ENDIAN 1 + #elif V8_TARGET_ARCH_ARM64 + #define V8_TARGET_LITTLE_ENDIAN 1 ++#elif V8_TARGET_ARCH_LOONG64 ++#define V8_TARGET_LITTLE_ENDIAN 1 + #elif V8_TARGET_ARCH_MIPS + #if defined(__MIPSEB__) + #define V8_TARGET_BIG_ENDIAN 1 +diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc +index e5aa4de1..c6bdfcfb 100644 +--- a/deps/v8/src/base/platform/platform-posix.cc ++++ b/deps/v8/src/base/platform/platform-posix.cc +@@ -303,6 +303,10 @@ void* OS::GetRandomMmapAddr() { + // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance + // to fulfill request. + raw_addr &= uint64_t{0xFFFFFF0000}; ++#elif V8_TARGET_ARCH_LOONG64 ++ // 42 bits of virtual addressing. 
Truncate to 40 bits to allow kernel chance ++ // to fulfill request. ++ raw_addr &= uint64_t{0xFFFFFF0000}; + #else + raw_addr &= 0x3FFFF000; + +@@ -486,6 +490,8 @@ void OS::DebugBreak() { + asm("break"); + #elif V8_HOST_ARCH_MIPS64 + asm("break"); ++#elif V8_HOST_ARCH_LOONG64 ++ asm("break 0"); + #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + asm("twge 2,2"); + #elif V8_HOST_ARCH_IA32 +diff --git a/deps/v8/src/base/platform/platform-posix.cc.orig b/deps/v8/src/base/platform/platform-posix.cc.orig +new file mode 100644 +index 00000000..e5aa4de1 +--- /dev/null ++++ b/deps/v8/src/base/platform/platform-posix.cc.orig +@@ -0,0 +1,1027 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// Platform-specific code for POSIX goes here. This is not a platform on its ++// own, but contains the parts which are the same across the POSIX platforms ++// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX. ++ ++#include ++#include ++#include ++#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) ++#include // for pthread_set_name_np ++#endif ++#include // for sched_yield ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ ++ defined(__NetBSD__) || defined(__OpenBSD__) ++#include // NOLINT, for sysctl ++#endif ++ ++#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) ++#define LOG_TAG "v8" ++#include // NOLINT ++#endif ++ ++#include ++#include ++ ++#include "src/base/platform/platform-posix.h" ++ ++#include "src/base/lazy-instance.h" ++#include "src/base/macros.h" ++#include "src/base/platform/platform.h" ++#include "src/base/platform/time.h" ++#include "src/base/utils/random-number-generator.h" ++ ++#ifdef V8_FAST_TLS_SUPPORTED ++#include ++#endif ++ ++#if V8_OS_MACOSX ++#include ++#include ++#endif ++ ++#if V8_OS_LINUX ++#include // NOLINT, for prctl ++#endif ++ ++#if defined(V8_OS_FUCHSIA) ++#include ++#else ++#include ++#endif ++ ++#if !defined(_AIX) && !defined(V8_OS_FUCHSIA) ++#include ++#endif ++ ++#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS ++#define MAP_ANONYMOUS MAP_ANON ++#endif ++ ++#if defined(V8_OS_SOLARIS) ++#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__) ++extern "C" int madvise(caddr_t, size_t, int); ++#else ++extern int madvise(caddr_t, size_t, int); ++#endif ++#endif ++ ++#ifndef MADV_FREE ++#define MADV_FREE MADV_DONTNEED ++#endif ++ ++#if defined(V8_LIBC_GLIBC) ++extern "C" void* __libc_stack_end; // NOLINT ++#endif ++ ++namespace v8 { ++namespace base { ++ ++namespace { ++ ++// 0 is never a valid thread id. ++const pthread_t kNoThread = static_cast(0); ++ ++bool g_hard_abort = false; ++ ++const char* g_gc_fake_mmap = nullptr; ++ ++DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator, ++ GetPlatformRandomNumberGenerator) ++static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER; ++ ++#if !V8_OS_FUCHSIA ++#if V8_OS_MACOSX ++// kMmapFd is used to pass vm_alloc flags to tag the region with the user ++// defined tag 255 This helps identify V8-allocated regions in memory analysis ++// tools like vmmap(1). ++const int kMmapFd = VM_MAKE_TAG(255); ++#else // !V8_OS_MACOSX ++const int kMmapFd = -1; ++#endif // !V8_OS_MACOSX ++ ++const int kMmapFdOffset = 0; ++ ++// TODO(v8:10026): Add the right permission flag to make executable pages ++// guarded. 
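A quick sanity check of the LOONG64 mmap-hint mask introduced above: 0xFFFFFF0000 keeps bits 16 through 39 of the random value, so the hint always stays below 2^40 (leaving the kernel headroom within the 42-bit virtual address space) and is 64 KiB aligned. A self-contained check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr uint64_t kLoong64HintMask = 0xFFFFFF0000;  // bits 16..39
      static_assert((kLoong64HintMask >> 40) == 0, "hint stays below 2^40");
      static_assert((kLoong64HintMask & 0xFFFF) == 0, "hint is 64 KiB aligned");
      uint64_t raw_addr = 0x123456789ABCDEF0;  // stand-in for the random value
      assert((raw_addr & kLoong64HintMask) == 0x789ABC0000);
      return 0;
    }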
++int GetProtectionFromMemoryPermission(OS::MemoryPermission access) { ++ switch (access) { ++ case OS::MemoryPermission::kNoAccess: ++ case OS::MemoryPermission::kNoAccessWillJitLater: ++ return PROT_NONE; ++ case OS::MemoryPermission::kRead: ++ return PROT_READ; ++ case OS::MemoryPermission::kReadWrite: ++ return PROT_READ | PROT_WRITE; ++ case OS::MemoryPermission::kReadWriteExecute: ++ return PROT_READ | PROT_WRITE | PROT_EXEC; ++ case OS::MemoryPermission::kReadExecute: ++ return PROT_READ | PROT_EXEC; ++ } ++ UNREACHABLE(); ++} ++ ++int GetFlagsForMemoryPermission(OS::MemoryPermission access) { ++ int flags = MAP_PRIVATE | MAP_ANONYMOUS; ++ if (access == OS::MemoryPermission::kNoAccess) { ++#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX ++ flags |= MAP_NORESERVE; ++#endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX ++#if V8_OS_QNX ++ flags |= MAP_LAZY; ++#endif // V8_OS_QNX ++ } ++#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT) ++ if (access == OS::MemoryPermission::kNoAccessWillJitLater) { ++ flags |= MAP_JIT; ++ } ++#endif ++ return flags; ++} ++ ++void* Allocate(void* hint, size_t size, OS::MemoryPermission access) { ++ int prot = GetProtectionFromMemoryPermission(access); ++ int flags = GetFlagsForMemoryPermission(access); ++ void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset); ++ if (result == MAP_FAILED) return nullptr; ++ return result; ++} ++ ++#endif // !V8_OS_FUCHSIA ++ ++} // namespace ++ ++#if V8_OS_LINUX || V8_OS_FREEBSD ++#ifdef __arm__ ++ ++bool OS::ArmUsingHardFloat() { ++ // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify ++ // the Floating Point ABI used (PCS stands for Procedure Call Standard). ++ // We use these as well as a couple of other defines to statically determine ++ // what FP ABI used. ++ // GCC versions 4.4 and below don't support hard-fp. ++ // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or ++ // __ARM_PCS_VFP. ++ ++#define GCC_VERSION \ ++ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) ++#if GCC_VERSION >= 40600 && !defined(__clang__) ++#if defined(__ARM_PCS_VFP) ++ return true; ++#else ++ return false; ++#endif ++ ++#elif GCC_VERSION < 40500 && !defined(__clang__) ++ return false; ++ ++#else ++#if defined(__ARM_PCS_VFP) ++ return true; ++#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \ ++ !defined(__VFP_FP__) ++ return false; ++#else ++#error \ ++ "Your version of compiler does not report the FP ABI compiled for." \ ++ "Please report it on this issue" \ ++ "http://code.google.com/p/v8/issues/detail?id=2140" ++ ++#endif ++#endif ++#undef GCC_VERSION ++} ++ ++#endif // def __arm__ ++#endif ++ ++void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { ++ g_hard_abort = hard_abort; ++ g_gc_fake_mmap = gc_fake_mmap; ++} ++ ++int OS::ActivationFrameAlignment() { ++#if V8_TARGET_ARCH_ARM ++ // On EABI ARM targets this is required for fp correctness in the ++ // runtime system. ++ return 8; ++#elif V8_TARGET_ARCH_MIPS ++ return 8; ++#elif V8_TARGET_ARCH_S390 ++ return 8; ++#else ++ // Otherwise we just assume 16 byte alignment, i.e.: ++ // - With gcc 4.4 the tree vectorization optimizer can generate code ++ // that requires 16 byte alignment such as movdqa on x86. 
++ // - Mac OS X, PPC and Solaris (64-bit) activation frames must ++ // be 16 byte-aligned; see "Mac OS X ABI Function Call Guide" ++ return 16; ++#endif ++} ++ ++// static ++size_t OS::AllocatePageSize() { ++ return static_cast(sysconf(_SC_PAGESIZE)); ++} ++ ++// static ++size_t OS::CommitPageSize() { ++ static size_t page_size = getpagesize(); ++ return page_size; ++} ++ ++// static ++void OS::SetRandomMmapSeed(int64_t seed) { ++ if (seed) { ++ MutexGuard guard(rng_mutex.Pointer()); ++ GetPlatformRandomNumberGenerator()->SetSeed(seed); ++ } ++} ++ ++// static ++void* OS::GetRandomMmapAddr() { ++ uintptr_t raw_addr; ++ { ++ MutexGuard guard(rng_mutex.Pointer()); ++ GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr)); ++ } ++#if defined(__APPLE__) ++#if V8_TARGET_ARCH_ARM64 ++ DCHECK_EQ(1 << 14, AllocatePageSize()); ++ raw_addr = RoundDown(raw_addr, 1 << 14); ++#endif ++#endif ++#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ ++ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) ++ // If random hint addresses interfere with address ranges hard coded in ++ // sanitizers, bad things happen. This address range is copied from TSAN ++ // source but works with all tools. ++ // See crbug.com/539863. ++ raw_addr &= 0x007fffff0000ULL; ++ raw_addr += 0x7e8000000000ULL; ++#else ++#if V8_TARGET_ARCH_X64 ++ // Currently available CPUs have 48 bits of virtual addressing. Truncate ++ // the hint address to 46 bits to give the kernel a fighting chance of ++ // fulfilling our placement request. ++ raw_addr &= uint64_t{0x3FFFFFFFF000}; ++#elif V8_TARGET_ARCH_PPC64 ++#if V8_OS_AIX ++ // AIX: 64 bits of virtual addressing, but we limit address range to: ++ // a) minimize Segment Lookaside Buffer (SLB) misses and ++ raw_addr &= uint64_t{0x3FFFF000}; ++ // Use extra address space to isolate the mmap regions. ++ raw_addr += uint64_t{0x400000000000}; ++#elif V8_TARGET_BIG_ENDIAN ++ // Big-endian Linux: 42 bits of virtual addressing. ++ raw_addr &= uint64_t{0x03FFFFFFF000}; ++#else ++ // Little-endian Linux: 46 bits of virtual addressing. ++ raw_addr &= uint64_t{0x3FFFFFFF0000}; ++#endif ++#elif V8_TARGET_ARCH_S390X ++ // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits ++ // of virtual addressing. Truncate to 40 bits to allow kernel chance to ++ // fulfill request. ++ raw_addr &= uint64_t{0xFFFFFFF000}; ++#elif V8_TARGET_ARCH_S390 ++ // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance ++ // to fulfill request. ++ raw_addr &= 0x1FFFF000; ++#elif V8_TARGET_ARCH_MIPS64 ++ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance ++ // to fulfill request. ++ raw_addr &= uint64_t{0xFFFFFF0000}; ++#else ++ raw_addr &= 0x3FFFF000; ++ ++#ifdef __sun ++ // For our Solaris/illumos mmap hint, we pick a random address in the bottom ++ // half of the top half of the address space (that is, the third quarter). ++ // Because we do not MAP_FIXED, this will be treated only as a hint -- the ++ // system will not fail to mmap() because something else happens to already ++ // be mapped at our random address. We deliberately set the hint high enough ++ // to get well above the system's break (that is, the heap); Solaris and ++ // illumos will try the hint and if that fails allocate as if there were ++ // no hint at all. The high hint prevents the break from getting hemmed in ++ // at low values, ceding half of the address space to the system heap. 
++ raw_addr += 0x80000000; ++#elif V8_OS_AIX ++ // The range 0x30000000 - 0xD0000000 is available on AIX; ++ // choose the upper range. ++ raw_addr += 0x90000000; ++#else ++ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a ++ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos ++ // 10.6 and 10.7. ++ raw_addr += 0x20000000; ++#endif ++#endif ++#endif ++ return reinterpret_cast(raw_addr); ++} ++ ++// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files. ++#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA ++// static ++void* OS::Allocate(void* hint, size_t size, size_t alignment, ++ MemoryPermission access) { ++ size_t page_size = AllocatePageSize(); ++ DCHECK_EQ(0, size % page_size); ++ DCHECK_EQ(0, alignment % page_size); ++ hint = AlignedAddress(hint, alignment); ++ // Add the maximum misalignment so we are guaranteed an aligned base address. ++ size_t request_size = size + (alignment - page_size); ++ request_size = RoundUp(request_size, OS::AllocatePageSize()); ++ void* result = base::Allocate(hint, request_size, access); ++ if (result == nullptr) return nullptr; ++ ++ // Unmap memory allocated before the aligned base address. ++ uint8_t* base = static_cast(result); ++ uint8_t* aligned_base = reinterpret_cast( ++ RoundUp(reinterpret_cast(base), alignment)); ++ if (aligned_base != base) { ++ DCHECK_LT(base, aligned_base); ++ size_t prefix_size = static_cast(aligned_base - base); ++ CHECK(Free(base, prefix_size)); ++ request_size -= prefix_size; ++ } ++ // Unmap memory allocated after the potentially unaligned end. ++ if (size != request_size) { ++ DCHECK_LT(size, request_size); ++ size_t suffix_size = request_size - size; ++ CHECK(Free(aligned_base + size, suffix_size)); ++ request_size -= suffix_size; ++ } ++ ++ DCHECK_EQ(size, request_size); ++ return static_cast(aligned_base); ++} ++ ++// static ++bool OS::Free(void* address, const size_t size) { ++ DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); ++ DCHECK_EQ(0, size % AllocatePageSize()); ++ return munmap(address, size) == 0; ++} ++ ++// static ++bool OS::Release(void* address, size_t size) { ++ DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); ++ DCHECK_EQ(0, size % CommitPageSize()); ++ return munmap(address, size) == 0; ++} ++ ++// static ++bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { ++ DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); ++ DCHECK_EQ(0, size % CommitPageSize()); ++ ++ int prot = GetProtectionFromMemoryPermission(access); ++ int ret = mprotect(address, size, prot); ++ ++ // MacOS 11.2 on Apple Silicon refuses to switch permissions from ++ // rwx to none. Just use madvise instead. ++#if defined(V8_OS_MACOSX) ++ if (ret != 0 && access == OS::MemoryPermission::kNoAccess) { ++ ret = madvise(address, size, MADV_FREE_REUSABLE); ++ return ret == 0; ++ } ++#endif ++ ++ if (ret == 0 && access == OS::MemoryPermission::kNoAccess) { ++ // This is advisory; ignore errors and continue execution. ++ USE(DiscardSystemPages(address, size)); ++ } ++ ++// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after ++// changing permissions away from OS::MemoryPermission::kNoAccess. Since this ++// state is not kept at this layer, we always call this if access != kNoAccess. ++// The cost is a syscall that effectively no-ops. ++// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary. 
++// https://crbug.com/823915 ++#if defined(OS_MACOSX) ++ if (access != OS::MemoryPermission::kNoAccess) ++ madvise(address, size, MADV_FREE_REUSE); ++#endif ++ ++ return ret == 0; ++} ++ ++bool OS::DiscardSystemPages(void* address, size_t size) { ++ DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); ++ DCHECK_EQ(0, size % CommitPageSize()); ++#if defined(OS_MACOSX) ++ // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also ++ // marks the pages with the reusable bit, which allows both Activity Monitor ++ // and memory-infra to correctly track the pages. ++ int ret = madvise(address, size, MADV_FREE_REUSABLE); ++#elif defined(_AIX) || defined(V8_OS_SOLARIS) ++ int ret = madvise(reinterpret_cast(address), size, MADV_FREE); ++#else ++ int ret = madvise(address, size, MADV_FREE); ++#endif ++ if (ret != 0 && errno == ENOSYS) ++ return true; // madvise is not available on all systems. ++ if (ret != 0 && errno == EINVAL) { ++// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older ++// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't ++// imply runtime support. ++#if defined(_AIX) || defined(V8_OS_SOLARIS) ++ ret = madvise(reinterpret_cast(address), size, MADV_DONTNEED); ++#else ++ ret = madvise(address, size, MADV_DONTNEED); ++#endif ++ } ++ return ret == 0; ++} ++ ++// static ++bool OS::HasLazyCommits() { ++#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX ++ return true; ++#else ++ // TODO(bbudge) Return true for all POSIX platforms. ++ return false; ++#endif ++} ++#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA ++ ++const char* OS::GetGCFakeMMapFile() { ++ return g_gc_fake_mmap; ++} ++ ++ ++void OS::Sleep(TimeDelta interval) { ++ usleep(static_cast(interval.InMicroseconds())); ++} ++ ++ ++void OS::Abort() { ++ if (g_hard_abort) { ++ V8_IMMEDIATE_CRASH(); ++ } ++ // Redirect to std abort to signal abnormal program termination. ++ abort(); ++} ++ ++ ++void OS::DebugBreak() { ++#if V8_HOST_ARCH_ARM ++ asm("bkpt 0"); ++#elif V8_HOST_ARCH_ARM64 ++ asm("brk 0"); ++#elif V8_HOST_ARCH_MIPS ++ asm("break"); ++#elif V8_HOST_ARCH_MIPS64 ++ asm("break"); ++#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 ++ asm("twge 2,2"); ++#elif V8_HOST_ARCH_IA32 ++ asm("int $3"); ++#elif V8_HOST_ARCH_X64 ++ asm("int $3"); ++#elif V8_HOST_ARCH_S390 ++ // Software breakpoint instruction is 0x0001 ++ asm volatile(".word 0x0001"); ++#else ++#error Unsupported host architecture. ++#endif ++} ++ ++ ++class PosixMemoryMappedFile final : public OS::MemoryMappedFile { ++ public: ++ PosixMemoryMappedFile(FILE* file, void* memory, size_t size) ++ : file_(file), memory_(memory), size_(size) {} ++ ~PosixMemoryMappedFile() final; ++ void* memory() const final { return memory_; } ++ size_t size() const final { return size_; } ++ ++ private: ++ FILE* const file_; ++ void* const memory_; ++ size_t const size_; ++}; ++ ++ ++// static ++OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, ++ FileMode mode) { ++ const char* fopen_mode = (mode == FileMode::kReadOnly) ? 
"r" : "r+"; ++ if (FILE* file = fopen(name, fopen_mode)) { ++ if (fseek(file, 0, SEEK_END) == 0) { ++ long size = ftell(file); // NOLINT(runtime/int) ++ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0); ++ if (size > 0) { ++ int prot = PROT_READ; ++ int flags = MAP_PRIVATE; ++ if (mode == FileMode::kReadWrite) { ++ prot |= PROT_WRITE; ++ flags = MAP_SHARED; ++ } ++ void* const memory = ++ mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0); ++ if (memory != MAP_FAILED) { ++ return new PosixMemoryMappedFile(file, memory, size); ++ } ++ } ++ } ++ fclose(file); ++ } ++ return nullptr; ++} ++ ++// static ++OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, ++ size_t size, void* initial) { ++ if (FILE* file = fopen(name, "w+")) { ++ if (size == 0) return new PosixMemoryMappedFile(file, 0, 0); ++ size_t result = fwrite(initial, 1, size, file); ++ if (result == size && !ferror(file)) { ++ void* memory = mmap(OS::GetRandomMmapAddr(), result, ++ PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); ++ if (memory != MAP_FAILED) { ++ return new PosixMemoryMappedFile(file, memory, result); ++ } ++ } ++ fclose(file); ++ } ++ return nullptr; ++} ++ ++ ++PosixMemoryMappedFile::~PosixMemoryMappedFile() { ++ if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize()))); ++ fclose(file_); ++} ++ ++ ++int OS::GetCurrentProcessId() { ++ return static_cast(getpid()); ++} ++ ++ ++int OS::GetCurrentThreadId() { ++#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__)) ++ return static_cast(pthread_mach_thread_np(pthread_self())); ++#elif V8_OS_LINUX ++ return static_cast(syscall(__NR_gettid)); ++#elif V8_OS_ANDROID ++ return static_cast(gettid()); ++#elif V8_OS_AIX ++ return static_cast(thread_self()); ++#elif V8_OS_FUCHSIA ++ return static_cast(zx_thread_self()); ++#elif V8_OS_SOLARIS ++ return static_cast(pthread_self()); ++#else ++ return static_cast(reinterpret_cast(pthread_self())); ++#endif ++} ++ ++void OS::ExitProcess(int exit_code) { ++ // Use _exit instead of exit to avoid races between isolate ++ // threads and static destructors. ++ fflush(stdout); ++ fflush(stderr); ++ _exit(exit_code); ++} ++ ++// ---------------------------------------------------------------------------- ++// POSIX date/time support. ++// ++ ++#if !defined(V8_OS_FUCHSIA) ++int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { ++ struct rusage usage; ++ ++ if (getrusage(RUSAGE_SELF, &usage) < 0) return -1; ++ *secs = static_cast(usage.ru_utime.tv_sec); ++ *usecs = static_cast(usage.ru_utime.tv_usec); ++ return 0; ++} ++#endif ++ ++double OS::TimeCurrentMillis() { ++ return Time::Now().ToJsTime(); ++} ++ ++double PosixTimezoneCache::DaylightSavingsOffset(double time) { ++ if (std::isnan(time)) return std::numeric_limits::quiet_NaN(); ++ time_t tv = static_cast(std::floor(time/msPerSecond)); ++ struct tm tm; ++ struct tm* t = localtime_r(&tv, &tm); ++ if (nullptr == t) return std::numeric_limits::quiet_NaN(); ++ return t->tm_isdst > 0 ? 3600 * msPerSecond : 0; ++} ++ ++ ++int OS::GetLastError() { ++ return errno; ++} ++ ++ ++// ---------------------------------------------------------------------------- ++// POSIX stdio support. 
++// ++ ++FILE* OS::FOpen(const char* path, const char* mode) { ++ FILE* file = fopen(path, mode); ++ if (file == nullptr) return nullptr; ++ struct stat file_stat; ++ if (fstat(fileno(file), &file_stat) != 0) { ++ fclose(file); ++ return nullptr; ++ } ++ bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0); ++ if (is_regular_file) return file; ++ fclose(file); ++ return nullptr; ++} ++ ++ ++bool OS::Remove(const char* path) { ++ return (remove(path) == 0); ++} ++ ++char OS::DirectorySeparator() { return '/'; } ++ ++bool OS::isDirectorySeparator(const char ch) { ++ return ch == DirectorySeparator(); ++} ++ ++ ++FILE* OS::OpenTemporaryFile() { ++ return tmpfile(); ++} ++ ++ ++const char* const OS::LogFileOpenMode = "w"; ++ ++ ++void OS::Print(const char* format, ...) { ++ va_list args; ++ va_start(args, format); ++ VPrint(format, args); ++ va_end(args); ++} ++ ++ ++void OS::VPrint(const char* format, va_list args) { ++#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) ++ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); ++#else ++ vprintf(format, args); ++#endif ++} ++ ++ ++void OS::FPrint(FILE* out, const char* format, ...) { ++ va_list args; ++ va_start(args, format); ++ VFPrint(out, format, args); ++ va_end(args); ++} ++ ++ ++void OS::VFPrint(FILE* out, const char* format, va_list args) { ++#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) ++ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); ++#else ++ vfprintf(out, format, args); ++#endif ++} ++ ++ ++void OS::PrintError(const char* format, ...) { ++ va_list args; ++ va_start(args, format); ++ VPrintError(format, args); ++ va_end(args); ++} ++ ++ ++void OS::VPrintError(const char* format, va_list args) { ++#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) ++ __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args); ++#else ++ vfprintf(stderr, format, args); ++#endif ++} ++ ++ ++int OS::SNPrintF(char* str, int length, const char* format, ...) { ++ va_list args; ++ va_start(args, format); ++ int result = VSNPrintF(str, length, format, args); ++ va_end(args); ++ return result; ++} ++ ++ ++int OS::VSNPrintF(char* str, ++ int length, ++ const char* format, ++ va_list args) { ++ int n = vsnprintf(str, length, format, args); ++ if (n < 0 || n >= length) { ++ // If the length is zero, the assignment fails. ++ if (length > 0) ++ str[length - 1] = '\0'; ++ return -1; ++ } else { ++ return n; ++ } ++} ++ ++ ++// ---------------------------------------------------------------------------- ++// POSIX string support. ++// ++ ++void OS::StrNCpy(char* dest, int length, const char* src, size_t n) { ++ strncpy(dest, src, n); ++} ++ ++ ++// ---------------------------------------------------------------------------- ++// POSIX thread support. ++// ++ ++class Thread::PlatformData { ++ public: ++ PlatformData() : thread_(kNoThread) {} ++ pthread_t thread_; // Thread handle for pthread. 
++  // Synchronizes thread creation
++  Mutex thread_creation_mutex_;
++};
++
++Thread::Thread(const Options& options)
++    : data_(new PlatformData),
++      stack_size_(options.stack_size()),
++      start_semaphore_(nullptr) {
++  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
++    stack_size_ = PTHREAD_STACK_MIN;
++  }
++  set_name(options.name());
++}
++
++
++Thread::~Thread() {
++  delete data_;
++}
++
++
++static void SetThreadName(const char* name) {
++#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
++  pthread_set_name_np(pthread_self(), name);
++#elif V8_OS_NETBSD
++  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
++  pthread_setname_np(pthread_self(), "%s", name);
++#elif V8_OS_MACOSX
++  // pthread_setname_np is only available in 10.6 or later, so test
++  // for it at runtime.
++  int (*dynamic_pthread_setname_np)(const char*);
++  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
++      dlsym(RTLD_DEFAULT, "pthread_setname_np");
++  if (dynamic_pthread_setname_np == nullptr) return;
++
++  // Mac OS X does not expose the length limit of the name, so hardcode it.
++  static const int kMaxNameLength = 63;
++  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
++  dynamic_pthread_setname_np(name);
++#elif defined(PR_SET_NAME)
++  prctl(PR_SET_NAME,
++        reinterpret_cast<unsigned long>(name),  // NOLINT
++        0, 0, 0);
++#endif
++}
++
++
++static void* ThreadEntry(void* arg) {
++  Thread* thread = reinterpret_cast<Thread*>(arg);
++  // We take the lock here to make sure that pthread_create finished first since
++  // we don't know which thread will run first (the original thread or the new
++  // one).
++  { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
++  SetThreadName(thread->name());
++  DCHECK_NE(thread->data()->thread_, kNoThread);
++  thread->NotifyStartedAndRun();
++  return nullptr;
++}
++
++
++void Thread::set_name(const char* name) {
++  strncpy(name_, name, sizeof(name_) - 1);
++  name_[sizeof(name_) - 1] = '\0';
++}
++
++bool Thread::Start() {
++  int result;
++  pthread_attr_t attr;
++  memset(&attr, 0, sizeof(attr));
++  result = pthread_attr_init(&attr);
++  if (result != 0) return false;
++  size_t stack_size = stack_size_;
++  if (stack_size == 0) {
++#if V8_OS_MACOSX
++    // Default on Mac OS X is 512kB -- bump up to 1MB
++    stack_size = 1 * 1024 * 1024;
++#elif V8_OS_AIX
++    // Default on AIX is 96kB -- bump up to 2MB
++    stack_size = 2 * 1024 * 1024;
++#endif
++  }
++  if (stack_size > 0) {
++    result = pthread_attr_setstacksize(&attr, stack_size);
++    if (result != 0) return pthread_attr_destroy(&attr), false;
++  }
++  {
++    MutexGuard lock_guard(&data_->thread_creation_mutex_);
++    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
++    if (result != 0 || data_->thread_ == kNoThread) {
++      return pthread_attr_destroy(&attr), false;
++    }
++  }
++  result = pthread_attr_destroy(&attr);
++  return result == 0;
++}
++
++void Thread::Join() { pthread_join(data_->thread_, nullptr); }
++
++static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
++#if V8_OS_CYGWIN
++  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
++  // because pthread_key_t is a pointer type on Cygwin. This will probably not
++  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
++  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
++  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
++  return static_cast<Thread::LocalStorageKey>(ptr_key);
++#else
++  return static_cast<Thread::LocalStorageKey>(pthread_key);
++#endif
++}
++
++
++static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
++#if V8_OS_CYGWIN
++  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
++  intptr_t ptr_key = static_cast<intptr_t>(local_key);
++  return reinterpret_cast<pthread_key_t>(ptr_key);
++#else
++  return static_cast<pthread_key_t>(local_key);
++#endif
++}
++
++
++#ifdef V8_FAST_TLS_SUPPORTED
++
++static std::atomic<bool> tls_base_offset_initialized{false};
++intptr_t kMacTlsBaseOffset = 0;
++
++// It's safe to do the initialization more than once, but it has to be
++// done at least once.
++static void InitializeTlsBaseOffset() {
++  const size_t kBufferSize = 128;
++  char buffer[kBufferSize];
++  size_t buffer_size = kBufferSize;
++  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
++  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
++    FATAL("V8 failed to get kernel version");
++  }
++  // The buffer now contains a string of the form XX.YY.ZZ, where
++  // XX is the major kernel version component.
++  // Make sure the buffer is 0-terminated.
++  buffer[kBufferSize - 1] = '\0';
++  char* period_pos = strchr(buffer, '.');
++  *period_pos = '\0';
++  int kernel_version_major =
++      static_cast<int>(strtol(buffer, nullptr, 10));  // NOLINT
++  // The constants below are taken from pthreads.s from the XNU kernel
++  // sources archive at www.opensource.apple.com.
++  if (kernel_version_major < 11) {
++    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
++    // same offsets.
++#if V8_HOST_ARCH_IA32
++    kMacTlsBaseOffset = 0x48;
++#else
++    kMacTlsBaseOffset = 0x60;
++#endif
++  } else {
++    // 11.x.x (Lion) changed the offset.
++    kMacTlsBaseOffset = 0;
++  }
++
++  tls_base_offset_initialized.store(true, std::memory_order_release);
++}
++
++
++static void CheckFastTls(Thread::LocalStorageKey key) {
++  void* expected = reinterpret_cast<void*>(0x1234CAFE);
++  Thread::SetThreadLocal(key, expected);
++  void* actual = Thread::GetExistingThreadLocal(key);
++  if (expected != actual) {
++    FATAL("V8 failed to initialize fast TLS on current kernel");
++  }
++  Thread::SetThreadLocal(key, nullptr);
++}
++
++#endif  // V8_FAST_TLS_SUPPORTED
++
++
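Setting the Mac-specific fast path aside, the portable machinery in this part of the file is just the pthread TLS key API. A minimal sketch of what the create/set/get round trip reduces to (illustrative only, not part of the patch):

#include <pthread.h>

// One process-wide slot; each thread sees its own value in it.
void TlsRoundTrip() {
  pthread_key_t key;
  pthread_key_create(&key, nullptr);        // nullptr: no value destructor.
  pthread_setspecific(key, (void*)0x1234);  // Write this thread's value.
  void* value = pthread_getspecific(key);   // Read it back (0x1234 here).
  (void)value;
  pthread_key_delete(key);
}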
++Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
++#ifdef V8_FAST_TLS_SUPPORTED
++  bool check_fast_tls = false;
++  if (!tls_base_offset_initialized.load(std::memory_order_acquire)) {
++    check_fast_tls = true;
++    InitializeTlsBaseOffset();
++  }
++#endif
++  pthread_key_t key;
++  int result = pthread_key_create(&key, nullptr);
++  DCHECK_EQ(0, result);
++  USE(result);
++  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
++#ifdef V8_FAST_TLS_SUPPORTED
++  // If we just initialized fast TLS support, make sure it works.
++  if (check_fast_tls) CheckFastTls(local_key);
++#endif
++  return local_key;
++}
++
++
++void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
++  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
++  int result = pthread_key_delete(pthread_key);
++  DCHECK_EQ(0, result);
++  USE(result);
++}
++
++
++void* Thread::GetThreadLocal(LocalStorageKey key) {
++  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
++  return pthread_getspecific(pthread_key);
++}
++
++
++void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
++  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
++  int result = pthread_setspecific(pthread_key, value);
++  DCHECK_EQ(0, result);
++  USE(result);
++}
++
++// pthread_getattr_np used below is non portable (hence the _np suffix). We
++// keep this version in POSIX as most Linux-compatible derivatives will
++// support it. MacOS and FreeBSD are different here.
++#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \
++    !defined(V8_OS_SOLARIS)
++
++// static
++void* Stack::GetStackStart() {
++  pthread_attr_t attr;
++  int error = pthread_getattr_np(pthread_self(), &attr);
++  if (!error) {
++    void* base;
++    size_t size;
++    error = pthread_attr_getstack(&attr, &base, &size);
++    CHECK(!error);
++    pthread_attr_destroy(&attr);
++    return reinterpret_cast<uint8_t*>(base) + size;
++  }
++  pthread_attr_destroy(&attr);
++
++#if defined(V8_LIBC_GLIBC)
++  // pthread_getattr_np can fail for the main thread. In this case
++  // just like NaCl we rely on the __libc_stack_end to give us
++  // the start of the stack.
++  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
++  return __libc_stack_end;
++#endif  // !defined(V8_LIBC_GLIBC)
++  return nullptr;
++}
++
++#endif  // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) &&
++        // !defined(_AIX) && !defined(V8_OS_SOLARIS)
++
++// static
++void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
++
++#undef LOG_TAG
++#undef MAP_ANONYMOUS
++#undef MADV_FREE
++
++}  // namespace base
++}  // namespace v8
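Before the patch moves on to the LoongArch builtins file, one note on the stack-bounds code above: pthread_getattr_np is a glibc extension (hence the __libc_stack_end fallback). A stand-alone sketch, assuming glibc with _GNU_SOURCE defined (illustrative only, not part of the patch; StackStart is an invented name):

#define _GNU_SOURCE  // Needed for pthread_getattr_np on glibc.
#include <pthread.h>
#include <stdint.h>

void* StackStart() {
  pthread_attr_t attr;
  if (pthread_getattr_np(pthread_self(), &attr) != 0) return nullptr;
  void* base = nullptr;  // Lowest address of the stack region.
  size_t size = 0;
  int err = pthread_attr_getstack(&attr, &base, &size);
  pthread_attr_destroy(&attr);
  if (err != 0) return nullptr;
  // Stacks grow downward, so the logical "start" is the highest address.
  return (void*)((uintptr_t)base + size);
}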
++#include "src/codegen/loong64/constants-loong64.h" ++#include "src/codegen/macro-assembler-inl.h" ++#include "src/codegen/register-configuration.h" ++#include "src/heap/heap-inl.h" ++#include "src/objects/cell.h" ++#include "src/objects/foreign.h" ++#include "src/objects/heap-number.h" ++#include "src/objects/js-generator.h" ++#include "src/objects/objects-inl.h" ++#include "src/objects/smi.h" ++#include "src/runtime/runtime.h" ++#include "src/wasm/wasm-linkage.h" ++#include "src/wasm/wasm-objects.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { ++ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), ++ RelocInfo::CODE_TARGET); ++} ++ ++static void GenerateTailCallToReturnedCode(MacroAssembler* masm, ++ Runtime::FunctionId function_id) { ++ // ----------- S t a t e ------------- ++ // -- a1 : target function (preserved for callee) ++ // -- a3 : new target (preserved for callee) ++ // ----------------------------------- ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ // Push a copy of the function onto the stack. ++ // Push a copy of the target function and the new target. ++ __ Push(a1, a3, a1); ++ ++ __ CallRuntime(function_id, 1); ++ __ LoadCodeObjectEntry(a2, a0); ++ // Restore target function and new target. ++ __ Pop(a1, a3); ++ } ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Jump(a2); ++} ++ ++namespace { ++ ++enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; ++ ++void LoadStackLimit(MacroAssembler* masm, Register destination, ++ StackLimitKind kind) { ++ DCHECK(masm->root_array_available()); ++ Isolate* isolate = masm->isolate(); ++ ExternalReference limit = ++ kind == StackLimitKind::kRealStackLimit ++ ? ExternalReference::address_of_real_jslimit(isolate) ++ : ExternalReference::address_of_jslimit(isolate); ++ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); ++ ++ intptr_t offset = ++ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); ++ CHECK(is_int32(offset)); ++ __ Ld_d(destination, MemOperand(kRootRegister, static_cast(offset))); ++} ++ ++void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : number of arguments ++ // -- a1 : constructor function ++ // -- a3 : new target ++ // -- cp : context ++ // -- ra : return address ++ // -- sp[...]: constructor arguments ++ // ----------------------------------- ++ ++ // Enter a construct frame. ++ { ++ FrameScope scope(masm, StackFrame::CONSTRUCT); ++ ++ // Preserve the incoming parameters on the stack. ++ __ SmiTag(a0); ++ __ Push(cp, a0); ++ __ SmiUntag(a0); ++ ++ // The receiver for the builtin/api call. ++ __ PushRoot(RootIndex::kTheHoleValue); ++ ++ // Set up pointer to last argument. ++ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); ++ ++ // Copy arguments and receiver to the expression stack. 
++    Label loop, entry;
++    __ mov(t3, a0);
++    // ----------- S t a t e -------------
++    //  -- a0: number of arguments (untagged)
++    //  -- a3: new target
++    //  -- t2: pointer to last argument
++    //  -- t3: counter
++    //  -- sp[0*kPointerSize]: the hole (receiver)
++    //  -- sp[1*kPointerSize]: number of arguments (tagged)
++    //  -- sp[2*kPointerSize]: context
++    // -----------------------------------
++    __ jmp(&entry);
++    __ bind(&loop);
++    __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7);
++    __ Ld_d(t1, MemOperand(t0, 0));
++    __ push(t1);
++    __ bind(&entry);
++    __ Add_d(t3, t3, Operand(-1));
++    __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
++
++    // Call the function.
++    // a0: number of arguments (untagged)
++    // a1: constructor function
++    // a3: new target
++    __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
++
++    // Restore context from the frame.
++    __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
++    // Restore smi-tagged arguments count from the frame.
++    __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
++    // Leave construct frame.
++  }
++
++  // Remove caller arguments from the stack and return.
++  __ SmiScale(a4, a1, kPointerSizeLog2);
++  __ Add_d(sp, sp, a4);
++  __ Add_d(sp, sp, kPointerSize);
++  __ Ret();
++}
++
++static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
++                                        Register scratch1, Register scratch2,
++                                        Label* stack_overflow) {
++  // Check the stack for overflow. We are not trying to catch
++  // interruptions (e.g. debug break and preemption) here, so the "real stack
++  // limit" is checked.
++  LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
++  // Make scratch1 the space we have left. The stack might already be overflowed
++  // here which will cause scratch1 to become negative.
++  __ sub_d(scratch1, sp, scratch1);
++  // Check if the arguments will overflow the stack.
++  __ slli_d(scratch2, num_args, kPointerSizeLog2);
++  // Signed comparison.
++  __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
++}
++
++}  // namespace
++
++// The construct stub for ES5 constructor functions and ES6 class constructors.
++void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
++  // ----------- S t a t e -------------
++  //  -- a0: number of arguments (untagged)
++  //  -- a1: constructor function
++  //  -- a3: new target
++  //  -- cp: context
++  //  -- ra: return address
++  //  -- sp[...]: constructor arguments
++  // -----------------------------------
++
++  // Enter a construct frame.
++  {
++    FrameScope scope(masm, StackFrame::CONSTRUCT);
++    Label post_instantiation_deopt_entry, not_create_implicit_receiver;
++
++    // Preserve the incoming parameters on the stack.
++    __ SmiTag(a0);
++    __ Push(cp, a0, a1);
++    __ PushRoot(RootIndex::kTheHoleValue);
++    __ Push(a3);
++
++    // ----------- S t a t e -------------
++    //  -- sp[0*kPointerSize]: new target
++    //  -- sp[1*kPointerSize]: padding
++    //  -- a1 and sp[2*kPointerSize]: constructor function
++    //  -- sp[3*kPointerSize]: number of arguments (tagged)
++    //  -- sp[4*kPointerSize]: context
++    // -----------------------------------
++
++    __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
++    __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
++    __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
++    __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
++                       &not_create_implicit_receiver);
++
++    // If not derived class constructor: Allocate the new receiver object.
++ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, ++ t2, t3); ++ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), ++ RelocInfo::CODE_TARGET); ++ __ Branch(&post_instantiation_deopt_entry); ++ ++ // Else: use TheHoleValue as receiver for constructor call ++ __ bind(¬_create_implicit_receiver); ++ __ LoadRoot(a0, RootIndex::kTheHoleValue); ++ ++ // ----------- S t a t e ------------- ++ // -- a0: receiver ++ // -- Slot 4 / sp[0*kPointerSize]: new target ++ // -- Slot 3 / sp[1*kPointerSize]: padding ++ // -- Slot 2 / sp[2*kPointerSize]: constructor function ++ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) ++ // -- Slot 0 / sp[4*kPointerSize]: context ++ // ----------------------------------- ++ // Deoptimizer enters here. ++ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( ++ masm->pc_offset()); ++ __ bind(&post_instantiation_deopt_entry); ++ ++ // Restore new target. ++ __ Pop(a3); ++ // Push the allocated receiver to the stack. We need two copies ++ // because we may have to return the original one and the calling ++ // conventions dictate that the called function pops the receiver. ++ __ Push(a0, a0); ++ ++ // ----------- S t a t e ------------- ++ // -- r3: new target ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: implicit receiver ++ // -- sp[2*kPointerSize]: padding ++ // -- sp[3*kPointerSize]: constructor function ++ // -- sp[4*kPointerSize]: number of arguments (tagged) ++ // -- sp[5*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Restore constructor function and argument count. ++ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); ++ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ __ SmiUntag(a0); ++ ++ // Set up pointer to last argument. ++ __ Add_d(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); ++ ++ Label enough_stack_space, stack_overflow; ++ Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow); ++ __ Branch(&enough_stack_space); ++ ++ __ bind(&stack_overflow); ++ // Restore the context from the frame. ++ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ ++ __ bind(&enough_stack_space); ++ ++ // Copy arguments and receiver to the expression stack. ++ Label loop, entry; ++ __ mov(t3, a0); ++ // ----------- S t a t e ------------- ++ // -- a0: number of arguments (untagged) ++ // -- a3: new target ++ // -- t2: pointer to last argument ++ // -- t3: counter ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: implicit receiver ++ // -- sp[2*kPointerSize]: padding ++ // -- a1 and sp[3*kPointerSize]: constructor function ++ // -- sp[4*kPointerSize]: number of arguments (tagged) ++ // -- sp[5*kPointerSize]: context ++ // ----------------------------------- ++ __ jmp(&entry); ++ __ bind(&loop); ++ __ Alsl_d(t0, t3, t2, kPointerSizeLog2, t7); ++ __ Ld_d(t1, MemOperand(t0, 0)); ++ __ push(t1); ++ __ bind(&entry); ++ __ Add_d(t3, t3, Operand(-1)); ++ __ Branch(&loop, greater_equal, t3, Operand(zero_reg)); ++ ++ // Call the function. 
++ __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); ++ ++ // ----------- S t a t e ------------- ++ // -- t5: constructor result ++ // -- sp[0*kPointerSize]: implicit receiver ++ // -- sp[1*kPointerSize]: padding ++ // -- sp[2*kPointerSize]: constructor function ++ // -- sp[3*kPointerSize]: number of arguments ++ // -- sp[4*kPointerSize]: context ++ // ----------------------------------- ++ ++ // Store offset of return address for deoptimizer. ++ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( ++ masm->pc_offset()); ++ ++ // Restore the context from the frame. ++ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); ++ ++ // If the result is an object (in the ECMA sense), we should get rid ++ // of the receiver and use the result; see ECMA-262 section 13.2.2-7 ++ // on page 74. ++ Label use_receiver, do_throw, leave_frame; ++ ++ // If the result is undefined, we jump out to using the implicit receiver. ++ __ JumpIfRoot(a0, RootIndex::kUndefinedValue, &use_receiver); ++ ++ // Otherwise we do a smi check and fall through to check if the return value ++ // is a valid receiver. ++ ++ // If the result is a smi, it is *not* an object in the ECMA sense. ++ __ JumpIfSmi(a0, &use_receiver); ++ ++ // If the type of the result (stored in its map) is less than ++ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. ++ __ GetObjectType(a0, t2, t2); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE)); ++ __ Branch(&use_receiver); ++ ++ __ bind(&do_throw); ++ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); ++ ++ // Throw away the result of the constructor invocation and use the ++ // on-stack receiver as the result. ++ __ bind(&use_receiver); ++ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); ++ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw); ++ ++ __ bind(&leave_frame); ++ // Restore smi-tagged arguments count from the frame. ++ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); ++ // Leave construct frame. ++ } ++ // Remove caller arguments from the stack and return. ++ __ SmiScale(a4, a1, kPointerSizeLog2); ++ __ Add_d(sp, sp, a4); ++ __ Add_d(sp, sp, kPointerSize); ++ __ Ret(); ++} ++ ++void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { ++ Generate_JSBuiltinsConstructStubHelper(masm); ++} ++ ++static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, ++ Register sfi_data, ++ Register scratch1) { ++ Label done; ++ ++ __ GetObjectType(sfi_data, scratch1, scratch1); ++ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); ++ __ Ld_d(sfi_data, ++ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); ++ ++ __ bind(&done); ++} ++ ++// static ++void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the value to pass to the generator ++ // -- a1 : the JSGeneratorObject to resume ++ // -- ra : return address ++ // ----------------------------------- ++ __ AssertGeneratorObject(a1); ++ ++ // Store input value into generator object. ++ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); ++ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3, ++ kRAHasNotBeenSaved, kDontSaveFPRegs); ++ ++ // Load suspended function and context. 
++ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); ++ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset)); ++ ++ // Flood function if we are stepping. ++ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; ++ Label stepping_prepared; ++ ExternalReference debug_hook = ++ ExternalReference::debug_hook_on_function_call_address(masm->isolate()); ++ __ li(a5, debug_hook); ++ __ Ld_b(a5, MemOperand(a5, 0)); ++ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg)); ++ ++ // Flood function if we need to continue stepping in the suspended generator. ++ ExternalReference debug_suspended_generator = ++ ExternalReference::debug_suspended_generator_address(masm->isolate()); ++ __ li(a5, debug_suspended_generator); ++ __ Ld_d(a5, MemOperand(a5, 0)); ++ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5)); ++ __ bind(&stepping_prepared); ++ ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ Label stack_overflow; ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); ++ ++ // Push receiver. ++ __ Ld_d(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); ++ __ Push(a5); ++ ++ // ----------- S t a t e ------------- ++ // -- a1 : the JSGeneratorObject to resume ++ // -- a4 : generator function ++ // -- cp : generator context ++ // -- ra : return address ++ // -- sp[0] : generator receiver ++ // ----------------------------------- ++ ++ // Push holes for arguments to generator function. Since the parser forced ++ // context allocation for any variables in generators, the actual argument ++ // values have already been copied into the context and these dummy values ++ // will never be used. ++ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu( ++ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ Ld_d(t1, FieldMemOperand( ++ a1, JSGeneratorObject::kParametersAndRegistersOffset)); ++ { ++ Label done_loop, loop; ++ __ Move(t2, zero_reg); ++ __ bind(&loop); ++ __ Sub_d(a3, a3, Operand(1)); ++ __ Branch(&done_loop, lt, a3, Operand(zero_reg)); ++ __ Alsl_d(kScratchReg, t2, t1, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); ++ __ Push(kScratchReg); ++ __ Add_d(t2, t2, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Underlying function needs to have bytecode available. ++ if (FLAG_debug_code) { ++ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); ++ GetSharedFunctionInfoBytecode(masm, a3, t5); ++ __ GetObjectType(a3, a3, a3); ++ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, ++ Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Resume (Ignition/TurboFan) generator object. ++ { ++ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu(a0, FieldMemOperand( ++ a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ // We abuse new.target both to indicate that this is a resume call and to ++ // pass in the generator object. In ordinary calls, new.target is always ++ // undefined because generator functions are non-constructable. 
++    __ Move(a3, a1);
++    __ Move(a1, a4);
++    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
++    __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
++    __ JumpCodeObject(a2);
++  }
++
++  __ bind(&prepare_step_in_if_stepping);
++  {
++    FrameScope scope(masm, StackFrame::INTERNAL);
++    __ Push(a1, a4);
++    // Push hole as receiver since we do not use it for stepping.
++    __ PushRoot(RootIndex::kTheHoleValue);
++    __ CallRuntime(Runtime::kDebugOnFunctionCall);
++    __ Pop(a1);
++  }
++  __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
++  __ Branch(&stepping_prepared);
++
++  __ bind(&prepare_step_in_suspended_generator);
++  {
++    FrameScope scope(masm, StackFrame::INTERNAL);
++    __ Push(a1);
++    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
++    __ Pop(a1);
++  }
++  __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
++  __ Branch(&stepping_prepared);
++
++  __ bind(&stack_overflow);
++  {
++    FrameScope scope(masm, StackFrame::INTERNAL);
++    __ CallRuntime(Runtime::kThrowStackOverflow);
++    __ break_(0xCC);  // This should be unreachable.
++  }
++}
++
++void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
++  FrameScope scope(masm, StackFrame::INTERNAL);
++  __ Push(a1);
++  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
++}
++
++// Clobbers scratch1 and scratch2; preserves all other registers.
++static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
++                                        Register scratch1, Register scratch2) {
++  // Check the stack for overflow. We are not trying to catch
++  // interruptions (e.g. debug break and preemption) here, so the "real stack
++  // limit" is checked.
++  Label okay;
++  LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
++  // Make a2 the space we have left. The stack might already be overflowed
++  // here which will cause r2 to become negative.
++  __ sub_d(scratch1, sp, scratch1);
++  // Check if the arguments will overflow the stack.
++  __ slli_d(scratch2, argc, kPointerSizeLog2);
++  __ Branch(&okay, gt, scratch1, Operand(scratch2));  // Signed comparison.
++
++  // Out of stack space.
++  __ CallRuntime(Runtime::kThrowStackOverflow);
++
++  __ bind(&okay);
++}
++
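Generate_CheckStackOverflow just above (and Generate_StackOverflowCheck earlier in this file) emit the same test; modeled in plain C++ it amounts to the following (illustrative only, not part of the patch; WouldOverflow is an invented name, and kPointerSizeLog2 is 3 for the 64-bit target):

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers on loong64.

// True if pushing `argc` pointer-sized arguments would cross the real
// stack limit. The subtraction may already be "negative" if the stack has
// overflowed, which is why the generated code uses a signed comparison
// (the `le`/`gt` branches above).
bool WouldOverflow(uintptr_t sp, uintptr_t real_stack_limit, int64_t argc) {
  int64_t space_left = static_cast<int64_t>(sp - real_stack_limit);
  int64_t needed = argc << kPointerSizeLog2;
  return space_left <= needed;
}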
++namespace {
++
++// Called with the native C calling convention. The corresponding function
++// signature is either:
++//
++//   using JSEntryFunction = GeneratedCode<Address(
++//       Address root_register_value, Address new_target, Address target,
++//       Address receiver, intptr_t argc, Address** argv)>;
++// or
++//   using JSEntryFunction = GeneratedCode<Address(
++//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
++void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
++                             Builtins::Name entry_trampoline) {
++  Label invoke, handler_entry, exit;
++
++  {
++    NoRootArrayScope no_root_array(masm);
++
++    // TODO(plind): unify the ABI description here.
++    // Registers:
++    //  either
++    //   a0: root register value
++    //   a1: entry address
++    //   a2: function
++    //   a3: receiver
++    //   a4: argc
++    //   a5: argv
++    //  or
++    //   a0: root register value
++    //   a1: microtask_queue
++    //
++    // Stack:
++    // 0 arg slots on mips64 (4 args slots on mips)
++
++    // Save callee saved registers on the stack.
++    __ MultiPush(kCalleeSaved | ra.bit());
++
++    // Save callee-saved FPU registers.
++    __ MultiPushFPU(kCalleeSavedFPU);
++    // Set up the reserved register for 0.0.
++    __ Move(kDoubleRegZero, 0.0);
++
++    // Initialize the root register.
++    // C calling convention. The first argument is passed in a0.
++    __ mov(kRootRegister, a0);
++  }
++
++  // a1: entry address
++  // a2: function
++  // a3: receiver
++  // a4: argc
++  // a5: argv
++
++  // We build an EntryFrame.
++  __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
++  __ li(s2, Operand(StackFrame::TypeToMarker(type)));
++  __ li(s3, Operand(StackFrame::TypeToMarker(type)));
++  ExternalReference c_entry_fp = ExternalReference::Create(
++      IsolateAddressId::kCEntryFPAddress, masm->isolate());
++  __ li(s4, c_entry_fp);
++  __ Ld_d(s4, MemOperand(s4, 0));
++  __ Push(s1, s2, s3, s4);
++  // Set up frame pointer for the frame to be pushed.
++  __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
++
++  // Registers:
++  //  either
++  //   a1: entry address
++  //   a2: function
++  //   a3: receiver
++  //   a4: argc
++  //   a5: argv
++  //  or
++  //   a1: microtask_queue
++  //
++  // Stack:
++  // caller fp          |
++  // function slot      | entry frame
++  // context slot       |
++  // bad fp (0xFF...F)  |
++  // callee saved registers + ra
++  // [ O32: 4 args slots]
++  // args
++
++  // If this is the outermost JS call, set js_entry_sp value.
++  Label non_outermost_js;
++  ExternalReference js_entry_sp = ExternalReference::Create(
++      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
++  __ li(s1, js_entry_sp);
++  __ Ld_d(s2, MemOperand(s1, 0));
++  __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
++  __ St_d(fp, MemOperand(s1, 0));
++  __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
++  Label cont;
++  __ b(&cont);
++  __ nop();  // Branch delay slot nop.
++  __ bind(&non_outermost_js);
++  __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
++  __ bind(&cont);
++  __ push(s3);
++
++  // Jump to a faked try block that does the invoke, with a faked catch
++  // block that sets the pending exception.
++  __ jmp(&invoke);
++  __ bind(&handler_entry);
++
++  // Store the current pc as the handler offset. It's used later to create the
++  // handler table.
++  masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
++
++  // Caught exception: Store result (exception) in the pending exception
++  // field in the JSEnv and return a failure sentinel. Coming in here the
++  // fp will be invalid because the PushStackHandler below sets it to 0 to
++  // signal the existence of the JSEntry frame.
++  __ li(s1, ExternalReference::Create(
++                IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
++  __ St_d(a0,
++          MemOperand(s1, 0));  // We come back from 'invoke'. result is in a0.
++  __ LoadRoot(a0, RootIndex::kException);
++  __ b(&exit);  // b exposes branch delay slot.
++  __ nop();     // Branch delay slot nop.
++
++  // Invoke: Link this frame into the handler chain.
++  __ bind(&invoke);
++  __ PushStackHandler();
++  // If an exception not caught by another handler occurs, this handler
++  // returns control to the code after the bal(&invoke) above, which
++  // restores all kCalleeSaved registers (including cp and fp) to their
++  // saved values before returning a failure to C.
++  //
++  // Registers:
++  //  either
++  //   a0: root register value
++  //   a1: entry address
++  //   a2: function
++  //   a3: receiver
++  //   a4: argc
++  //   a5: argv
++  //  or
++  //   a0: root register value
++  //   a1: microtask_queue
++  //
++  // Stack:
++  // handler frame
++  // entry frame
++  // callee saved registers + ra
++  // [ O32: 4 args slots]
++  // args
++  //
++  // Invoke the function by calling through JS entry trampoline builtin and
++  // pop the faked function when we return.
++
++  Handle<Code> trampoline_code =
++      masm->isolate()->builtins()->builtin_handle(entry_trampoline);
++  __ Call(trampoline_code, RelocInfo::CODE_TARGET);
++
++  // Unlink this frame from the handler chain.
++  __ PopStackHandler();
++
++  __ bind(&exit);  // a0 holds result
++  // Check if the current stack frame is marked as the outermost JS frame.
++  Label non_outermost_js_2;
++  __ pop(a5);
++  __ Branch(&non_outermost_js_2, ne, a5,
++            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
++  __ li(a5, js_entry_sp);
++  __ St_d(zero_reg, MemOperand(a5, 0));
++  __ bind(&non_outermost_js_2);
++
++  // Restore the top frame descriptors from the stack.
++  __ pop(a5);
++  __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
++                                      masm->isolate()));
++  __ St_d(a5, MemOperand(a4, 0));
++
++  // Reset the stack to the callee saved registers.
++  __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
++
++  // Restore callee-saved fpu registers.
++  __ MultiPopFPU(kCalleeSavedFPU);
++
++  // Restore callee saved registers from the stack.
++  __ MultiPop(kCalleeSaved | ra.bit());
++  // Return.
++  __ Jump(ra);
++}
++
++}  // namespace
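The outermost-versus-inner entry bookkeeping in Generate_JSEntryVariant above can be modeled in a few lines of C++. This is an illustrative model only, not part of the patch: the names are invented, and the real slot lives at IsolateAddressId::kJSEntrySPAddress in the isolate.

#include <cstdint>

uintptr_t js_entry_sp = 0;  // Per-isolate slot; 0 means "no JS on this stack".

enum FrameMarker { kOutermostJSEntry, kInnerJSEntry };

FrameMarker EnterJS(uintptr_t fp) {
  if (js_entry_sp == 0) {  // First JS entry: remember our frame pointer.
    js_entry_sp = fp;
    return kOutermostJSEntry;
  }
  return kInnerJSEntry;    // Nested entry: leave the slot untouched.
}

void LeaveJS(FrameMarker marker) {
  // Only the frame that set the slot clears it, mirroring the exit path.
  if (marker == kOutermostJSEntry) js_entry_sp = 0;
}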
++
++void Builtins::Generate_JSEntry(MacroAssembler* masm) {
++  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
++                          Builtins::kJSEntryTrampoline);
++}
++
++void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
++  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
++                          Builtins::kJSConstructEntryTrampoline);
++}
++
++void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
++  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
++                          Builtins::kRunMicrotasksTrampoline);
++}
++
++static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
++                                             bool is_construct) {
++  // ----------- S t a t e -------------
++  //  -- a1: new.target
++  //  -- a2: function
++  //  -- a3: receiver_pointer
++  //  -- a4: argc
++  //  -- a5: argv
++  // -----------------------------------
++
++  // Enter an internal frame.
++  {
++    FrameScope scope(masm, StackFrame::INTERNAL);
++
++    // Setup the context (we need to use the caller context from the isolate).
++    ExternalReference context_address = ExternalReference::Create(
++        IsolateAddressId::kContextAddress, masm->isolate());
++    __ li(cp, context_address);
++    __ Ld_d(cp, MemOperand(cp, 0));
++
++    // Push the function and the receiver onto the stack.
++    __ Push(a2, a3);
++
++    // Check if we have enough stack space to push all arguments.
++    // Clobbers a0 and a3.
++    Generate_CheckStackOverflow(masm, a4, t5, a3);
++
++    // Setup new.target, function and argc.
++    __ mov(a3, a1);
++    __ mov(a1, a2);
++    __ mov(a0, a4);
++
++    // a0: argc
++    // a1: function
++    // a3: new.target
++    // a5: argv
++
++    // Copy arguments to the stack in a loop.
++    // a3: argc
++    // a5: argv, i.e. points to first arg
++    Label loop, entry;
++    __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7);
++    __ b(&entry);
++    __ nop();  // Branch delay slot nop.
++    // s1 points past last arg.
++    __ bind(&loop);
++    __ Ld_d(s2, MemOperand(a5, 0));  // Read next parameter.
++    __ addi_d(a5, a5, kPointerSize);
++    __ Ld_d(s2, MemOperand(s2, 0));  // Dereference handle.
++    __ push(s2);                     // Push parameter.
++    __ bind(&entry);
++    __ Branch(&loop, ne, a5, Operand(s1));
++
++    // a0: argc
++    // a1: function
++    // a3: new.target
++
++    // Initialize all JavaScript callee-saved registers, since they will be seen
++    // by the garbage collector as part of handlers.
++    __ LoadRoot(a4, RootIndex::kUndefinedValue);
++    __ mov(a5, a4);
++    __ mov(s1, a4);
++    __ mov(s2, a4);
++    __ mov(s3, a4);
++    __ mov(s4, a4);
++    __ mov(s5, a4);
++    // s6 holds the root address. Do not clobber.
++    // s7 is cp. Do not init.
++
++    // Invoke the code.
++    Handle<Code> builtin = is_construct
++                               ?
BUILTIN_CODE(masm->isolate(), Construct) ++ : masm->isolate()->builtins()->Call(); ++ __ Call(builtin, RelocInfo::CODE_TARGET); ++ ++ // Leave internal frame. ++ } ++ __ Jump(ra); ++} ++ ++void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, false); ++} ++ ++void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { ++ Generate_JSEntryTrampolineHelper(masm, true); ++} ++ ++void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { ++ // a1: microtask_queue ++ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); ++ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); ++} ++ ++static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, ++ Register optimized_code, ++ Register closure, ++ Register scratch1, ++ Register scratch2) { ++ // Store code entry in the closure. ++ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); ++ __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. ++ __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, ++ kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, ++ OMIT_SMI_CHECK); ++} ++ ++static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { ++ Register args_count = scratch; ++ ++ // Get the arguments + receiver count. ++ __ Ld_d(args_count, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_w(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset)); ++ ++ // Leave the frame (also dropping the register file). ++ __ LeaveFrame(StackFrame::INTERPRETED); ++ ++ // Drop receiver + arguments. ++ __ Add_d(sp, sp, args_count); ++} ++ ++// Tail-call |function_id| if |smi_entry| == |marker| ++static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, ++ Register smi_entry, ++ OptimizationMarker marker, ++ Runtime::FunctionId function_id) { ++ Label no_match; ++ __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker))); ++ GenerateTailCallToReturnedCode(masm, function_id); ++ __ bind(&no_match); ++} ++ ++static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ++ Register optimized_code_entry, ++ Register scratch1, Register scratch2) { ++ // ----------- S t a t e ------------- ++ // -- a3 : new target (preserved for callee if needed, and caller) ++ // -- a1 : target function (preserved for callee if needed, and caller) ++ // ----------------------------------- ++ DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); ++ ++ Register closure = a1; ++ ++ // Check if the optimized code is marked for deopt. If it is, call the ++ // runtime to clear it. ++ Label found_deoptimized_code; ++ __ Ld_d(a5, FieldMemOperand(optimized_code_entry, ++ Code::kCodeDataContainerOffset)); ++ __ Ld_w(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Branch(&found_deoptimized_code, ne, a5, Operand(zero_reg)); ++ ++ // Optimized code is good, get it into the closure and link the closure into ++ // the optimized functions list, then tail call the optimized code. ++ // The feedback vector is no longer used, so re-use it as a scratch ++ // register. 
++ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, ++ scratch1, scratch2); ++ ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ LoadCodeObjectEntry(a2, optimized_code_entry); ++ __ Jump(a2); ++ ++ // Optimized code slot contains deoptimized code, evict it and re-enter the ++ // closure's code. ++ __ bind(&found_deoptimized_code); ++ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot); ++} ++ ++static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ++ Register optimization_marker) { ++ // ----------- S t a t e ------------- ++ // -- a3 : new target (preserved for callee if needed, and caller) ++ // -- a1 : target function (preserved for callee if needed, and caller) ++ // -- feedback vector (preserved for caller if needed) ++ // -- optimization_marker : a Smi containing a non-zero optimization marker. ++ // ----------------------------------- ++ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker)); ++ ++ // TODO(v8:8394): The logging of first execution will break if ++ // feedback vectors are not allocated. We need to find a different way of ++ // logging these events if required. ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kLogFirstExecution, ++ Runtime::kFunctionFirstExecution); ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kCompileOptimized, ++ Runtime::kCompileOptimized_NotConcurrent); ++ TailCallRuntimeIfMarkerEquals(masm, optimization_marker, ++ OptimizationMarker::kCompileOptimizedConcurrent, ++ Runtime::kCompileOptimized_Concurrent); ++ ++ // Otherwise, the marker is InOptimizationQueue, so fall through hoping ++ // that an interrupt will eventually update the slot with optimized code. ++ if (FLAG_debug_code) { ++ __ Assert(eq, AbortReason::kExpectedOptimizationSentinel, ++ optimization_marker, ++ Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); ++ } ++} ++ ++// Advance the current bytecode offset. This simulates what all bytecode ++// handlers do upon completion of the underlying operation. Will bail out to a ++// label if the bytecode (without prefix) is a return bytecode. Will not advance ++// the bytecode offset if the current bytecode is a JumpLoop, instead just ++// re-executing the JumpLoop to jump to the correct bytecode. ++static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ++ Register bytecode_array, ++ Register bytecode_offset, ++ Register bytecode, Register scratch1, ++ Register scratch2, Register scratch3, ++ Label* if_return) { ++ Register bytecode_size_table = scratch1; ++ ++ // The bytecode offset value will be increased by one in wide and extra wide ++ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we ++ // will restore the original bytecode. In order to simplify the code, we have ++ // a backup of it. ++ Register original_bytecode_offset = scratch3; ++ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode, ++ bytecode_size_table, original_bytecode_offset)); ++ __ Move(original_bytecode_offset, bytecode_offset); ++ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address()); ++ ++ // Check if the bytecode is a Wide or ExtraWide prefix bytecode. 
++  Label process_bytecode, extra_wide;
++  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
++  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
++  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
++  STATIC_ASSERT(3 ==
++                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
++  __ Branch(&process_bytecode, hi, bytecode, Operand(3));
++  __ And(scratch2, bytecode, Operand(1));
++  __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
++
++  // Load the next bytecode and update table to the wide scaled table.
++  __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
++  __ Add_d(scratch2, bytecode_array, bytecode_offset);
++  __ Ld_bu(bytecode, MemOperand(scratch2, 0));
++  __ Add_d(bytecode_size_table, bytecode_size_table,
++           Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
++  __ jmp(&process_bytecode);
++
++  __ bind(&extra_wide);
++  // Load the next bytecode and update table to the extra wide scaled table.
++  __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
++  __ Add_d(scratch2, bytecode_array, bytecode_offset);
++  __ Ld_bu(bytecode, MemOperand(scratch2, 0));
++  __ Add_d(bytecode_size_table, bytecode_size_table,
++           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
++
++  __ bind(&process_bytecode);
++
++// Bailout to the return label if this is a return bytecode.
++#define JUMP_IF_EQUAL(NAME)          \
++  __ Branch(if_return, eq, bytecode, \
++            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
++  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
++#undef JUMP_IF_EQUAL
++
++  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
++  // of the loop.
++  Label end, not_jump_loop;
++  __ Branch(&not_jump_loop, ne, bytecode,
++            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
++  // We need to restore the original bytecode_offset since we might have
++  // increased it to skip the wide / extra-wide prefix bytecode.
++  __ Move(bytecode_offset, original_bytecode_offset);
++  __ jmp(&end);
++
++  __ bind(&not_jump_loop);
++  // Otherwise, load the size of the current bytecode and advance the offset.
++  __ Alsl_d(scratch2, bytecode, bytecode_size_table, 2, t7);
++  __ Ld_w(scratch2, MemOperand(scratch2, 0));
++  __ Add_d(bytecode_offset, bytecode_offset, scratch2);
++
++  __ bind(&end);
++}
++
++// Generate code for entering a JS function with the interpreter.
++// On entry to the function the receiver and arguments have been pushed on the
++// stack left to right. The actual argument count matches the formal parameter
++// count expected by the function.
++//
++// The live registers are:
++//   o a1: the JS function object being called.
++//   o a3: the incoming new target or generator object
++//   o cp: our context
++//   o fp: the caller's frame pointer
++//   o sp: stack pointer
++//   o ra: return address
++//
++// The function builds an interpreter frame. See InterpreterFrameConstants in
++// frames.h for its layout.
++void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
++  Register closure = a1;
++  Register feedback_vector = a2;
++
++  // Get the bytecode array from the function object and load it into
++  // kInterpreterBytecodeArrayRegister.
++ __ Ld_d(t5, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ FieldMemOperand(t5, SharedFunctionInfo::kFunctionDataOffset)); ++ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4); ++ ++ // The bytecode array could have been flushed from the shared function info, ++ // if so, call into CompileLazy. ++ Label compile_lazy; ++ __ GetObjectType(kInterpreterBytecodeArrayRegister, t5, t5); ++ __ Branch(&compile_lazy, ne, t5, Operand(BYTECODE_ARRAY_TYPE)); ++ ++ // Load the feedback vector from the closure. ++ __ Ld_d(feedback_vector, ++ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); ++ __ Ld_d(feedback_vector, ++ FieldMemOperand(feedback_vector, Cell::kValueOffset)); ++ ++ Label push_stack_frame; ++ // Check if feedback vector is valid. If valid, check for optimized code ++ // and update invocation count. Otherwise, setup the stack frame. ++ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); ++ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); ++ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); ++ ++ // Read off the optimized code slot in the feedback vector, and if there ++ // is optimized code or an optimization marker, call that instead. ++ Register optimized_code_entry = a4; ++ __ Ld_d(optimized_code_entry, ++ FieldMemOperand(feedback_vector, ++ FeedbackVector::kOptimizedCodeWeakOrSmiOffset)); ++ ++ // Check if the optimized code slot is not empty. ++ Label optimized_code_slot_not_empty; ++ ++ __ Branch(&optimized_code_slot_not_empty, ne, optimized_code_entry, ++ Operand(Smi::FromEnum(OptimizationMarker::kNone))); ++ ++ Label not_optimized; ++ __ bind(¬_optimized); ++ ++ // Increment invocation count for the function. ++ __ Ld_w(a4, FieldMemOperand(feedback_vector, ++ FeedbackVector::kInvocationCountOffset)); ++ __ Add_w(a4, a4, Operand(1)); ++ __ St_w(a4, FieldMemOperand(feedback_vector, ++ FeedbackVector::kInvocationCountOffset)); ++ ++ // Open a frame scope to indicate that there is a frame on the stack. The ++ // MANUAL indicates that the scope shouldn't actually generate code to set up ++ // the frame (that is done below). ++ __ bind(&push_stack_frame); ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ PushStandardFrame(closure); ++ ++ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are ++ // 8-bit fields next to each other, so we could just optimize by writing a ++ // 16-bit. These static asserts guard our assumption is valid. ++ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == ++ BytecodeArray::kOsrNestingLevelOffset + kCharSize); ++ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); ++ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kOsrNestingLevelOffset)); ++ ++ // Load initial bytecode offset. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ ++ // Push bytecode array and Smi tagged bytecode array offset. ++ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister); ++ __ Push(kInterpreterBytecodeArrayRegister, a4); ++ ++ // Allocate the local and temporary register file on the stack. ++ Label stack_overflow; ++ { ++ // Load frame size (word) from the BytecodeArray object. ++ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kFrameSizeOffset)); ++ ++ // Do a stack check to ensure we don't go over the limit. 
++ __ Sub_d(a5, sp, Operand(a4)); ++ LoadStackLimit(masm, a2, StackLimitKind::kRealStackLimit); ++ __ Branch(&stack_overflow, lo, a5, Operand(a2)); ++ ++ // If ok, push undefined as the initial value for all register file entries. ++ Label loop_header; ++ Label loop_check; ++ __ LoadRoot(a5, RootIndex::kUndefinedValue); ++ __ Branch(&loop_check); ++ __ bind(&loop_header); ++ // TODO(rmcilroy): Consider doing more than one push per loop iteration. ++ __ push(a5); ++ // Continue loop if not done. ++ __ bind(&loop_check); ++ __ Sub_d(a4, a4, Operand(kPointerSize)); ++ __ Branch(&loop_header, ge, a4, Operand(zero_reg)); ++ } ++ ++ // If the bytecode array has a valid incoming new target or generator object ++ // register, initialize it with incoming value which was passed in r3. ++ Label no_incoming_new_target_or_generator_register; ++ __ Ld_w(a5, FieldMemOperand( ++ kInterpreterBytecodeArrayRegister, ++ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); ++ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5, ++ Operand(zero_reg)); ++ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7); ++ __ St_d(a3, MemOperand(a5, 0)); ++ __ bind(&no_incoming_new_target_or_generator_register); ++ ++ // Perform interrupt stack check. ++ // TODO(solanes): Merge with the real stack limit check above. ++ Label stack_check_interrupt, after_stack_check_interrupt; ++ LoadStackLimit(masm, a5, StackLimitKind::kInterruptStackLimit); ++ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5)); ++ __ bind(&after_stack_check_interrupt); ++ ++ // Load accumulator as undefined. ++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); ++ ++ // Load the dispatch table into a register and dispatch to the bytecode ++ // handler at the current bytecode offset. ++ Label do_dispatch; ++ __ bind(&do_dispatch); ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ __ Add_d(t5, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a7, MemOperand(t5, 0)); ++ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister, ++ kPointerSizeLog2, t7); ++ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0)); ++ __ Call(kJavaScriptCallCodeStartRegister); ++ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); ++ ++ // Any returns to the entry trampoline are either due to the return bytecode ++ // or the interpreter tail calling a builtin and then a dispatch. ++ ++ // Get bytecode array and bytecode offset from the stack frame. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ // Either return, or advance to the next bytecode and dispatch. ++ Label do_return; ++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a1, MemOperand(a1, 0)); ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ a4, &do_return); ++ __ jmp(&do_dispatch); ++ ++ __ bind(&do_return); ++ // The return value is in a0. ++ LeaveInterpreterFrame(masm, t0); ++ __ Jump(ra); ++ ++ __ bind(&stack_check_interrupt); ++ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset ++ // for the call to the StackGuard. 
++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset))); ++ __ St_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ CallRuntime(Runtime::kStackGuard); ++ ++ // After the call, restore the bytecode array, bytecode offset and accumulator ++ // registers again. Also, restore the bytecode offset in the stack to its ++ // previous value. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); ++ ++ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister); ++ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ __ jmp(&after_stack_check_interrupt); ++ ++ __ bind(&optimized_code_slot_not_empty); ++ Label maybe_has_optimized_code; ++ // Check if optimized code marker is actually a weak reference to the ++ // optimized code as opposed to an optimization marker. ++ __ JumpIfNotSmi(optimized_code_entry, &maybe_has_optimized_code, t7); ++ MaybeOptimizeCode(masm, feedback_vector, optimized_code_entry); ++ // Fall through if there's no runnable optimized code. ++ __ jmp(¬_optimized); ++ ++ __ bind(&maybe_has_optimized_code); ++ // Load code entry from the weak reference, if it was cleared, resume ++ // execution of unoptimized code. ++ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, ¬_optimized); ++ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5); ++ ++ __ bind(&compile_lazy); ++ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); ++ // Unreachable code. ++ __ break_(0xCC); ++ ++ __ bind(&stack_overflow); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++} ++ ++static void Generate_InterpreterPushArgs(MacroAssembler* masm, ++ Register num_args, Register index, ++ Register scratch, Register scratch2) { ++ // Find the address of the last argument. ++ __ mov(scratch2, num_args); ++ __ slli_d(scratch2, scratch2, kPointerSizeLog2); ++ __ Sub_d(scratch2, index, Operand(scratch2)); ++ ++ // Push the arguments. ++ Label loop_header, loop_check; ++ __ Branch(&loop_check); ++ __ bind(&loop_header); ++ __ Ld_d(scratch, MemOperand(index, 0)); ++ __ Add_d(index, index, Operand(-kPointerSize)); ++ __ push(scratch); ++ __ bind(&loop_check); ++ __ Branch(&loop_header, hi, index, Operand(scratch2)); ++} ++ ++// static ++void Builtins::Generate_InterpreterPushArgsThenCallImpl( ++ MacroAssembler* masm, ConvertReceiverMode receiver_mode, ++ InterpreterPushArgsMode mode) { ++ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a2 : the address of the first argument to be pushed. Subsequent ++ // arguments should be consecutive above this, in the same order as ++ // they are to be pushed onto the stack. ++ // -- a1 : the target to call (can be any Object). ++ // ----------------------------------- ++ Label stack_overflow; ++ ++ __ Add_d(a3, a0, Operand(1)); // Add one for receiver. ++ ++ // Push "undefined" as the receiver arg if we need to. ++ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Sub_d(a3, a3, Operand(1)); // Subtract one for receiver. 
++ } ++ ++ Generate_StackOverflowCheck(masm, a3, a4, t0, &stack_overflow); ++ ++ // This function modifies a2, t0 and a4. ++ Generate_InterpreterPushArgs(masm, a3, a2, a4, t0); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Pop(a2); // Pass the spread in a register ++ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread ++ } ++ ++ // Call the target. ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ } ++} ++ ++// static ++void Builtins::Generate_InterpreterPushArgsThenConstructImpl( ++ MacroAssembler* masm, InterpreterPushArgsMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argument count (not including receiver) ++ // -- a3 : new target ++ // -- a1 : constructor to call ++ // -- a2 : allocation site feedback if available, undefined otherwise. ++ // -- a4 : address of the first argument ++ // ----------------------------------- ++ Label stack_overflow; ++ ++ // Push a slot for the receiver. ++ __ push(zero_reg); ++ ++ Generate_StackOverflowCheck(masm, a0, a5, t0, &stack_overflow); ++ ++ // This function modifies t0, a4 and a5. ++ Generate_InterpreterPushArgs(masm, a0, a4, a5, t0); ++ ++ if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ __ Pop(a2); // Pass the spread in a register ++ __ Sub_d(a0, a0, Operand(1)); // Subtract one for spread ++ } else { ++ __ AssertUndefinedOrAllocationSite(a2, t0); ++ } ++ ++ if (mode == InterpreterPushArgsMode::kArrayFunction) { ++ __ AssertFunction(a1); ++ ++ // Tail call to the function-specific construct stub (still in the caller ++ // context at this point). ++ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl), ++ RelocInfo::CODE_TARGET); ++ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) { ++ // Call the constructor with a0, a1, and a3 unmodified. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread), ++ RelocInfo::CODE_TARGET); ++ } else { ++ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode); ++ // Call the constructor with a0, a1, and a3 unmodified. ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++ } ++ ++ __ bind(&stack_overflow); ++ { ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ // Unreachable code. ++ __ break_(0xCC); ++ } ++} ++ ++static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ++ // Set the return address to the correct point in the interpreter entry ++ // trampoline. ++ Label builtin_trampoline, trampoline_loaded; ++ Smi interpreter_entry_return_pc_offset( ++ masm->isolate()->heap()->interpreter_entry_return_pc_offset()); ++ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero()); ++ ++ // If the SFI function_data is an InterpreterData, the function will have a ++ // custom copy of the interpreter entry trampoline for profiling. If so, ++ // get the custom trampoline, otherwise grab the entry address of the global ++ // trampoline. 
++ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); ++ __ GetObjectType(t0, kInterpreterDispatchTableRegister, ++ kInterpreterDispatchTableRegister); ++ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, ++ Operand(INTERPRETER_DATA_TYPE)); ++ ++ __ Ld_d(t0, ++ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); ++ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&trampoline_loaded); ++ ++ __ bind(&builtin_trampoline); ++ __ li(t0, ExternalReference:: ++ address_of_interpreter_entry_trampoline_instruction_start( ++ masm->isolate())); ++ __ Ld_d(t0, MemOperand(t0, 0)); ++ ++ __ bind(&trampoline_loaded); ++ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); ++ ++ // Initialize the dispatch table register. ++ __ li(kInterpreterDispatchTableRegister, ++ ExternalReference::interpreter_dispatch_table_address(masm->isolate())); ++ ++ // Get the bytecode array pointer from the frame. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ ++ if (FLAG_debug_code) { ++ // Check function data field is actually a BytecodeArray object. ++ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); ++ __ Assert(ne, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ kScratchReg, Operand(zero_reg)); ++ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); ++ __ Assert(eq, ++ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, ++ a1, Operand(BYTECODE_ARRAY_TYPE)); ++ } ++ ++ // Get the target bytecode offset from the frame. ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ if (FLAG_debug_code) { ++ Label okay; ++ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ // Unreachable code. ++ __ break_(0xCC); ++ __ bind(&okay); ++ } ++ ++ // Dispatch to the target bytecode. ++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a7, MemOperand(a1, 0)); ++ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7); ++ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0)); ++ __ Jump(kJavaScriptCallCodeStartRegister); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { ++ // Advance the current bytecode offset stored within the given interpreter ++ // stack frame. This simulates what all bytecode handlers do upon completion ++ // of the underlying operation. ++ __ Ld_d(kInterpreterBytecodeArrayRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); ++ __ Ld_d(kInterpreterBytecodeOffsetRegister, ++ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ __ SmiUntag(kInterpreterBytecodeOffsetRegister); ++ ++ Label enter_bytecode, function_entry_bytecode; ++ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + ++ kFunctionEntryBytecodeOffset)); ++ ++ // Load the current bytecode. ++ __ Add_d(a1, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister); ++ __ Ld_bu(a1, MemOperand(a1, 0)); ++ ++ // Advance to the next bytecode. 
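++  // (Editorial sketch, not from the upstream port: conceptually the helper
++  // invoked below does, in C++ terms,
++  //   if (Bytecodes::Returns(bytecode)) goto if_return;
++  //   offset += Bytecodes::Size(bytecode, operand_scale);
++  // re-reading the scale whenever a Wide/ExtraWide prefix is decoded.)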
++ Label if_return; ++ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, ++ kInterpreterBytecodeOffsetRegister, a1, a2, a3, ++ a4, &if_return); ++ ++ __ bind(&enter_bytecode); ++ // Convert new bytecode offset to a Smi and save in the stackframe. ++ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); ++ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); ++ ++ Generate_InterpreterEnterBytecode(masm); ++ ++ __ bind(&function_entry_bytecode); ++ // If the code deoptimizes during the implicit function entry stack interrupt ++ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is ++ // not a valid bytecode offset. Detect this case and advance to the first ++ // actual bytecode. ++ __ li(kInterpreterBytecodeOffsetRegister, ++ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); ++ __ Branch(&enter_bytecode); ++ ++ // We should never take the if_return path. ++ __ bind(&if_return); ++ __ Abort(AbortReason::kInvalidBytecodeAdvance); ++} ++ ++void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { ++ Generate_InterpreterEnterBytecode(masm); ++} ++ ++namespace { ++void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, ++ bool java_script_builtin, ++ bool with_result) { ++ const RegisterConfiguration* config(RegisterConfiguration::Default()); ++ int allocatable_register_count = config->num_allocatable_general_registers(); ++ if (with_result) { ++ // Overwrite the hole inserted by the deoptimizer with the return value from ++ // the LAZY deopt point. ++ __ St_d(a0, ++ MemOperand( ++ sp, config->num_allocatable_general_registers() * kPointerSize + ++ BuiltinContinuationFrameConstants::kFixedFrameSize)); ++ } ++ for (int i = allocatable_register_count - 1; i >= 0; --i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ __ Pop(Register::from_code(code)); ++ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { ++ __ SmiUntag(Register::from_code(code)); ++ } ++ } ++ __ Ld_d( ++ fp, ++ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ // Load builtin index (stored as a Smi) and use it to get the builtin start ++ // address from the builtins table. ++ __ Pop(t0); ++ __ Add_d(sp, sp, ++ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); ++ __ Pop(ra); ++ __ LoadEntryFromBuiltinIndex(t0); ++ __ Jump(t0); ++} ++} // namespace ++ ++void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, false); ++} ++ ++void Builtins::Generate_ContinueToCodeStubBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, false, true); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, false); ++} ++ ++void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult( ++ MacroAssembler* masm) { ++ Generate_ContinueToBuiltinHelper(masm, true, true); ++} ++ ++void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kNotifyDeoptimized); ++ } ++ ++ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code()); ++ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize)); ++ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state. 
++  __ Ret();
++}
++
++void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
++  {
++    FrameScope scope(masm, StackFrame::INTERNAL);
++    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
++  }
++
++  // If the code object is null, just return to the caller.
++  __ Ret(eq, a0, Operand(Smi::zero()));
++
++  // Drop the handler frame that is sitting on top of the actual
++  // JavaScript frame. This is the case when OSR is triggered from bytecode.
++  __ LeaveFrame(StackFrame::STUB);
++
++  // Load deoptimization data from the code object.
++  // <deopt_data> = <code_obj>[#deoptimization_data_offset]
++  __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
++
++  // Load the OSR entrypoint offset from the deoptimization data.
++  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
++  __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
++                                     DeoptimizationData::kOsrPcOffsetIndex) -
++                                 kHeapObjectTag));
++
++  // Compute the target address = code_obj + header_size + osr_offset
++  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
++  __ Add_d(a0, a0, a1);
++  __ addi_d(ra, a0, Code::kHeaderSize - kHeapObjectTag);
++
++  // And "return" to the OSR entry point of the function.
++  __ Ret();
++}
++
++// static
++void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
++  // ----------- S t a t e -------------
++  //  -- a0     : argc
++  //  -- sp[0]  : argArray
++  //  -- sp[4]  : thisArg
++  //  -- sp[8]  : receiver
++  // -----------------------------------
++
++  Register argc = a0;
++  Register arg_array = a2;
++  Register receiver = a1;
++  Register this_arg = a5;
++  Register undefined_value = a3;
++  Register scratch = a4;
++
++  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
++
++  // 1. Load receiver into a1, argArray into a2 (if present), remove all
++  // arguments from the stack (including the receiver), and push thisArg (if
++  // present) instead.
++  {
++    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
++    // consistent state for a simple pop operation.
++
++    __ Sub_d(sp, sp, Operand(2 * kPointerSize));
++    __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7);
++    __ mov(scratch, argc);
++    __ Pop(this_arg, arg_array);                   // Overwrite argc
++    __ Movz(arg_array, undefined_value, scratch);  // if argc == 0
++    __ Movz(this_arg, undefined_value, scratch);   // if argc == 0
++    __ Sub_d(scratch, scratch, Operand(1));
++    __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
++    __ Ld_d(receiver, MemOperand(sp, 0));
++    __ St_d(this_arg, MemOperand(sp, 0));
++  }
++
++  // ----------- S t a t e -------------
++  //  -- a2    : argArray
++  //  -- a1    : receiver
++  //  -- a3    : undefined root value
++  //  -- sp[0] : thisArg
++  // -----------------------------------
++
++  // 2. We don't need to check explicitly for callable receiver here,
++  // since that's the first thing the Call/CallWithArrayLike builtins
++  // will do.
++
++  // 3. Tail call with no arguments if argArray is null or undefined.
++  Label no_arguments;
++  __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
++  __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
++
++  // 4a. Apply the receiver to the given argArray.
++  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
++          RelocInfo::CODE_TARGET);
++
++  // 4b. The argArray is either null or undefined, so we tail call without any
++  // arguments to the receiver.
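++  // (Editorial note, not from the upstream port: per the spec,
++  // f.apply(x, null) and f.apply(x, undefined) degenerate to f.call(x),
++  // i.e. a zero-argument call, which is why a0 is cleared below before
++  // tail-calling the plain Call builtin.)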
++ __ bind(&no_arguments); ++ { ++ __ mov(a0, zero_reg); ++ DCHECK(receiver == a1); ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++ } ++} ++ ++// static ++void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { ++ // 1. Make sure we have at least one argument. ++ // a0: actual number of arguments ++ { ++ Label done; ++ __ Branch(&done, ne, a0, Operand(zero_reg)); ++ __ PushRoot(RootIndex::kUndefinedValue); ++ __ Add_d(a0, a0, Operand(1)); ++ __ bind(&done); ++ } ++ ++ // 2. Get the function to call (passed as receiver) from the stack. ++ // a0: actual number of arguments ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ Ld_d(a1, MemOperand(kScratchReg, 0)); ++ ++ // 3. Shift arguments and return address one slot down on the stack ++ // (overwriting the original receiver). Adjust argument count to make ++ // the original first argument the new receiver. ++ // a0: actual number of arguments ++ // a1: function ++ { ++ Label loop; ++ // Calculate the copy start address (destination). Copy end address is sp. ++ __ Alsl_d(a2, a0, sp, kPointerSizeLog2, t7); ++ ++ __ bind(&loop); ++ __ Ld_d(kScratchReg, MemOperand(a2, -kPointerSize)); ++ __ St_d(kScratchReg, MemOperand(a2, 0)); ++ __ Sub_d(a2, a2, Operand(kPointerSize)); ++ __ Branch(&loop, ne, a2, Operand(sp)); ++ // Adjust the actual number of arguments and remove the top element ++ // (which is a copy of the last argument). ++ __ Sub_d(a0, a0, Operand(1)); ++ __ Pop(); ++ } ++ ++ // 4. Call the callable. ++ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectApply(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : argumentsList (if argc ==3) ++ // -- sp[4] : thisArgument (if argc >=2) ++ // -- sp[8] : target (if argc >=1) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register this_argument = a5; ++ Register undefined_value = a3; ++ Register scratch = a4; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load target into a1 (if present), argumentsList into a2 (if present), ++ // remove all arguments from the stack (including the receiver), and push ++ // thisArgument (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. ++ ++ __ Sub_d(sp, sp, Operand(3 * kPointerSize)); ++ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7); ++ __ mov(scratch, argc); ++ __ Pop(target, this_argument, arguments_list); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Movz(this_argument, undefined_value, scratch); // if argc == 0 ++ __ Movz(target, undefined_value, scratch); // if argc == 0 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Movz(this_argument, undefined_value, scratch); // if argc == 1 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 ++ ++ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : undefined root value ++ // -- sp[0] : thisArgument ++ // ----------------------------------- ++ ++ // 2. 
We don't need to check explicitly for callable target here, ++ // since that's the first thing the Call/CallWithArrayLike builtins ++ // will do. ++ ++ // 3. Apply the target to the given argumentsList. ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : argc ++ // -- sp[0] : new.target (optional) (dummy value if argc <= 2) ++ // -- sp[4] : argumentsList (dummy value if argc <= 1) ++ // -- sp[8] : target (dummy value if argc == 0) ++ // -- sp[12] : receiver ++ // ----------------------------------- ++ Register argc = a0; ++ Register arguments_list = a2; ++ Register target = a1; ++ Register new_target = a3; ++ Register undefined_value = a4; ++ Register scratch = a5; ++ ++ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); ++ ++ // 1. Load target into a1 (if present), argumentsList into a2 (if present), ++ // new.target into a3 (if present, otherwise use target), remove all ++ // arguments from the stack (including the receiver), and push thisArgument ++ // (if present) instead. ++ { ++ // Claim (3 - argc) dummy arguments form the stack, to put the stack in a ++ // consistent state for a simple pop operation. ++ ++ __ Sub_d(sp, sp, Operand(3 * kPointerSize)); ++ __ Alsl_d(sp, argc, sp, kPointerSizeLog2, t7); ++ __ mov(scratch, argc); ++ __ Pop(target, arguments_list, new_target); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0 ++ __ Movz(new_target, undefined_value, scratch); // if argc == 0 ++ __ Movz(target, undefined_value, scratch); // if argc == 0 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1 ++ __ Movz(new_target, target, scratch); // if argc == 1 ++ __ Sub_d(scratch, scratch, Operand(1)); ++ __ Movz(new_target, target, scratch); // if argc == 2 ++ ++ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver ++ } ++ ++ // ----------- S t a t e ------------- ++ // -- a2 : argumentsList ++ // -- a1 : target ++ // -- a3 : new.target ++ // -- sp[0] : receiver (undefined) ++ // ----------------------------------- ++ ++ // 2. We don't need to check explicitly for constructor target here, ++ // since that's the first thing the Construct/ConstructWithArrayLike ++ // builtins will do. ++ ++ // 3. We don't need to check explicitly for constructor new.target here, ++ // since that's the second thing the Construct/ConstructWithArrayLike ++ // builtins will do. ++ ++ // 4. Construct the target with the given new.target and argumentsList. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike), ++ RelocInfo::CODE_TARGET); ++} ++ ++static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { ++ __ SmiTag(a0); ++ __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ __ Push(ra, fp, a4, a1, a0); ++ __ Push(Smi::zero()); // Padding. ++ __ Add_d(fp, sp, ++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp)); ++} ++ ++static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : result being passed through ++ // ----------------------------------- ++ // Get the number of arguments passed (as a smi), tear down the frame and ++ // then tear down the parameters. 
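++  // (Editorial sketch, not from the upstream port: in pseudo-C the teardown
++  // below is
++  //   sp = fp; pop(ra, fp);            // discard the adaptor frame
++  //   sp += (saved_argc + 1) * 8;      // drop the arguments + receiver
++  // where saved_argc is the smi-tagged count re-loaded from the frame.)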
++  __ Ld_d(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
++  __ mov(sp, fp);
++  __ Pop(ra, fp);
++  __ SmiScale(a4, a1, kPointerSizeLog2);
++  __ Add_d(sp, sp, a4);
++  // Adjust for the receiver.
++  __ Add_d(sp, sp, Operand(kPointerSize));
++}
++
++// static
++void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
++                                               Handle<Code> code) {
++  // ----------- S t a t e -------------
++  //  -- a1 : target
++  //  -- a0 : number of parameters on the stack (not including the receiver)
++  //  -- a2 : arguments list (a FixedArray)
++  //  -- a4 : len (number of elements to push from args)
++  //  -- a3 : new.target (for [[Construct]])
++  // -----------------------------------
++  if (masm->emit_debug_code()) {
++    // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
++    Label ok, fail;
++    __ AssertNotSmi(a2);
++    __ GetObjectType(a2, t8, t8);
++    __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
++    __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
++    __ Branch(&ok, eq, a4, Operand(zero_reg));
++    // Fall through.
++    __ bind(&fail);
++    __ Abort(AbortReason::kOperandIsNotAFixedArray);
++
++    __ bind(&ok);
++  }
++
++  Register args = a2;
++  Register len = a4;
++
++  // Check for stack overflow.
++  Label stack_overflow;
++  Generate_StackOverflowCheck(masm, len, kScratchReg, a5, &stack_overflow);
++
++  // Push arguments onto the stack (thisArgument is already on the stack).
++  {
++    Label done, push, loop;
++    Register src = a6;
++    Register scratch = len;
++
++    __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
++    __ Add_d(a0, a0, len);  // The 'len' argument for Call() or Construct().
++    __ Branch(&done, eq, len, Operand(zero_reg));
++    __ slli_d(scratch, len, kPointerSizeLog2);
++    __ Sub_d(scratch, sp, Operand(scratch));
++    __ LoadRoot(t1, RootIndex::kTheHoleValue);
++    __ bind(&loop);
++    __ Ld_d(a5, MemOperand(src, 0));
++    __ Branch(&push, ne, a5, Operand(t1));
++    __ LoadRoot(a5, RootIndex::kUndefinedValue);
++    __ bind(&push);
++    __ addi_d(src, src, kPointerSize);
++    __ Push(a5);
++    __ Branch(&loop, ne, scratch, Operand(sp));
++    __ bind(&done);
++  }
++
++  // Tail-call to the actual Call or Construct builtin.
++  __ Jump(code, RelocInfo::CODE_TARGET);
++
++  __ bind(&stack_overflow);
++  __ TailCallRuntime(Runtime::kThrowStackOverflow);
++}
++
++// static
++void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
++                                                      CallOrConstructMode mode,
++                                                      Handle<Code> code) {
++  // ----------- S t a t e -------------
++  //  -- a0 : the number of arguments (not including the receiver)
++  //  -- a3 : the new.target (for [[Construct]] calls)
++  //  -- a1 : the target to call (can be any Object)
++  //  -- a2 : start index (to support rest parameters)
++  // -----------------------------------
++
++  // Check if new.target has a [[Construct]] internal method.
++ if (mode == CallOrConstructMode::kConstruct) { ++ Label new_target_constructor, new_target_not_constructor; ++ __ JumpIfSmi(a3, &new_target_not_constructor); ++ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset)); ++ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg)); ++ __ bind(&new_target_not_constructor); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ Push(a3); ++ __ CallRuntime(Runtime::kThrowNotConstructor); ++ } ++ __ bind(&new_target_constructor); ++ } ++ ++ // Check if we have an arguments adaptor frame below the function frame. ++ Label arguments_adaptor, arguments_done; ++ __ Ld_d(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ __ Ld_d(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset)); ++ __ Branch(&arguments_adaptor, eq, a7, ++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ { ++ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ Ld_d(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu(a7, FieldMemOperand( ++ a7, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ mov(a6, fp); ++ } ++ __ Branch(&arguments_done); ++ __ bind(&arguments_adaptor); ++ { ++ // Just get the length from the ArgumentsAdaptorFrame. ++ __ SmiUntag(a7, ++ MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ } ++ __ bind(&arguments_done); ++ ++ Label stack_done, stack_overflow; ++ __ Sub_w(a7, a7, a2); ++ __ Branch(&stack_done, le, a7, Operand(zero_reg)); ++ { ++ // Check for stack overflow. ++ Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow); ++ ++ // Forward the arguments from the caller frame. ++ { ++ Label loop; ++ __ Add_d(a0, a0, a7); ++ __ bind(&loop); ++ { ++ __ Alsl_d(kScratchReg, a7, a6, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize)); ++ __ push(kScratchReg); ++ __ Sub_w(a7, a7, Operand(1)); ++ __ Branch(&loop, ne, a7, Operand(zero_reg)); ++ } ++ } ++ } ++ __ Branch(&stack_done); ++ __ bind(&stack_overflow); ++ __ TailCallRuntime(Runtime::kThrowStackOverflow); ++ __ bind(&stack_done); ++ ++ // Tail-call to the {code} handler. ++ __ Jump(code, RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_CallFunction(MacroAssembler* masm, ++ ConvertReceiverMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // ----------------------------------- ++ __ AssertFunction(a1); ++ ++ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) ++ // Check that function is not a "classConstructor". ++ Label class_constructor; ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); ++ __ And(kScratchReg, a3, ++ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); ++ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg)); ++ ++ // Enter the context of the function; ToObject has to run in the function ++ // context, and we also need to take the global proxy from the function ++ // context in case of conversion. ++ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ // We need to convert the receiver for non-native sloppy mode functions. 
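++  // (Editorial note, not from the upstream port: only sloppy-mode,
++  // non-native functions observe a coerced receiver; roughly
++  //   if (receiver == null || receiver == undefined) receiver = globalProxy;
++  //   else if (!IsJSReceiver(receiver)) receiver = ToObject(receiver);
++  // which is the conversion path generated below.)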
++ Label done_convert; ++ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); ++ __ And(kScratchReg, a3, ++ Operand(SharedFunctionInfo::IsNativeBit::kMask | ++ SharedFunctionInfo::IsStrictBit::kMask)); ++ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); ++ { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // -- a2 : the shared function info. ++ // -- cp : the function context. ++ // ----------------------------------- ++ ++ if (mode == ConvertReceiverMode::kNullOrUndefined) { ++ // Patch receiver to global proxy. ++ __ LoadGlobalProxy(a3); ++ } else { ++ Label convert_to_object, convert_receiver; ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ Ld_d(a3, MemOperand(kScratchReg, 0)); ++ __ JumpIfSmi(a3, &convert_to_object); ++ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); ++ __ GetObjectType(a3, a4, a4); ++ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE)); ++ if (mode != ConvertReceiverMode::kNotNullOrUndefined) { ++ Label convert_global_proxy; ++ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); ++ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); ++ __ bind(&convert_global_proxy); ++ { ++ // Patch receiver to global proxy. ++ __ LoadGlobalProxy(a3); ++ } ++ __ Branch(&convert_receiver); ++ } ++ __ bind(&convert_to_object); ++ { ++ // Convert receiver using ToObject. ++ // TODO(bmeurer): Inline the allocation here to avoid building the frame ++ // in the fast case? (fall back to AllocateInNewSpace?) ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ SmiTag(a0); ++ __ Push(a0, a1); ++ __ mov(a0, a3); ++ __ Push(cp); ++ __ Call(BUILTIN_CODE(masm->isolate(), ToObject), ++ RelocInfo::CODE_TARGET); ++ __ Pop(cp); ++ __ mov(a3, a0); ++ __ Pop(a0, a1); ++ __ SmiUntag(a0); ++ } ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ bind(&convert_receiver); ++ } ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a3, MemOperand(kScratchReg, 0)); ++ } ++ __ bind(&done_convert); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSFunction) ++ // -- a2 : the shared function info. ++ // -- cp : the function context. ++ // ----------------------------------- ++ ++ __ Ld_hu( ++ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); ++ ++ // The function is a "classConstructor", need to raise an exception. ++ __ bind(&class_constructor); ++ { ++ FrameScope frame(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowConstructorNonCallableError); ++ } ++} ++ ++// static ++void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // ----------------------------------- ++ __ AssertBoundFunction(a1); ++ ++ // Patch the receiver to [[BoundThis]]. ++ { ++ __ Ld_d(kScratchReg, ++ FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); ++ __ Alsl_d(a4, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a4, 0)); ++ } ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. 
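++  // (Editorial sketch, not from the upstream port: a bound-function call is
++  // semantically
++  //   target.[[Call]](bound_this, ...bound_args, ...call_args)
++  // so the code below reserves extra slots, shifts the already-pushed
++  // arguments, and splices the bound arguments in before tail-calling the
++  // bound target.)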
++ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slli_d(a5, a4, kPointerSizeLog2); ++ __ Sub_d(sp, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, sp, Operand(kScratchReg)); ++ // Restore the stack pointer. ++ __ Add_d(sp, sp, Operand(a5)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Relocate arguments down the stack. ++ { ++ Label loop, done_loop; ++ __ mov(a5, zero_reg); ++ __ bind(&loop); ++ __ Branch(&done_loop, gt, a5, Operand(a0)); ++ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a6, 0)); ++ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a6, 0)); ++ __ Add_d(a4, a4, Operand(1)); ++ __ Add_d(a5, a5, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Copy [[BoundArguments]] to the stack (below the arguments). ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Sub_d(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a5, 0)); ++ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a5, 0)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Call the [[BoundTargetFunction]] via the Call builtin. ++ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the target to call (can be any Object). ++ // ----------------------------------- ++ ++ Label non_callable, non_smi; ++ __ JumpIfSmi(a1, &non_callable); ++ __ bind(&non_smi); ++ __ GetObjectType(a1, t1, t2); ++ __ Jump(masm->isolate()->builtins()->CallFunction(mode), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Check if target has a [[Call]] internal method. ++ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); ++ __ Branch(&non_callable, eq, t1, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, ++ t2, Operand(JS_PROXY_TYPE)); ++ ++ // 2. 
Call to something else, which might have a [[Call]] internal method (if ++ // not we raise an exception). ++ // Overwrite the original receiver with the (original) target. ++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a1, MemOperand(kScratchReg, 0)); ++ // Let the "call_as_function_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction( ++ ConvertReceiverMode::kNotNullOrUndefined), ++ RelocInfo::CODE_TARGET); ++ ++ // 3. Call to something that is not callable. ++ __ bind(&non_callable); ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ Push(a1); ++ __ CallRuntime(Runtime::kThrowCalledNonCallable); ++ } ++} ++ ++void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (checked to be a JSFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertFunction(a1); ++ ++ // Calling convention for function specific ConstructStubs require ++ // a2 to contain either an AllocationSite or undefined. ++ __ LoadRoot(a2, RootIndex::kUndefinedValue); ++ ++ Label call_generic_stub; ++ ++ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. ++ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset)); ++ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); ++ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg)); ++ ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), ++ RelocInfo::CODE_TARGET); ++ ++ __ bind(&call_generic_stub); ++ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), ++ RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a3 : the new target (checked to be a constructor) ++ // ----------------------------------- ++ __ AssertConstructor(a1); ++ __ AssertBoundFunction(a1); ++ ++ // Load [[BoundArguments]] into a2 and length of that into a4. ++ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the function to call (checked to be a JSBoundFunction) ++ // -- a2 : the [[BoundArguments]] (implemented as FixedArray) ++ // -- a3 : the new target (checked to be a constructor) ++ // -- a4 : the number of [[BoundArguments]] ++ // ----------------------------------- ++ ++ // Reserve stack space for the [[BoundArguments]]. ++ { ++ Label done; ++ __ slli_d(a5, a4, kPointerSizeLog2); ++ __ Sub_d(sp, sp, Operand(a5)); ++ // Check the stack for overflow. We are not trying to catch interruptions ++ // (i.e. debug break and preemption) here, so check the "real stack limit". ++ LoadStackLimit(masm, kScratchReg, StackLimitKind::kRealStackLimit); ++ __ Branch(&done, hs, sp, Operand(kScratchReg)); ++ // Restore the stack pointer. 
++ __ Add_d(sp, sp, Operand(a5)); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterFrame(StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ } ++ __ bind(&done); ++ } ++ ++ // Relocate arguments down the stack. ++ { ++ Label loop, done_loop; ++ __ mov(a5, zero_reg); ++ __ bind(&loop); ++ __ Branch(&done_loop, ge, a5, Operand(a0)); ++ __ Alsl_d(a6, a4, sp, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a6, 0)); ++ __ Alsl_d(a6, a5, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a6, 0)); ++ __ Add_d(a4, a4, Operand(1)); ++ __ Add_d(a5, a5, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Copy [[BoundArguments]] to the stack (below the arguments). ++ { ++ Label loop, done_loop; ++ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); ++ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); ++ __ bind(&loop); ++ __ Sub_d(a4, a4, Operand(1)); ++ __ Branch(&done_loop, lt, a4, Operand(zero_reg)); ++ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7); ++ __ Ld_d(kScratchReg, MemOperand(a5, 0)); ++ __ Alsl_d(a5, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(kScratchReg, MemOperand(a5, 0)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ Branch(&loop); ++ __ bind(&done_loop); ++ } ++ ++ // Patch new.target to [[BoundTargetFunction]] if new.target equals target. ++ { ++ Label skip_load; ++ __ Branch(&skip_load, ne, a1, Operand(a3)); ++ __ Ld_d(a3, ++ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ bind(&skip_load); ++ } ++ ++ // Construct the [[BoundTargetFunction]] via the Construct builtin. ++ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); ++} ++ ++// static ++void Builtins::Generate_Construct(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- a0 : the number of arguments (not including the receiver) ++ // -- a1 : the constructor to call (can be any Object) ++ // -- a3 : the new target (either the same as the constructor or ++ // the JSFunction on which new was invoked initially) ++ // ----------------------------------- ++ ++ // Check if target is a Smi. ++ Label non_constructor, non_proxy; ++ __ JumpIfSmi(a1, &non_constructor); ++ ++ // Check if target has a [[Construct]] internal method. ++ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); ++ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); ++ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); ++ ++ // Dispatch based on instance type. ++ __ Ld_hu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE)); ++ ++ // Only dispatch to bound functions after checking whether they are ++ // constructors. ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), ++ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); ++ ++ // Only dispatch to proxies after checking whether they are constructors. ++ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), ++ RelocInfo::CODE_TARGET); ++ ++ // Called Construct on an exotic Object with a [[Construct]] internal method. ++ __ bind(&non_proxy); ++ { ++ // Overwrite the original receiver with the (original) target. 
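++    // (Editorial note, not from the upstream port: Alsl_d computes
++    //   kScratchReg = sp + (a0 << kPointerSizeLog2),
++    // i.e. the address of the receiver slot at sp + argc * 8.)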
++ __ Alsl_d(kScratchReg, a0, sp, kPointerSizeLog2, t7); ++ __ St_d(a1, MemOperand(kScratchReg, 0)); ++ // Let the "call_as_constructor_delegate" take care of the rest. ++ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1); ++ __ Jump(masm->isolate()->builtins()->CallFunction(), ++ RelocInfo::CODE_TARGET); ++ } ++ ++ // Called Construct on an Object that doesn't have a [[Construct]] internal ++ // method. ++ __ bind(&non_constructor); ++ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable), ++ RelocInfo::CODE_TARGET); ++} ++ ++void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ++ // State setup as expected by MacroAssembler::InvokePrologue. ++ // ----------- S t a t e ------------- ++ // -- a0: actual arguments count ++ // -- a1: function (passed through to callee) ++ // -- a2: expected arguments count ++ // -- a3: new target (passed through to callee) ++ // ----------------------------------- ++ ++ Label invoke, dont_adapt_arguments, stack_overflow; ++ ++ Label enough, too_few; ++ __ Branch(&dont_adapt_arguments, eq, a2, ++ Operand(kDontAdaptArgumentsSentinel)); ++ // We use Uless as the number of argument should always be greater than 0. ++ __ Branch(&too_few, Uless, a0, Operand(a2)); ++ ++ { // Enough parameters: actual >= expected. ++ // a0: actual number of arguments as a smi ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ bind(&enough); ++ EnterArgumentsAdaptorFrame(masm); ++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); ++ ++ // Calculate copy start address into a0 and copy end address into a4. ++ __ SmiScale(a0, a0, kPointerSizeLog2); ++ __ Add_d(a0, fp, a0); ++ // Adjust for return address and receiver. ++ __ Add_d(a0, a0, Operand(2 * kPointerSize)); ++ // Compute copy end address. ++ __ slli_d(a4, a2, kPointerSizeLog2); ++ __ sub_d(a4, a0, a4); ++ ++ // Copy the arguments (including the receiver) to the new stack frame. ++ // a0: copy start address ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ // a4: copy end address ++ ++ Label copy; ++ __ bind(©); ++ __ Ld_d(a5, MemOperand(a0, 0)); ++ __ push(a5); ++ __ addi_d(a0, a0, -kPointerSize); ++ __ Branch(©, ge, a0, Operand(a4)); ++ ++ __ jmp(&invoke); ++ } ++ ++ { // Too few parameters: Actual < expected. ++ __ bind(&too_few); ++ EnterArgumentsAdaptorFrame(masm); ++ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow); ++ ++ // Calculate copy start address into a0 and copy end address into a7. ++ // a0: actual number of arguments as a smi ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ SmiScale(a0, a0, kPointerSizeLog2); ++ __ Add_d(a0, fp, a0); ++ // Adjust for return address and receiver. ++ __ Add_d(a0, a0, Operand(2 * kPointerSize)); ++ // Compute copy end address. Also adjust for return address. ++ __ Add_d(a7, fp, kPointerSize); ++ ++ // Copy the arguments (including the receiver) to the new stack frame. ++ // a0: copy start address ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ // a7: copy end address ++ Label copy; ++ __ bind(©); ++ __ Ld_d(a4, ++ MemOperand(a0, 0)); // Adjusted above for return addr and receiver. 
++ __ Sub_d(sp, sp, kPointerSize); ++ __ Sub_d(a0, a0, kPointerSize); ++ __ St_d(a4, MemOperand(sp, 0)); ++ __ Branch(©, ne, a0, Operand(a7)); ++ ++ // Fill the remaining expected arguments with undefined. ++ // a1: function ++ // a2: expected number of arguments ++ // a3: new target (passed through to callee) ++ __ LoadRoot(a5, RootIndex::kUndefinedValue); ++ __ slli_d(a6, a2, kPointerSizeLog2); ++ __ Sub_d(a4, fp, Operand(a6)); ++ // Adjust for frame. ++ __ Sub_d(a4, a4, ++ Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp + ++ kPointerSize)); ++ ++ Label fill; ++ __ bind(&fill); ++ __ Sub_d(sp, sp, kPointerSize); ++ __ St_d(a5, MemOperand(sp, 0)); ++ __ Branch(&fill, ne, sp, Operand(a4)); ++ } ++ ++ // Call the entry point. ++ __ bind(&invoke); ++ __ mov(a0, a2); ++ // a0 : expected number of arguments ++ // a1 : function (passed through to callee) ++ // a3: new target (passed through to callee) ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ CallCodeObject(a2); ++ ++ // Store offset of return address for deoptimizer. ++ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); ++ ++ // Exit frame and return. ++ LeaveArgumentsAdaptorFrame(masm); ++ __ Ret(); ++ ++ // ------------------------------------------- ++ // Don't adapt arguments. ++ // ------------------------------------------- ++ __ bind(&dont_adapt_arguments); ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); ++ __ JumpCodeObject(a2); ++ ++ __ bind(&stack_overflow); ++ { ++ FrameScope frame(masm, StackFrame::MANUAL); ++ __ CallRuntime(Runtime::kThrowStackOverflow); ++ __ break_(0xCC); ++ } ++} ++ ++void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { ++ // The function index was put in t0 by the jump table trampoline. ++ // Convert to Smi for the runtime call ++ __ SmiTag(kWasmCompileLazyFuncIndexRegister); ++ { ++ HardAbortScope hard_abort(masm); // Avoid calls to Abort. ++ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); ++ ++ // Save all parameter registers (see wasm-linkage.cc). They might be ++ // overwritten in the runtime call below. We don't have any callee-saved ++ // registers in wasm, so no need to store anything else. ++ constexpr RegList gp_regs = Register::ListOf(a0, a2, a3, a4, a5, a6, a7); ++ constexpr RegList fp_regs = ++ DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14); ++ __ MultiPush(gp_regs); ++ __ MultiPushFPU(fp_regs); ++ ++ // Pass instance and function index as an explicit arguments to the runtime ++ // function. ++ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister); ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(kContextRegister, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmCompileLazy, 2); ++ __ mov(t8, a0); ++ ++ // Restore registers. ++ __ MultiPopFPU(fp_regs); ++ __ MultiPop(gp_regs); ++ } ++ // Finally, jump to the entrypoint. ++ __ Jump(t8); ++} ++ ++void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { ++ HardAbortScope hard_abort(masm); // Avoid calls to Abort. ++ { ++ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); ++ ++ // Save all parameter registers. They might hold live values, we restore ++ // them after the runtime call. 
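++    // (Editorial note, not from the upstream port: the push order here must
++    // match the WasmDebugBreakFrameConstants layout so the debugger can
++    // locate each saved register in the frame.)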
++ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ ++ // Initialize the JavaScript context with 0. CEntry will use it to ++ // set the current context on the isolate. ++ __ Move(cp, Smi::zero()); ++ __ CallRuntime(Runtime::kWasmDebugBreak, 0); ++ ++ // Restore registers. ++ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); ++ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); ++ } ++ __ Ret(); ++} ++ ++void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ++ SaveFPRegsMode save_doubles, ArgvMode argv_mode, ++ bool builtin_exit_frame) { ++ // Called from JavaScript; parameters are on stack as if calling JS function ++ // a0: number of arguments including receiver ++ // a1: pointer to builtin function ++ // fp: frame pointer (restored after C call) ++ // sp: stack pointer (restored as callee's sp after C call) ++ // cp: current context (C callee-saved) ++ // ++ // If argv_mode == kArgvInRegister: ++ // a2: pointer to the first argument ++ ++ if (argv_mode == kArgvInRegister) { ++ // Move argv into the correct register. ++ __ mov(s1, a2); ++ } else { ++ // Compute the argv pointer in a callee-saved register. ++ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7); ++ __ Sub_d(s1, s1, kPointerSize); ++ } ++ ++ // Enter the exit frame that transitions from JavaScript to C++. ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame( ++ save_doubles == kSaveFPRegs, 0, ++ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); ++ ++ // s0: number of arguments including receiver (C callee-saved) ++ // s1: pointer to first argument (C callee-saved) ++ // s2: pointer to builtin function (C callee-saved) ++ ++ // Prepare arguments for C routine. ++ // a0 = argc ++ __ mov(s0, a0); ++ __ mov(s2, a1); ++ ++ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We ++ // also need to reserve the 4 argument slots on the stack. ++ ++ __ AssertStackIsAligned(); ++ ++ // a0 = argc, a1 = argv, a2 = isolate ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ mov(a1, s1); ++ ++ __ StoreReturnAddressAndCall(s2); ++ ++ // Result returned in a0 or a1:a0 - do not destroy these registers! ++ ++ // Check result for exception sentinel. ++ Label exception_returned; ++ __ LoadRoot(a4, RootIndex::kException); ++ __ Branch(&exception_returned, eq, a4, Operand(a0)); ++ ++ // Check that there is no pending exception, otherwise we ++ // should have returned the exception sentinel. ++ if (FLAG_debug_code) { ++ Label okay; ++ ExternalReference pending_exception_address = ExternalReference::Create( ++ IsolateAddressId::kPendingExceptionAddress, masm->isolate()); ++ __ li(a2, pending_exception_address); ++ __ Ld_d(a2, MemOperand(a2, 0)); ++ __ LoadRoot(a4, RootIndex::kTheHoleValue); ++ // Cannot use check here as it attempts to generate call into runtime. ++ __ Branch(&okay, eq, a4, Operand(a2)); ++ __ stop(); ++ __ bind(&okay); ++ } ++ ++ // Exit C frame and return. ++ // a0:a1: result ++ // sp: stack pointer ++ // fp: frame pointer ++ Register argc = argv_mode == kArgvInRegister ++ // We don't want to pop arguments so set argc to no_reg. ++ ? no_reg ++ // s0: still holds argc (callee-saved). ++ : s0; ++ __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); ++ ++ // Handling of exception. 
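++  // (Editorial sketch, not from the upstream port: the unwinder stores its
++  // target in per-isolate slots, and the code below effectively performs
++  //   cp = *pending_handler_context;  sp = *pending_handler_sp;
++  //   fp = *pending_handler_fp;       goto *pending_handler_entrypoint;
++  // with a0 still holding the pending exception.)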
++ __ bind(&exception_returned); ++ ++ ExternalReference pending_handler_context_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); ++ ExternalReference pending_handler_entrypoint_address = ++ ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); ++ ExternalReference pending_handler_fp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); ++ ExternalReference pending_handler_sp_address = ExternalReference::Create( ++ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); ++ ++ // Ask the runtime for help to determine the handler. This will set a0 to ++ // contain the current pending exception, don't clobber it. ++ ExternalReference find_handler = ++ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); ++ { ++ FrameScope scope(masm, StackFrame::MANUAL); ++ __ PrepareCallCFunction(3, 0, a0); ++ __ mov(a0, zero_reg); ++ __ mov(a1, zero_reg); ++ __ li(a2, ExternalReference::isolate_address(masm->isolate())); ++ __ CallCFunction(find_handler, 3); ++ } ++ ++ // Retrieve the handler context, SP and FP. ++ __ li(cp, pending_handler_context_address); ++ __ Ld_d(cp, MemOperand(cp, 0)); ++ __ li(sp, pending_handler_sp_address); ++ __ Ld_d(sp, MemOperand(sp, 0)); ++ __ li(fp, pending_handler_fp_address); ++ __ Ld_d(fp, MemOperand(fp, 0)); ++ ++ // If the handler is a JS frame, restore the context to the frame. Note that ++ // the context will be set to (cp == 0) for non-JS frames. ++ Label zero; ++ __ Branch(&zero, eq, cp, Operand(zero_reg)); ++ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ bind(&zero); ++ ++ // Reset the masking register. This is done independent of the underlying ++ // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work ++ // with both configurations. It is safe to always do this, because the ++ // underlying register is caller-saved and can be arbitrarily clobbered. ++ __ ResetSpeculationPoisonRegister(); ++ ++ // Compute the handler entry address and jump to it. ++ __ li(t7, pending_handler_entrypoint_address); ++ __ Ld_d(t7, MemOperand(t7, 0)); ++ __ Jump(t7); ++} ++ ++void Builtins::Generate_DoubleToI(MacroAssembler* masm) { ++ Label done; ++ Register result_reg = t0; ++ ++ Register scratch = GetRegisterThatIsNotOneOf(result_reg); ++ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); ++ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); ++ DoubleRegister double_scratch = kScratchDoubleReg; ++ ++ // Account for saved regs. ++ const int kArgumentOffset = 4 * kPointerSize; ++ ++ __ Push(result_reg); ++ __ Push(scratch, scratch2, scratch3); ++ ++ // Load double input. ++ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset)); ++ ++ // Clear cumulative exception flags and save the FCSR. ++ // __ movfcsr2gr(scratch2, FCSR); ++ // __ movgr2fcsr(FCSR, zero_reg); ++ ++ // Try a conversion to a signed integer. ++ __ ftintrz_w_d(double_scratch, double_scratch); ++ // Move the converted value into the result register. ++ __ movfr2gr_s(scratch3, double_scratch); ++ ++ // Retrieve and restore the FCSR. ++ __ movfcsr2gr(scratch); // __ cfc1(scratch, FCSR); ++ // __ ctc1(scratch2, FCSR); ++ ++ // Check for overflow and NaNs. ++ __ And( ++ scratch, scratch, ++ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask); ++ // If we had no exceptions then set result_reg and we are done. 
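++  // (Editorial note, not from the upstream port: the And above isolates the
++  // FCSR overflow/underflow/invalid-operation cause bits; any nonzero bit
++  // means ftintrz.w.d could not represent the value and the manual
++  // truncation path below must compute the result instead.)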
++  Label error;
++  __ Branch(&error, ne, scratch, Operand(zero_reg));
++  __ Move(result_reg, scratch3);
++  __ Branch(&done);
++  __ bind(&error);
++
++  // Load the double value and perform a manual truncation.
++  Register input_high = scratch2;
++  Register input_low = scratch3;
++
++  __ Ld_w(input_low,
++          MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
++  __ Ld_w(input_high,
++          MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
++
++  Label normal_exponent;
++  // Extract the biased exponent in result.
++  __ bstrpick_w(result_reg, input_high,
++                HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
++                HeapNumber::kExponentShift);
++
++  // Check for Infinity and NaNs, which should return 0.
++  __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
++  __ Movz(result_reg, zero_reg, scratch);
++  __ Branch(&done, eq, scratch, Operand(zero_reg));
++
++  // Express exponent as delta to (number of mantissa bits + 31).
++  __ Sub_w(result_reg, result_reg,
++           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
++
++  // If the delta is strictly positive, all bits would be shifted away,
++  // which means that we can return 0.
++  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
++  __ mov(result_reg, zero_reg);
++  __ Branch(&done);
++
++  __ bind(&normal_exponent);
++  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
++  // Calculate shift.
++  __ Add_w(scratch, result_reg,
++           Operand(kShiftBase + HeapNumber::kMantissaBits));
++
++  // Save the sign.
++  Register sign = result_reg;
++  result_reg = no_reg;
++  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
++
++  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
++  // to check for this specific case.
++  Label high_shift_needed, high_shift_done;
++  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
++  __ mov(input_high, zero_reg);
++  __ Branch(&high_shift_done);
++  __ bind(&high_shift_needed);
++
++  // Set the implicit 1 before the mantissa part in input_high.
++  __ Or(input_high, input_high,
++        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
++  // Shift the mantissa bits to the correct position.
++  // We don't need to clear non-mantissa bits as they will be shifted away.
++  // If they weren't, it would mean that the answer is in the 32bit range.
++  __ sll_w(input_high, input_high, scratch);
++
++  __ bind(&high_shift_done);
++
++  // Replace the shifted bits with bits from the lower mantissa word.
++  Label pos_shift, shift_done;
++  __ li(kScratchReg, 32);
++  __ sub_w(scratch, kScratchReg, scratch);
++  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
++
++  // Negate scratch.
++  __ Sub_w(scratch, zero_reg, scratch);
++  __ sll_w(input_low, input_low, scratch);
++  __ Branch(&shift_done);
++
++  __ bind(&pos_shift);
++  __ srl_w(input_low, input_low, scratch);
++
++  __ bind(&shift_done);
++  __ Or(input_high, input_high, Operand(input_low));
++  // Restore sign if necessary.
++  __ mov(scratch, sign);
++  result_reg = sign;
++  sign = no_reg;
++  __ Sub_w(result_reg, zero_reg, input_high);
++  __ Movz(result_reg, input_high, scratch);
++
++  __ bind(&done);
++
++  __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
++  __ Pop(scratch, scratch2, scratch3);
++  __ Pop(result_reg);
++  __ Ret();
++}
++
++namespace {
++
++int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
++  int64_t offset = (ref0.address() - ref1.address());
++  DCHECK(static_cast<int>(offset) == offset);
++  return static_cast<int>(offset);
++}
++
++// Calls an API function. Allocates HandleScope, extracts returned value
++// from handle and propagates exceptions. Restores context. stack_space
++// - space to be unwound on exit (includes the call JS arguments space and
++// the additional space allocated for the fast call).
++void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
++                              ExternalReference thunk_ref, int stack_space,
++                              MemOperand* stack_space_operand,
++                              MemOperand return_value_operand) {
++  Isolate* isolate = masm->isolate();
++  ExternalReference next_address =
++      ExternalReference::handle_scope_next_address(isolate);
++  const int kNextOffset = 0;
++  const int kLimitOffset = AddressOffset(
++      ExternalReference::handle_scope_limit_address(isolate), next_address);
++  const int kLevelOffset = AddressOffset(
++      ExternalReference::handle_scope_level_address(isolate), next_address);
++
++  DCHECK(function_address == a1 || function_address == a2);
++
++  Label profiler_enabled, end_profiler_check;
++  __ li(t7, ExternalReference::is_profiling_address(isolate));
++  __ Ld_b(t7, MemOperand(t7, 0));
++  __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
++  __ li(t7, ExternalReference::address_of_runtime_stats_flag());
++  __ Ld_w(t7, MemOperand(t7, 0));
++  __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
++  {
++    // Call the api function directly.
++    __ mov(t7, function_address);
++    __ Branch(&end_profiler_check);
++  }
++
++  __ bind(&profiler_enabled);
++  {
++    // Additional parameter is the address of the actual callback.
++    __ li(t7, thunk_ref);
++  }
++  __ bind(&end_profiler_check);
++
++  // Allocate HandleScope in callee-save registers.
++  __ li(s5, next_address);
++  __ Ld_d(s0, MemOperand(s5, kNextOffset));
++  __ Ld_d(s1, MemOperand(s5, kLimitOffset));
++  __ Ld_w(s2, MemOperand(s5, kLevelOffset));
++  __ Add_w(s2, s2, Operand(1));
++  __ St_w(s2, MemOperand(s5, kLevelOffset));
++
++  __ StoreReturnAddressAndCall(t7);
++
++  Label promote_scheduled_exception;
++  Label delete_allocated_handles;
++  Label leave_exit_frame;
++  Label return_value_loaded;
++
++  // Load value from ReturnValue.
++  __ Ld_d(a0, return_value_operand);
++  __ bind(&return_value_loaded);
++
++  // No more valid handles (the result handle was the last one). Restore
++  // previous handle scope.
++  __ St_d(s0, MemOperand(s5, kNextOffset));
++  if (__ emit_debug_code()) {
++    __ Ld_w(a1, MemOperand(s5, kLevelOffset));
++    __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
++             Operand(s2));
++  }
++  __ Sub_w(s2, s2, Operand(1));
++  __ St_w(s2, MemOperand(s5, kLevelOffset));
++  __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
++  __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
++
++  // Leave the API exit frame.
++  __ bind(&leave_exit_frame);
++
++  if (stack_space_operand == nullptr) {
++    DCHECK_NE(stack_space, 0);
++    __ li(s0, Operand(stack_space));
++  } else {
++    DCHECK_EQ(stack_space, 0);
++    STATIC_ASSERT(kCArgSlotCount == 0);
++    __ Ld_d(s0, *stack_space_operand);
++  }
++
++  static constexpr bool kDontSaveDoubles = false;
++  static constexpr bool kRegisterContainsSlotCount = false;
++  __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
++                    kRegisterContainsSlotCount);
++
++  // Check if the function scheduled an exception.
++  __ LoadRoot(a4, RootIndex::kTheHoleValue);
++  __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
++  __ Ld_d(a5, MemOperand(kScratchReg, 0));
++  __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
++
++  __ Ret();
++
++  // Re-throw by promoting a scheduled exception.
++ __ bind(&promote_scheduled_exception); ++ __ TailCallRuntime(Runtime::kPromoteScheduledException); ++ ++ // HandleScope limit has changed. Delete allocated extensions. ++ __ bind(&delete_allocated_handles); ++ __ St_d(s1, MemOperand(s5, kLimitOffset)); ++ __ mov(s0, a0); ++ __ PrepareCallCFunction(1, s1); ++ __ li(a0, ExternalReference::isolate_address(isolate)); ++ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); ++ __ mov(a0, s0); ++ __ jmp(&leave_exit_frame); ++} ++ ++} // namespace ++ ++void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { ++ // ----------- S t a t e ------------- ++ // -- cp : context ++ // -- a1 : api function address ++ // -- a2 : arguments count (not including the receiver) ++ // -- a3 : call data ++ // -- a0 : holder ++ // -- ++ // -- sp[0] : last argument ++ // -- ... ++ // -- sp[(argc - 1) * 8] : first argument ++ // -- sp[(argc + 0) * 8] : receiver ++ // ----------------------------------- ++ ++ Register api_function_address = a1; ++ Register argc = a2; ++ Register call_data = a3; ++ Register holder = a0; ++ Register scratch = t0; ++ Register base = t1; // For addressing MemOperands on the stack. ++ ++ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch, ++ base)); ++ ++ using FCA = FunctionCallbackArguments; ++ ++ STATIC_ASSERT(FCA::kArgsLength == 6); ++ STATIC_ASSERT(FCA::kNewTargetIndex == 5); ++ STATIC_ASSERT(FCA::kDataIndex == 4); ++ STATIC_ASSERT(FCA::kReturnValueOffset == 3); ++ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); ++ STATIC_ASSERT(FCA::kIsolateIndex == 1); ++ STATIC_ASSERT(FCA::kHolderIndex == 0); ++ ++ // Set up FunctionCallbackInfo's implicit_args on the stack as follows: ++ // ++ // Target state: ++ // sp[0 * kPointerSize]: kHolder ++ // sp[1 * kPointerSize]: kIsolate ++ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) ++ // sp[3 * kPointerSize]: undefined (kReturnValue) ++ // sp[4 * kPointerSize]: kData ++ // sp[5 * kPointerSize]: undefined (kNewTarget) ++ ++ // Set up the base register for addressing through MemOperands. It will point ++ // at the receiver (located at sp + argc * kPointerSize). ++ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7); ++ ++ // Reserve space on the stack. ++ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); ++ ++ // kHolder. ++ __ St_d(holder, MemOperand(sp, 0 * kPointerSize)); ++ ++ // kIsolate. ++ __ li(scratch, ExternalReference::isolate_address(masm->isolate())); ++ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // kReturnValueDefaultValue and kReturnValue. ++ __ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); ++ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize)); ++ ++ // kData. ++ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize)); ++ ++ // kNewTarget. ++ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize)); ++ ++ // Keep a pointer to kHolder (= implicit_args) in a scratch register. ++ // We use it below to set up the FunctionCallbackInfo object. ++ __ mov(scratch, sp); ++ ++ // Allocate the v8::Arguments structure in the arguments' space since ++ // it's not controlled by GC. ++ static constexpr int kApiStackSpace = 4; ++ static constexpr bool kDontSaveDoubles = false; ++ FrameScope frame_scope(masm, StackFrame::MANUAL); ++ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); ++ ++ // EnterExitFrame may align the sp. ++ ++ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). 
++ // Arguments are after the return address (pushed by EnterExitFrame()). ++ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ ++ // FunctionCallbackInfo::values_ (points at the first varargs argument passed ++ // on the stack). ++ __ Sub_d(scratch, base, Operand(1 * kPointerSize)); ++ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize)); ++ ++ // FunctionCallbackInfo::length_. ++ // Stored as int field, 32-bit integers within struct on stack always left ++ // justified by n64 ABI. ++ __ St_w(argc, MemOperand(sp, 3 * kPointerSize)); ++ ++ // We also store the number of bytes to drop from the stack after returning ++ // from the API function here. ++ // Note: Unlike on other architectures, this stores the number of slots to ++ // drop, not the number of bytes. ++ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); ++ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize)); ++ ++ // v8::InvocationCallback's argument. ++ DCHECK(!AreAliased(api_function_address, scratch, a0)); ++ __ Add_d(a0, sp, Operand(1 * kPointerSize)); ++ ++ ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); ++ ++ // There are two stack slots above the arguments we constructed on the stack. ++ // TODO(jgruber): Document what these arguments are. ++ static constexpr int kStackSlotsAboveFCA = 2; ++ MemOperand return_value_operand( ++ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); ++ ++ static constexpr int kUseStackSpaceOperand = 0; ++ MemOperand stack_space_operand(sp, 4 * kPointerSize); ++ ++ AllowExternalCallThatCantCauseGC scope(masm); ++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, ++ kUseStackSpaceOperand, &stack_space_operand, ++ return_value_operand); ++} ++ ++void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { ++ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property ++ // name below the exit frame to make GC aware of them. ++ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); ++ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1); ++ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); ++ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4); ++ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5); ++ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6); ++ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7); ++ ++ Register receiver = ApiGetterDescriptor::ReceiverRegister(); ++ Register holder = ApiGetterDescriptor::HolderRegister(); ++ Register callback = ApiGetterDescriptor::CallbackRegister(); ++ Register scratch = a4; ++ DCHECK(!AreAliased(receiver, holder, callback, scratch)); ++ ++ Register api_function_address = a2; ++ ++ // Here and below +1 is for name() pushed after the args_ array. 
++ using PCA = PropertyCallbackArguments;
++ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
++ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
++ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
++ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
++ __ St_d(scratch,
++ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
++ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
++ kPointerSize));
++ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
++ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
++ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
++ // should_throw_on_error -> false
++ DCHECK_EQ(0, Smi::zero().ptr());
++ __ St_d(zero_reg,
++ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
++ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
++
++ // v8::PropertyCallbackInfo::args_ array and name handle.
++ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
++
++ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
++ __ mov(a0, sp); // a0 = Handle<Name>
++ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
++
++ const int kApiStackSpace = 1;
++ FrameScope frame_scope(masm, StackFrame::MANUAL);
++ __ EnterExitFrame(false, kApiStackSpace);
++
++ // Create v8::PropertyCallbackInfo object on the stack and initialize
++ // it's args_ field.
++ __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
++ __ Add_d(a1, sp, Operand(1 * kPointerSize));
++ // a1 = v8::PropertyCallbackInfo&
++
++ ExternalReference thunk_ref =
++ ExternalReference::invoke_accessor_getter_callback();
++
++ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
++ __ Ld_d(api_function_address,
++ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
++
++ // +3 is to skip prolog, return address and name handle.
++ MemOperand return_value_operand(
++ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
++ MemOperand* const kUseStackSpaceConstant = nullptr;
++ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
++ kStackUnwindSpace, kUseStackSpaceConstant,
++ return_value_operand);
++}
++
++void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
++ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
++ // purpose Code object) to be able to call into C functions that may trigger
++ // GC and thus move the caller.
++ //
++ // DirectCEntry places the return address on the stack (updated by the GC),
++ // making the call GC safe. The irregexp backend relies on this.
++
++ // Make place for arguments to fit C calling convention. Callers use
++ // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
++ // have to do that here. Any caller must drop kCArgsSlotsSize stack space
++ // after the call.
++ __ addi_d(sp, sp, -kCArgsSlotsSize);
++
++ __ St_d(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
++ __ Call(t7); // Call the C++ function.
++ __ Ld_d(t7, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
++
++ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
++ // In case of an error the return address may point to a memory area
++ // filled with kZapValue by the GC. Dereference the address and check for
++ // this.
++ __ Ld_d(a4, MemOperand(t7, 0));
++ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
++ Operand(reinterpret_cast<uint64_t>(kZapValue)));
++ }
++
++ __ Jump(t7);
++}
++
++#undef __
++
++} // namespace internal
++} // namespace v8
++
++#endif // V8_TARGET_ARCH_LOONG64
+diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
+index d56b3725..97be9eb7 100644
+--- a/deps/v8/src/codegen/assembler-arch.h
++++ b/deps/v8/src/codegen/assembler-arch.h
+@@ -21,6 +21,8 @@
+ #include "src/codegen/mips/assembler-mips.h"
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/assembler-mips64.h"
++#elif V8_TARGET_ARCH_LOONG64
++#include "src/codegen/loong64/assembler-loong64.h"
+ #elif V8_TARGET_ARCH_S390
+ #include "src/codegen/s390/assembler-s390.h"
+ #else
+diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
+index 8c81315d..e14dbe81 100644
+--- a/deps/v8/src/codegen/assembler-inl.h
++++ b/deps/v8/src/codegen/assembler-inl.h
+@@ -21,6 +21,8 @@
+ #include "src/codegen/mips/assembler-mips-inl.h"
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/assembler-mips64-inl.h"
++#elif V8_TARGET_ARCH_LOONG64
++#include "src/codegen/loong64/assembler-loong64-inl.h"
+ #elif V8_TARGET_ARCH_S390
+ #include "src/codegen/s390/assembler-s390-inl.h"
+ #else
+diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
+index 7a222c96..b885cecc 100644
+--- a/deps/v8/src/codegen/constants-arch.h
++++ b/deps/v8/src/codegen/constants-arch.h
+@@ -15,6 +15,8 @@
+ #include "src/codegen/mips/constants-mips.h" // NOLINT
+ #elif V8_TARGET_ARCH_MIPS64
+ #include "src/codegen/mips64/constants-mips64.h" // NOLINT
++#elif V8_TARGET_ARCH_LOONG64
++#include "src/codegen/loong64/constants-loong64.h" // NOLINT
+ #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ #include "src/codegen/ppc/constants-ppc.h" // NOLINT
+ #elif V8_TARGET_ARCH_S390
+diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
+index 14c94eba..64baaed5 100644
+--- a/deps/v8/src/codegen/cpu-features.h
++++ b/deps/v8/src/codegen/cpu-features.h
+@@ -47,6 +47,9 @@ enum CpuFeature {
+ MIPSr6,
+ MIPS_SIMD, // MSA instructions
+
++#elif V8_TARGET_ARCH_LOONG64
++ FPU, // TODO
++
+ #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ FPU,
+ FPR_GPR_MOV,
+diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
+index 512b64e5..6b764f23 100644
+--- a/deps/v8/src/codegen/external-reference.cc
++++ b/deps/v8/src/codegen/external-reference.cc
+@@ -472,6 +472,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
+ #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
+ #elif V8_TARGET_ARCH_MIPS64
+ #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
++#elif V8_TARGET_ARCH_LOONG64
++#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
+ #elif V8_TARGET_ARCH_S390
+ #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
+ #else
+diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
+index 503da3cb..a65e2ede 100644
+--- a/deps/v8/src/codegen/interface-descriptors.cc
++++ b/deps/v8/src/codegen/interface-descriptors.cc
+@@ -130,7 +130,8 @@ const char* CallInterfaceDescriptor::DebugName() const {
+ return "";
+ }
+
+-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
++#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
++ !defined(V8_TARGET_ARCH_LOONG64)
+ bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
+ return true;
+ }
+@@ -408,7 +409,8 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+ }
+
+-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
++#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
++ !defined(V8_TARGET_ARCH_LOONG64)
+ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
+new file mode 100644
+index 00000000..805ef1f5
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
+@@ -0,0 +1,268 @@
++// Copyright (c) 1994-2006 Sun Microsystems Inc.
++// All Rights Reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// - Redistributions of source code must retain the above copyright notice,
++// this list of conditions and the following disclaimer.
++//
++// - Redistribution in binary form must reproduce the above copyright
++// notice, this list of conditions and the following disclaimer in the
++// documentation and/or other materials provided with the distribution.
++//
++// - Neither the name of Sun Microsystems or the names of contributors may
++// be used to endorse or promote products derived from this software without
++// specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// The original source code covered by the above license above has been
++// modified significantly by Google Inc.
++// Copyright 2012 the V8 project authors. All rights reserved.
++
++#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
++#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
++
++#include "src/codegen/loong64/assembler-loong64.h"
++
++#include "src/codegen/assembler.h"
++#include "src/debug/debug.h"
++#include "src/objects/objects-inl.h"
++
++namespace v8 {
++namespace internal {
++
++bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
++
++bool CpuFeatures::SupportsWasmSimd128() { return false; }
++
++// -----------------------------------------------------------------------------
++// Operand and MemOperand.
++
++bool Operand::is_reg() const { return rm_.is_valid(); }
++
++int64_t Operand::immediate() const {
++ DCHECK(!is_reg());
++ DCHECK(!IsHeapObjectRequest());
++ return value_.immediate;
++}
++
++// -----------------------------------------------------------------------------
++// RelocInfo.
++
++void RelocInfo::apply(intptr_t delta) {
++ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
++ // Absolute code pointer inside code object moves with the code object.
++ Assembler::RelocateInternalReference(rmode_, pc_, delta);
++ }
++}
++
++Address RelocInfo::target_address() {
++ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
++ return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++Address RelocInfo::target_address_address() {
++ DCHECK(HasTargetAddressAddress());
++ // Read the address of the word containing the target_address in an
++ // instruction stream.
++ // The only architecture-independent user of this function is the serializer.
++ // The serializer uses it to find out how many raw bytes of instruction to
++ // output before the next target.
++ // For an instruction like LUI/ORI where the target bits are mixed into the
++ // instruction bits, the size of the target will be zero, indicating that the
++ // serializer should not step forward in memory after a target is resolved
++ // and written. In this case the target_address_address function should
++ // return the end of the instructions to be patched, allowing the
++ // deserializer to deserialize the instructions as raw bytes and put them in
++ // place, ready to be patched with the target. After jump optimization,
++ // that is the address of the instruction that follows J/JAL/JR/JALR
++ // instruction.
++ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
++}
++
++Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
++
++int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
++
++void Assembler::deserialization_set_special_target_at(
++ Address instruction_payload, Code code, Address target) {
++ set_target_address_at(instruction_payload,
++ !code.is_null() ? code.constant_pool() : kNullAddress,
++ target);
++}
++
++int Assembler::deserialization_special_target_size(
++ Address instruction_payload) {
++ return kSpecialTargetSize;
++}
++
++void Assembler::set_target_internal_reference_encoded_at(Address pc,
++ Address target) {
++ // TODO, see AssembleJumpTable, loong64 does not generate internal reference?
++ abort();
++}
++
++void Assembler::deserialization_set_target_internal_reference_at(
++ Address pc, Address target, RelocInfo::Mode mode) {
++ if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
++ DCHECK(IsJ(instr_at(pc)));
++ set_target_internal_reference_encoded_at(pc, target);
++ } else {
++ DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
++ Memory<Address>(pc) = target;
++ }
++}
++
++HeapObject RelocInfo::target_object() {
++ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++ return HeapObject::cast(
++ Object(Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
++ return target_object();
++}
++
++Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
++ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++ return Handle<HeapObject>(reinterpret_cast<Address*>(
++ Assembler::target_address_at(pc_, constant_pool_)));
++}
++
++void RelocInfo::set_target_object(Heap* heap, HeapObject target,
++ WriteBarrierMode write_barrier_mode,
++ ICacheFlushMode icache_flush_mode) {
++ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
++ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
++ icache_flush_mode);
++ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
++ !FLAG_disable_write_barriers) {
++ WriteBarrierForCode(host(), this, target);
++ }
++}
++
++Address RelocInfo::target_external_reference() {
++ DCHECK(rmode_ == EXTERNAL_REFERENCE);
++ return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::set_target_external_reference(
++ Address target, ICacheFlushMode icache_flush_mode) {
++ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
++ Assembler::set_target_address_at(pc_, constant_pool_, target,
++ icache_flush_mode);
++}
++
++Address RelocInfo::target_internal_reference() {
++ if (rmode_ == INTERNAL_REFERENCE) {
++ return Memory<Address>(pc_);
++ } else {
++ UNREACHABLE();
++ }
++}
++
++Address RelocInfo::target_internal_reference_address() {
++ DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
++ return pc_;
++}
++
++Address RelocInfo::target_runtime_entry(Assembler* origin) {
++ DCHECK(IsRuntimeEntry(rmode_));
++ return target_address();
++}
++
++void RelocInfo::set_target_runtime_entry(Address target,
++ WriteBarrierMode write_barrier_mode,
++ ICacheFlushMode icache_flush_mode) {
++ DCHECK(IsRuntimeEntry(rmode_));
++ if (target_address() != target)
++ set_target_address(target, write_barrier_mode, icache_flush_mode);
++}
++
++Address RelocInfo::target_off_heap_target() {
++ DCHECK(IsOffHeapTarget(rmode_));
++ return Assembler::target_address_at(pc_, constant_pool_);
++}
++
++void RelocInfo::WipeOut() {
++ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
++ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
++ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
++ IsOffHeapTarget(rmode_));
++ if (IsInternalReference(rmode_)) {
++ Memory<Address>(pc_) = kNullAddress;
++ } else if (IsInternalReferenceEncoded(rmode_)) {
++ Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
++ } else {
++ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
++ }
++}
++
++// -----------------------------------------------------------------------------
++// Assembler.
++
++void Assembler::CheckBuffer() {
++ if (buffer_space() <= kGap) {
++ GrowBuffer();
++ }
++}
++
++void Assembler::EmitHelper(Instr x) {
++ *reinterpret_cast<Instr*>(pc_) = x;
++ pc_ += kInstrSize;
++ CheckTrampolinePoolQuick();
++}
++
++template <>
++inline void Assembler::EmitHelper(uint8_t x);
++
++template <typename T>
++void Assembler::EmitHelper(T x) {
++ *reinterpret_cast<T*>(pc_) = x;
++ pc_ += sizeof(x);
++ CheckTrampolinePoolQuick();
++}
++
++template <>
++void Assembler::EmitHelper(uint8_t x) {
++ *reinterpret_cast<uint8_t*>(pc_) = x;
++ pc_ += sizeof(x);
++ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
++ CheckTrampolinePoolQuick();
++ }
++}
++
++void Assembler::emit(Instr x) {
++ if (!is_buffer_growth_blocked()) {
++ CheckBuffer();
++ }
++ EmitHelper(x);
++}
++
++void Assembler::emit(uint64_t data) {
++ // CheckForEmitInForbiddenSlot();
++ if (!is_buffer_growth_blocked()) {
++ CheckBuffer();
++ }
++ EmitHelper(data);
++}
++
++EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
++
++} // namespace internal
++} // namespace v8
++
++#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
+new file mode 100644
+index 00000000..395cc4e1
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
+@@ -0,0 +1,2685 @@
++// Copyright (c) 1994-2006 Sun Microsystems Inc.
++// All Rights Reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// - Redistributions of source code must retain the above copyright notice,
++// this list of conditions and the following disclaimer.
++//
++// - Redistribution in binary form must reproduce the above copyright
++// notice, this list of conditions and the following disclaimer in the
++// documentation and/or other materials provided with the distribution.
++//
++// - Neither the name of Sun Microsystems or the names of contributors may
++// be used to endorse or promote products derived from this software without
++// specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// The original source code covered by the above license above has been
++// modified significantly by Google Inc.
++// Copyright 2012 the V8 project authors. All rights reserved.
++ ++#include "src/codegen/loong64/assembler-loong64.h" ++ ++#if V8_TARGET_ARCH_LOONG64 ++ ++#include "src/base/cpu.h" ++#include "src/codegen/loong64/assembler-loong64-inl.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/codegen/string-constants.h" ++#include "src/deoptimizer/deoptimizer.h" ++#include "src/objects/heap-number-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++void CpuFeatures::ProbeImpl(bool cross_compile) { ++ supported_ |= 1u << FPU; ++ ++ // Only use statically determined features for cross compile (snapshot). ++ if (cross_compile) return; ++ ++#if defined(_loongisa_vec) ++ supported_ |= 0u; ++#endif ++ // If the compiler is allowed to use fpu then we can use fpu too in our ++ // code generation. ++#ifdef __loongarch__ ++ // Probe for additional features at runtime. ++ base::CPU cpu; ++ supported_ |= 0u; ++#endif ++} ++ ++void CpuFeatures::PrintTarget() {} ++void CpuFeatures::PrintFeatures() {} ++ ++int ToNumber(Register reg) { ++ DCHECK(reg.is_valid()); ++ const int kNumbers[] = { ++ 0, // zero_reg ++ 1, // r1 ra ++ 2, // r2 gp ++ 3, // r3 sp ++ 4, // a0 v0 ++ 5, // a1 v1 ++ 6, // a2 ++ 7, // a3 ++ 8, // a4 ++ 9, // a5 ++ 10, // a6 ++ 11, // a7 ++ 12, // t0 ++ 13, // t1 ++ 14, // t2 ++ 15, // t3 ++ 16, // t4 ++ 17, // t5 ++ 18, // t6 ++ 19, // t7 ++ 20, // t8 ++ 21, // tp ++ 22, // fp ++ 23, // s0 ++ 24, // s1 ++ 25, // s2 ++ 26, // s3 ++ 27, // s4 ++ 28, // s5 ++ 29, // s6 ++ 30, // s7 ++ 31, // s8 ++ }; ++ return kNumbers[reg.code()]; ++} ++ ++Register ToRegister(int num) { ++ DCHECK(num >= 0 && num < kNumRegisters); ++ const Register kRegisters[] = { ++ zero_reg, ra, gp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, ++ t4, t5, t6, t7, t8, tp, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8}; ++ return kRegisters[num]; ++} ++ ++// ----------------------------------------------------------------------------- ++// Implementation of RelocInfo. ++ ++const int RelocInfo::kApplyMask = ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | ++ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED); ++ ++bool RelocInfo::IsCodedSpecially() { ++ // The deserializer needs to know whether a pointer is specially coded. Being ++ // specially coded on loongisa means that it is a lui/ori instruction, and ++ // that is always the case inside code objects. ++ return true; ++} ++ ++bool RelocInfo::IsInConstantPool() { return false; } ++ ++uint32_t RelocInfo::wasm_call_tag() const { ++ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); ++ return static_cast( ++ Assembler::target_address_at(pc_, constant_pool_)); ++} ++ ++// ----------------------------------------------------------------------------- ++// Implementation of Operand and MemOperand. ++// See assembler-loong64-inl.h for inlined constructors. 
++
++Operand::Operand(Handle<HeapObject> handle)
++ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
++ value_.immediate = static_cast<intptr_t>(handle.address());
++}
++
++Operand Operand::EmbeddedNumber(double value) {
++ int32_t smi;
++ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
++ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++ result.is_heap_object_request_ = true;
++ result.value_.heap_object_request = HeapObjectRequest(value);
++ return result;
++}
++
++Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
++ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
++ result.is_heap_object_request_ = true;
++ result.value_.heap_object_request = HeapObjectRequest(str);
++ return result;
++}
++
++MemOperand::MemOperand(Register base, int32_t offset)
++ : base_(base), index_(no_reg), offset_(offset) {}
++
++MemOperand::MemOperand(Register base, Register index)
++ : base_(base), index_(index), offset_(0) {}
++
++void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
++ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
++ for (auto& request : heap_object_requests_) {
++ Handle<HeapObject> object;
++ switch (request.kind()) {
++ case HeapObjectRequest::kHeapNumber:
++ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
++ request.heap_number());
++ break;
++ case HeapObjectRequest::kStringConstant:
++ const StringConstantBase* str = request.string();
++ CHECK_NOT_NULL(str);
++ object = str->AllocateStringConstant(isolate);
++ break;
++ }
++ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
++ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
++ }
++}
++
++// -----------------------------------------------------------------------------
++// Specific instructions, constants, and masks.
++
++// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r)
++// operations as post-increment of sp.
++const Instr kPopInstruction = ADDI_D | (kPointerSize & kImm12Mask) << kRkShift |
++ (sp.code() << kRjShift) | sp.code(); // NOLINT
++// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
++const Instr kPushInstruction = ADDI_D |
++ (-kPointerSize & kImm12Mask) << kRkShift |
++ (sp.code() << kRjShift) | sp.code(); // NOLINT
++// St_d(r, MemOperand(sp, 0))
++const Instr kPushRegPattern = ST_D | (sp.code() << kRjShift); // NOLINT
++// Ld_d(r, MemOperand(sp, 0))
++const Instr kPopRegPattern = LD_D | (sp.code() << kRjShift); // NOLINT
++
++Assembler::Assembler(const AssemblerOptions& options,
++ std::unique_ptr<AssemblerBuffer> buffer)
++ : AssemblerBase(options, std::move(buffer)),
++ scratch_register_list_(t7.bit() | t6.bit()) {
++ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
++
++ last_trampoline_pool_end_ = 0;
++ no_trampoline_pool_before_ = 0;
++ trampoline_pool_blocked_nesting_ = 0;
++ // We leave space (16 * kTrampolineSlotsSize)
++ // for BlockTrampolinePoolScope buffer.
++ next_buffer_check_ = FLAG_force_long_branches
++ ? kMaxInt
++ : kMax16BranchOffset - kTrampolineSlotsSize * 16;
++ internal_trampoline_exception_ = false;
++ last_bound_pos_ = 0;
++
++ trampoline_emitted_ = FLAG_force_long_branches; // TODO remove this
++ unbound_labels_count_ = 0;
++ block_buffer_growth_ = false;
++}
++
++void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
++ SafepointTableBuilder* safepoint_table_builder,
++ int handler_table_offset) {
++ // EmitForbiddenSlotInstruction(); // TODO why?
++
++ int code_comments_size = WriteCodeComments();
++
++ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
++
++ AllocateAndInstallRequestedHeapObjects(isolate);
++
++ // Set up code descriptor.
++ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
++ // this point to make CodeDesc initialization less fiddly.
++
++ static constexpr int kConstantPoolSize = 0;
++ const int instruction_size = pc_offset();
++ const int code_comments_offset = instruction_size - code_comments_size;
++ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
++ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
++ ? constant_pool_offset
++ : handler_table_offset;
++ const int safepoint_table_offset =
++ (safepoint_table_builder == kNoSafepointTable)
++ ? handler_table_offset2
++ : safepoint_table_builder->GetCodeOffset();
++ const int reloc_info_offset =
++ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
++ CodeDesc::Initialize(desc, this, safepoint_table_offset,
++ handler_table_offset2, constant_pool_offset,
++ code_comments_offset, reloc_info_offset);
++}
++
++void Assembler::Align(int m) {
++ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
++ while ((pc_offset() & (m - 1)) != 0) {
++ nop();
++ }
++}
++
++void Assembler::CodeTargetAlign() {
++ // No advantage to aligning branch/call targets to more than
++ // single instruction, that I am aware of.
++ Align(4); ++} ++ ++Register Assembler::GetRkReg(Instr instr) { ++ return Register::from_code((instr & kRkFieldMask) >> kRkShift); ++} ++ ++Register Assembler::GetRjReg(Instr instr) { ++ return Register::from_code((instr & kRjFieldMask) >> kRjShift); ++} ++ ++Register Assembler::GetRdReg(Instr instr) { ++ return Register::from_code((instr & kRdFieldMask) >> kRdShift); ++} ++ ++uint32_t Assembler::GetRk(Instr instr) { ++ return (instr & kRkFieldMask) >> kRkShift; ++} ++ ++uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; } ++ ++uint32_t Assembler::GetRj(Instr instr) { ++ return (instr & kRjFieldMask) >> kRjShift; ++} ++ ++uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; } ++ ++uint32_t Assembler::GetRd(Instr instr) { ++ return (instr & kRdFieldMask) >> kRdShift; ++} ++ ++uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; } ++ ++uint32_t Assembler::GetSa2(Instr instr) { ++ return (instr & kSa2FieldMask) >> kSaShift; ++} ++ ++uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; } ++ ++uint32_t Assembler::GetSa3(Instr instr) { ++ return (instr & kSa3FieldMask) >> kSaShift; ++} ++ ++uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; } ++ ++bool Assembler::IsPop(Instr instr) { ++ return (instr & 0xffc003e0) == kPopRegPattern; ++} ++ ++bool Assembler::IsPush(Instr instr) { ++ return (instr & 0xffc003e0) == kPushRegPattern; ++} ++ ++// Labels refer to positions in the (to be) generated code. ++// There are bound, linked, and unused labels. ++// ++// Bound labels refer to known positions in the already ++// generated code. pos() is the position the label refers to. ++// ++// Linked labels refer to unknown positions in the code ++// to be generated; pos() is the position of the last ++// instruction using the label. ++ ++// The link chain is terminated by a value in the instruction of -1, ++// which is an otherwise illegal value (branch -1 is inf loop). ++// The instruction 16-bit offset field addresses 32-bit words, but in ++// code is conv to an 18-bit value addressing bytes, hence the -4 value. ++ ++const int kEndOfChain = 0; ++// Determines the end of the Jump chain (a subset of the label link chain). ++const int kEndOfJumpChain = 0; ++ ++bool Assembler::IsBranch(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a branch. ++ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ || ++ opcode == B || opcode == BL || opcode == BEQ || ++ opcode == BNE || opcode == BLT || opcode == BGE || ++ opcode == BLTU || opcode == BGEU; ++ return isBranch; ++} ++ ++bool Assembler::IsB(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a b. ++ bool isBranch = opcode == B || opcode == BL; ++ return isBranch; ++} ++ ++bool Assembler::IsBz(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a branch. ++ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ; ++ return isBranch; ++} ++ ++bool Assembler::IsEmittedConstant(Instr instr) { ++ // Add GetLabelConst function? ++ uint32_t label_constant = instr & ~kImm16Mask; ++ return label_constant == 0; // Emitted label const in reg-exp engine. ++} ++ ++bool Assembler::IsJ(Instr instr) { ++ uint32_t opcode = (instr >> 26) << 26; ++ // Checks if the instruction is a jump. 
++ return opcode == JIRL;
++}
++
++bool Assembler::IsLu12i_w(Instr instr) {
++ uint32_t opcode = (instr >> 25) << 25;
++ return opcode == LU12I_W;
++}
++
++bool Assembler::IsOri(Instr instr) {
++ uint32_t opcode = (instr >> 22) << 22;
++ return opcode == ORI;
++}
++
++bool Assembler::IsLu32i_d(Instr instr) {
++ uint32_t opcode = (instr >> 25) << 25;
++ return opcode == LU32I_D;
++}
++
++bool Assembler::IsLu52i_d(Instr instr) {
++ uint32_t opcode = (instr >> 22) << 22;
++ return opcode == LU52I_D;
++}
++
++bool Assembler::IsMov(Instr instr, Register rd, Register rj) {
++ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
++ Instr instr1 =
++ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code();
++ return instr == instr1;
++}
++
++bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) {
++ DCHECK(is_int20(si20));
++ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code();
++ return instr == instr1;
++}
++
++bool Assembler::IsNop(Instr instr, unsigned int type) {
++ // See Assembler::nop(type).
++ DCHECK_LT(type, 32);
++ // Traditional loongisa nop == andi(zero_reg, zero_reg, 0)
++ // When marking non-zero type, use andi(zero_reg, t7, type)
++ // to avoid use of ssnop and ehb special encodings of the
++ // andi instruction.
++
++ Register nop_rt_reg = (type == 0) ? zero_reg : t7;
++ Instr instr1 = ANDI | ((type & kImm12Mask) << kRkShift) |
++ (nop_rt_reg.code() << kRjShift);
++
++ return instr == instr1;
++}
++
++static inline int32_t GetOffsetOfBranch(Instr instr,
++ Assembler::OffsetSize bits) {
++ int32_t result = 0;
++ if (bits == 16) {
++ result = (instr << 6) >> 16;
++ } else if (bits == 21) {
++ uint32_t low16 = instr << 6;
++ low16 = low16 >> 16;
++ low16 &= 0xffff;
++ int32_t hi5 = (instr << 27) >> 11;
++ result = hi5 | low16;
++ } else {
++ uint32_t low16 = instr << 6;
++ low16 = low16 >> 16;
++ low16 &= 0xffff;
++ int32_t hi10 = (instr << 22) >> 6;
++ result = hi10 | low16;
++ DCHECK_EQ(bits, 26);
++ }
++ return result << 2;
++}
++
++static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
++ if (Assembler::IsB(instr)) {
++ return Assembler::OffsetSize::kOffset26;
++ } else if (Assembler::IsBz(instr)) {
++ return Assembler::OffsetSize::kOffset21;
++ } else {
++ DCHECK(Assembler::IsBranch(instr));
++ return Assembler::OffsetSize::kOffset16;
++ }
++}
++
++static inline int32_t AddBranchOffset(int pos, Instr instr) {
++ Assembler::OffsetSize bits = OffsetSizeInBits(instr);
++
++ int32_t imm = GetOffsetOfBranch(instr, bits);
++
++ if (imm == kEndOfChain) {
++ // EndOfChain sentinel is returned directly, not relative to pc or pos.
++ return kEndOfChain;
++ } else {
++ // Handle the case that next branch position is 0.
++ // TODO: Define -4 as a constant
++ int32_t offset = pos + Assembler::kBranchPCOffset + imm;
++ return offset == 0 ? -4 : offset;
++ }
++}
++
++int Assembler::target_at(int pos, bool is_internal) {
++ if (is_internal) {
++ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
++ int64_t address = *p;
++ if (address == kEndOfJumpChain) {
++ return kEndOfChain;
++ } else {
++ int64_t instr_address = reinterpret_cast<int64_t>(p);
++ DCHECK(instr_address - address < INT_MAX);
++ int delta = static_cast<int>(instr_address - address);
++ DCHECK(pos > delta);
++ return pos - delta;
++ }
++ }
++ Instr instr = instr_at(pos);
++
++ // TODO remove after remove label_at_put?
++ if ((instr & ~kImm16Mask) == 0) {
++ // Emitted label constant, not part of a branch.
++ if (instr == 0) {
++ return kEndOfChain;
++ } else {
++ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
++ return (imm18 + pos);
++ }
++ }
++
++ // Check we have a branch or jump instruction.
++ DCHECK(IsBranch(instr) || IsJ(instr) || IsLu12i_w(instr) ||
++ IsPcAddi(instr, t8, 16));
++ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
++ // the compiler uses arithmetic shifts for signed integers.
++ if (IsBranch(instr)) {
++ return AddBranchOffset(pos, instr);
++ } else if (IsPcAddi(instr, t8, 16)) {
++ // see BranchLong(Label* L) and BranchAndLinkLong ??
++ int32_t imm32;
++ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize);
++ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
++ DCHECK(IsLu12i_w(instr_lu12i_w));
++ // DCHECK(IsOri(instr_ori));
++ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12;
++ imm32 |= ((instr_ori >> 10) & static_cast<int32_t>(kImm12Mask));
++ if (imm32 == kEndOfJumpChain) {
++ // EndOfChain sentinel is returned directly, not relative to pc or pos.
++ return kEndOfChain;
++ }
++ return pos + imm32;
++ } else if (IsLu12i_w(instr)) {
++ abort();
++ // TODO no used??
++ /* Instr instr_lui = instr_at(pos + 0 * kInstrSize);
++ Instr instr_ori = instr_at(pos + 1 * kInstrSize);
++ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
++ DCHECK(IsOri(instr_ori));
++ DCHECK(IsOri(instr_ori2));
++
++ // TODO(plind) create named constants for shift values.
++ int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
++ imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
++ imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
++ // Sign extend address;
++ imm >>= 16;
++
++ if (imm == kEndOfJumpChain) {
++ // EndOfChain sentinel is returned directly, not relative to pc or
++ pos. return kEndOfChain; } else { uint64_t instr_address =
++ reinterpret_cast<uint64_t>(buffer_start_ + pos); DCHECK(instr_address -
++ imm < INT_MAX); int delta = static_cast<int>(instr_address - imm);
++ DCHECK(pos > delta);
++ return pos - delta;
++ }*/
++ } else {
++ DCHECK(IsJ(instr));
++ // TODO not used???
++ abort();
++ }
++}
++
++static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
++ Instr instr) {
++ int32_t bits = OffsetSizeInBits(instr);
++ int32_t imm = target_pos - pos;
++ DCHECK_EQ(imm & 3, 0);
++ imm >>= 2;
++
++ DCHECK(is_intn(imm, bits));
++
++ if (bits == 16) {
++ const int32_t mask = ((1 << 16) - 1) << 10;
++ instr &= ~mask;
++ return instr | ((imm << 10) & mask);
++ } else if (bits == 21) {
++ const int32_t mask = 0x3fffc1f;
++ instr &= ~mask;
++ uint32_t low16 = (imm & kImm16Mask) << 10;
++ int32_t hi5 = (imm >> 16) & 0x1f;
++ return instr | low16 | hi5;
++ } else {
++ DCHECK_EQ(bits, 26);
++ const int32_t mask = 0x3ffffff;
++ instr &= ~mask;
++ uint32_t low16 = (imm & kImm16Mask) << 10;
++ int32_t hi10 = (imm >> 16) & 0x3ff;
++ return instr | low16 | hi10;
++ }
++}
++
++void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
++ if (is_internal) {
++ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
++ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
++ return;
++ }
++ Instr instr = instr_at(pos);
++ if ((instr & ~kImm16Mask) == 0) {
++ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
++ // Emitted label constant, not part of a branch.
++ // Make label relative to Code pointer of generated Code object.
++ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); ++ return; ++ } ++ ++ if (IsBranch(instr)) { ++ instr = SetBranchOffset(pos, target_pos, instr); ++ instr_at_put(pos, instr); ++ } else if (0 == 1 /*IsLui(instr)*/) { ++ /* if (IsPcAddi(instr, t8, 16)) { ++ Instr instr_lui = instr_at(pos + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 2 * kInstrSize); ++ DCHECK(IsLui(instr_lui)); ++ DCHECK(IsOri(instr_ori)); ++ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); ++ DCHECK_EQ(imm & 3, 0); ++ if (is_int16(imm + Assembler::kLongBranchPCOffset - ++ Assembler::kBranchPCOffset)) { ++ // Optimize by converting to regular branch and link with 16-bit ++ // offset. ++ Instr instr_b = REGIMM | BGEZAL; // Branch and link. ++ instr_b = SetBranchOffset(pos, target_pos, instr_b); ++ // Correct ra register to point to one instruction after jalr from ++ // TurboAssembler::BranchAndLinkLong. ++ Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift ++ | kOptimizedBranchAndLinkLongReturnOffset; ++ ++ instr_at_put(pos, instr_b); ++ instr_at_put(pos + 1 * kInstrSize, instr_a); ++ } else { ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ ++ instr_at_put(pos + 0 * kInstrSize, ++ instr_lui | ((imm >> kLuiShift) & kImm16Mask)); ++ instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask)); ++ } ++ } else { ++ Instr instr_lui = instr_at(pos + 0 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize); ++ DCHECK(IsOri(instr_ori)); ++ DCHECK(IsOri(instr_ori2)); ++ ++ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lui &= ~kImm16Mask; ++ instr_ori &= ~kImm16Mask; ++ instr_ori2 &= ~kImm16Mask; ++ ++ instr_at_put(pos + 0 * kInstrSize, ++ instr_lui | ((imm >> 32) & kImm16Mask)); ++ instr_at_put(pos + 1 * kInstrSize, ++ instr_ori | ((imm >> 16) & kImm16Mask)); ++ instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask)); ++ }*/ ++ } else if (IsPcAddi(instr, t8, 16)) { ++ abort(); /* ++ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize); ++ Instr instr_ori = instr_at(pos + 2 * kInstrSize); ++ DCHECK(IsLu12i_w(instr_lu12i_w)); ++ //DCHECK(IsOri(instr_ori)); ++ ++ int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset); ++ ++ if (is_int21(imm_short)) { ++ // Optimize by converting to regular branch with 21-bit ++ // offset ++ Instr instr_b = B; ++ instr_b = SetBranchOffset(pos, target_pos, instr_b); ++ ++ instr_at_put(pos, instr_b); ++ } else { ++ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); ++ DCHECK_EQ(imm & 3, 0); ++ ++ instr_lu12i_w &= 0xfe00001fu; // opcode:7 | bit20 | rd:5 ++ instr_ori &= 0xffc003ffu; // opcode:10 | bit12 | rj:5 | rd:5 ++ ++ instr_at_put(pos + 1 * kInstrSize, ++ instr_lu12i_w | (((imm >> 12) & 0xfffff) << 5)); ++ instr_at_put(pos + 2 * kInstrSize, instr_ori | ++ ((imm & 0xfff) << 10)); ++ }*/ ++ } else if (IsJ(instr)) { ++ /* ++ int32_t imm28 = target_pos - pos; ++ DCHECK_EQ(imm28 & 3, 0); ++ ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ DCHECK(is_uint26(imm26)); ++ // Place 26-bit signed offset with markings. ++ // When code is committed it will be resolved to j/jal. ++ int32_t mark = IsJ(instr) ? 
kJRawMark : kJalRawMark; ++ instr_at_put(pos, mark | (imm26 & kImm26Mask));*/ ++ abort(); ++ } else { ++ /* int32_t imm28 = target_pos - pos; ++ DCHECK_EQ(imm28 & 3, 0); ++ ++ uint32_t imm26 = static_cast(imm28 >> 2); ++ DCHECK(is_uint26(imm26)); ++ // Place raw 26-bit signed offset. ++ // When code is committed it will be resolved to j/jal. ++ instr &= ~kImm26Mask; ++ instr_at_put(pos, instr | (imm26 & kImm26Mask));*/ ++ abort(); ++ } ++} ++ ++void Assembler::print(const Label* L) { ++ if (L->is_unused()) { ++ PrintF("unused label\n"); ++ } else if (L->is_bound()) { ++ PrintF("bound label to %d\n", L->pos()); ++ } else if (L->is_linked()) { ++ Label l; ++ l.link_to(L->pos()); ++ PrintF("unbound label"); ++ while (l.is_linked()) { ++ PrintF("@ %d ", l.pos()); ++ Instr instr = instr_at(l.pos()); ++ if ((instr & ~kImm16Mask) == 0) { ++ PrintF("value\n"); ++ } else { ++ PrintF("%d\n", instr); ++ } ++ next(&l, is_internal_reference(&l)); ++ } ++ } else { ++ PrintF("label in inconsistent state (pos = %d)\n", L->pos_); ++ } ++} ++ ++void Assembler::bind_to(Label* L, int pos) { ++ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position. ++ int trampoline_pos = kInvalidSlotPos; ++ bool is_internal = false; ++ if (L->is_linked() && !trampoline_emitted_) { ++ unbound_labels_count_--; ++ if (!is_internal_reference(L)) { ++ next_buffer_check_ += kTrampolineSlotsSize; ++ } ++ } ++ ++ while (L->is_linked()) { ++ int fixup_pos = L->pos(); ++ int dist = pos - fixup_pos; ++ is_internal = is_internal_reference(L); ++ next(L, is_internal); // Call next before overwriting link with target at ++ // fixup_pos. ++ Instr instr = instr_at(fixup_pos); ++ if (is_internal) { ++ target_at_put(fixup_pos, pos, is_internal); ++ } else { ++ if (IsBranch(instr)) { ++ int branch_offset = BranchOffset(instr); ++ if (dist > branch_offset) { ++ if (trampoline_pos == kInvalidSlotPos) { ++ trampoline_pos = get_trampoline_entry(fixup_pos); ++ CHECK_NE(trampoline_pos, kInvalidSlotPos); ++ } ++ CHECK((trampoline_pos - fixup_pos) <= branch_offset); ++ target_at_put(fixup_pos, trampoline_pos, false); ++ fixup_pos = trampoline_pos; ++ } ++ target_at_put(fixup_pos, pos, false); ++ } else { ++ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) || ++ IsPcAddi(instr, t8, 8)); ++ target_at_put(fixup_pos, pos, false); ++ } ++ } ++ } ++ L->bind_to(pos); ++ ++ // Keep track of the last bound label so we don't eliminate any instructions ++ // before a bound label. ++ if (pos > last_bound_pos_) last_bound_pos_ = pos; ++} ++ ++void Assembler::bind(Label* L) { ++ DCHECK(!L->is_bound()); // Label can only be bound once. 
++ bind_to(L, pc_offset()); ++} ++ ++void Assembler::next(Label* L, bool is_internal) { ++ DCHECK(L->is_linked()); ++ int link = target_at(L->pos(), is_internal); ++ if (link == kEndOfChain) { ++ L->Unuse(); ++ } else if (link == -4) { ++ // Next position is pc_offset == 0 ++ L->link_to(0); ++ } else { ++ DCHECK_GE(link, 0); ++ L->link_to(link); ++ } ++} ++ ++bool Assembler::is_near_c(Label* L) { ++ DCHECK(L->is_bound()); ++ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize; ++} ++ ++bool Assembler::is_near(Label* L, OffsetSize bits) { ++ DCHECK(L->is_bound()); ++ return ((pc_offset() - L->pos()) < ++ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize); ++} ++ ++bool Assembler::is_near_a(Label* L) { ++ DCHECK(L->is_bound()); ++ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize; ++} ++ ++int Assembler::BranchOffset(Instr instr) { ++ int bits = OffsetSize::kOffset16; ++ ++ uint32_t opcode = (instr >> 26) << 26; ++ switch (opcode) { ++ case B: ++ case BL: ++ bits = OffsetSize::kOffset26; ++ break; ++ case BNEZ: ++ case BEQZ: ++ case BCZ: ++ bits = OffsetSize::kOffset21; ++ break; ++ case BNE: ++ case BEQ: ++ case BLT: ++ case BGE: ++ case BLTU: ++ case BGEU: ++ case JIRL: ++ bits = OffsetSize::kOffset16; ++ break; ++ default: ++ break; ++ } ++ ++ return (1 << (bits + 2 - 1)) - 1; ++} ++ ++// We have to use a temporary register for things that can be relocated even ++// if they can be encoded in the LA's 16 bits of immediate-offset instruction ++// space. There is no guarantee that the relocated location can be similarly ++// encoded. ++bool Assembler::MustUseReg(RelocInfo::Mode rmode) { ++ return !RelocInfo::IsNone(rmode); ++} ++ ++void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid()); ++ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | ++ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(BCZ == opcode && is_int21(si21)); ++ DCHECK(cj >= 0 && cj <= 7); ++ int32_t sc = (isEq ? 
cj : cj + 8); ++ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) | ++ ((si21 & 0x1fffff) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenB(Opcode opcode, int32_t si26) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK((B == opcode || BL == opcode) && is_int26(si26)); ++ Instr instr = ++ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16); ++ emit(instr); ++} ++ ++void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(is_int16(si16)); ++ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, ++ FPURegister fj, CFRegister cd) { ++ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D); ++ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk, ++ FPURegister fj, FPURegister rd) { ++ DCHECK((opcode == FSEL)); ++ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, Register rd, ++ bool rjrd) { ++ DCHECK(rjrd); ++ Instr instr = 0; ++ instr = opcode | (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) { ++ Instr instr = opcode | (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) { ++ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) || ++ (opcode == MOVGR2FRH_W)); ++ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) { ++ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) || ++ (opcode == MOVFRH2GR_S)); ++ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) { ++ DCHECK((opcode == MOVGR2FCSR)); ++ Instr instr = opcode | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) { ++ DCHECK((opcode == MOVFCSR2GR)); ++ Instr instr = opcode | (fj.code() << kFjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) { ++ DCHECK((opcode == MOVFR2CF)); ++ Instr instr = opcode | (fj.code() << kFjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) { ++ DCHECK((opcode == MOVCF2FR)); ++ Instr instr = opcode | cj << kFjShift | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) { ++ DCHECK((opcode == MOVGR2CF)); ++ Instr instr = opcode | (rj.code() << kRjShift) | cd; ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) { ++ DCHECK((opcode == MOVCF2GR)); ++ Instr instr = opcode | cj << kFjShift | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, ++ Register rd) { ++ Instr instr = ++ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void 
Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj, ++ FPURegister fd) { ++ Instr instr = ++ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk, ++ FPURegister fj, FPURegister fd) { ++ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) | ++ (fj.code() << kFjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenRegister(Opcode opcode, Register rk, Register rj, ++ FPURegister fd) { ++ Instr instr = ++ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj, ++ Register rd) { ++ DCHECK(is_uint3(bit3)); ++ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj, ++ Register rd) { ++ DCHECK(is_uint6(bit6m) && is_uint6(bit6l)); ++ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift | ++ (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) { ++ // DCHECK(is_uint20(bit20) || is_int20(bit20)); ++ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit15) { ++ DCHECK(is_uint15(bit15)); ++ Instr instr = opcode | (bit15 & 0x7fff); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd, ++ int32_t value_bits) { ++ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 || ++ value_bits == 16); ++ uint32_t imm = value & 0x3f; ++ if (value_bits == 12) { ++ imm = value & kImm12Mask; ++ } else if (value_bits == 14) { ++ imm = value & 0x3fff; ++ } else if (value_bits == 16) { ++ imm = value & kImm16Mask; ++ } ++ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code(); ++ emit(instr); ++} ++ ++void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj, ++ FPURegister fd) { ++ DCHECK(is_int12(bit12)); ++ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) | ++ (rj.code() << kRjShift) | fd.code(); ++ emit(instr); ++} ++ ++// Returns the next free trampoline entry. ++int32_t Assembler::get_trampoline_entry(int32_t pos) { ++ int32_t trampoline_entry = kInvalidSlotPos; ++ if (!internal_trampoline_exception_) { ++ if (trampoline_.start() > pos) { ++ trampoline_entry = trampoline_.take_slot(); ++ } ++ ++ if (kInvalidSlotPos == trampoline_entry) { ++ internal_trampoline_exception_ = true; ++ } ++ } ++ return trampoline_entry; ++} ++ ++uint64_t Assembler::jump_address(Label* L) { ++ int64_t target_pos; ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. ++ L->link_to(pc_offset()); ++ } else { ++ L->link_to(pc_offset()); ++ return kEndOfJumpChain; ++ } ++ } ++ uint64_t imm = reinterpret_cast(buffer_start_) + target_pos; ++ DCHECK_EQ(imm & 3, 0); ++ ++ return imm; ++} ++ ++uint64_t Assembler::branch_long_offset(Label* L) { ++ int64_t target_pos; ++ ++ if (L->is_bound()) { ++ target_pos = L->pos(); ++ } else { ++ if (L->is_linked()) { ++ target_pos = L->pos(); // L's link. 
++
++uint64_t Assembler::branch_long_offset(Label* L) {
++  int64_t target_pos;
++
++  if (L->is_bound()) {
++    target_pos = L->pos();
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();  // L's link.
++      L->link_to(pc_offset());
++    } else {
++      L->link_to(pc_offset());
++      return kEndOfJumpChain;
++    }
++  }
++  int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
++  DCHECK_EQ(offset & 3, 0);
++
++  return static_cast<uint64_t>(offset);
++}
++
++int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
++  int32_t target_pos;
++
++  if (L->is_bound()) {
++    target_pos = L->pos();
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();
++      L->link_to(pc_offset());
++    } else {
++      L->link_to(pc_offset());
++      if (!trampoline_emitted_) {
++        unbound_labels_count_++;
++        next_buffer_check_ -= kTrampolineSlotsSize;
++      }
++      return kEndOfChain;
++    }
++  }
++
++  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
++  DCHECK(is_intn(offset, bits + 2));
++  DCHECK_EQ(offset & 3, 0);
++
++  return offset;
++}
++
++void Assembler::label_at_put(Label* L, int at_offset) {
++  int target_pos;
++  if (L->is_bound()) {
++    target_pos = L->pos();
++    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
++  } else {
++    if (L->is_linked()) {
++      target_pos = L->pos();  // L's link.
++      int32_t imm18 = target_pos - at_offset;
++      DCHECK_EQ(imm18 & 3, 0);
++      int32_t imm16 = imm18 >> 2;
++      DCHECK(is_int16(imm16));
++      instr_at_put(at_offset, (imm16 & kImm16Mask));
++    } else {
++      target_pos = kEndOfChain;
++      instr_at_put(at_offset, 0);
++      if (!trampoline_emitted_) {
++        unbound_labels_count_++;
++        next_buffer_check_ -= kTrampolineSlotsSize;
++      }
++    }
++    L->link_to(at_offset);
++  }
++  // TODO PushBackTrack()
++}
++
++//------- Branch and jump instructions --------
++
++void Assembler::b(int32_t offset) { GenB(B, offset); }
++
++void Assembler::bl(int32_t offset) { GenB(BL, offset); }
++
++void Assembler::beq(Register rj, Register rd, int32_t offset) {
++  GenBJ(BEQ, rj, rd, offset);
++}
++
++void Assembler::bne(Register rj, Register rd, int32_t offset) {
++  GenBJ(BNE, rj, rd, offset);
++}
++
++void Assembler::blt(Register rj, Register rd, int32_t offset) {
++  GenBJ(BLT, rj, rd, offset);
++}
++
++void Assembler::bge(Register rj, Register rd, int32_t offset) {
++  GenBJ(BGE, rj, rd, offset);
++}
++
++void Assembler::bltu(Register rj, Register rd, int32_t offset) {
++  GenBJ(BLTU, rj, rd, offset);
++}
++
++void Assembler::bgeu(Register rj, Register rd, int32_t offset) {
++  GenBJ(BGEU, rj, rd, offset);
++}
++
++void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); }
++void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); }
++
++void Assembler::jirl(Register rd, Register rj, int32_t offset) {
++  GenBJ(JIRL, rj, rd, offset);
++}
++
++void Assembler::bceqz(CFRegister cj, int32_t si21) {
++  GenB(BCZ, cj, si21, true);
++}
++
++void Assembler::bcnez(CFRegister cj, int32_t si21) {
++  GenB(BCZ, cj, si21, false);
++}
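Every branch encoder above stores a word offset, which is why branch_offset_helper checks both 4-byte alignment and is_intn(offset, bits + 2): the instruction field holds offset >> 2, so a 16-bit field really covers an 18-bit byte range. A sketch of the same reachability test:

    #include <cstdint>

    // True if 'byte_offset' fits a branch whose field is 'bits' wide:
    // it must be word aligned and representable in bits + 2 signed bits.
    bool FitsBranch(int64_t byte_offset, int bits) {
      if (byte_offset & 3) return false;         // not 4-byte aligned
      int64_t limit = int64_t{1} << (bits + 1);  // half of the signed range
      return byte_offset >= -limit && byte_offset < limit;
    }
    // beq/bne use 16 bits (+/-128KB), beqz/bnez 21 bits (+/-4MB),
    // b/bl 26 bits (+/-128MB), matching kMax16/21/26BranchOffset below.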
++
++// -------Data-processing-instructions---------
++
++// Arithmetic.
++void Assembler::add_w(Register rd, Register rj, Register rk) {
++  GenRegister(ADD_W, rk, rj, rd);
++}
++
++void Assembler::add_d(Register rd, Register rj, Register rk) {
++  GenRegister(ADD_D, rk, rj, rd);
++}
++
++void Assembler::sub_w(Register rd, Register rj, Register rk) {
++  GenRegister(SUB_W, rk, rj, rd);
++}
++
++void Assembler::sub_d(Register rd, Register rj, Register rk) {
++  GenRegister(SUB_D, rk, rj, rd);
++}
++
++void Assembler::addi_w(Register rd, Register rj, int32_t si12) {
++  GenImm(ADDI_W, si12, rj, rd, 12);
++}
++
++void Assembler::addi_d(Register rd, Register rj, int32_t si12) {
++  GenImm(ADDI_D, si12, rj, rd, 12);
++}
++
++void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) {
++  GenImm(ADDU16I_D, si16, rj, rd, 16);
++}
++
++void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) {
++  DCHECK(is_uint2(sa2 - 1));
++  GenImm(ALSL_W, sa2 - 1, rk, rj, rd);
++}
++
++void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) {
++  DCHECK(is_uint2(sa2 - 1));
++  GenImm(ALSL_WU, sa2 + 3, rk, rj, rd);
++}
++
++void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) {
++  DCHECK(is_uint2(sa2 - 1));
++  GenImm(ALSL_D, sa2 - 1, rk, rj, rd);
++}
++
++void Assembler::lu12i_w(Register rd, int32_t si20) {
++  GenImm(LU12I_W, si20, rd);
++}
++
++void Assembler::lu32i_d(Register rd, int32_t si20) {
++  GenImm(LU32I_D, si20, rd);
++}
++
++void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) {
++  GenImm(LU52I_D, si12, rj, rd, 12);
++}
++
++void Assembler::slt(Register rd, Register rj, Register rk) {
++  GenRegister(SLT, rk, rj, rd);
++}
++
++void Assembler::sltu(Register rd, Register rj, Register rk) {
++  GenRegister(SLTU, rk, rj, rd);
++}
++
++void Assembler::slti(Register rd, Register rj, int32_t si12) {
++  GenImm(SLTI, si12, rj, rd, 12);
++}
++
++void Assembler::sltui(Register rd, Register rj, int32_t si12) {
++  GenImm(SLTUI, si12, rj, rd, 12);
++}
++
++void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); }
++
++void Assembler::pcaddu12i(Register rd, int32_t si20) {
++  GenImm(PCADDU12I, si20, rd);
++}
++
++void Assembler::pcaddu18i(Register rd, int32_t si20) {
++  GenImm(PCADDU18I, si20, rd);
++}
++
++void Assembler::pcalau12i(Register rd, int32_t si20) {
++  GenImm(PCALAU12I, si20, rd);
++}
++
++void Assembler::and_(Register rd, Register rj, Register rk) {
++  GenRegister(AND, rk, rj, rd);
++}
++
++void Assembler::or_(Register rd, Register rj, Register rk) {
++  GenRegister(OR, rk, rj, rd);
++}
++
++void Assembler::xor_(Register rd, Register rj, Register rk) {
++  GenRegister(XOR, rk, rj, rd);
++}
++
++void Assembler::nor(Register rd, Register rj, Register rk) {
++  GenRegister(NOR, rk, rj, rd);
++}
++
++void Assembler::andn(Register rd, Register rj, Register rk) {
++  GenRegister(ANDN, rk, rj, rd);
++}
++
++void Assembler::orn(Register rd, Register rj, Register rk) {
++  GenRegister(ORN, rk, rj, rd);
++}
++
++void Assembler::andi(Register rd, Register rj, int32_t ui12) {
++  GenImm(ANDI, ui12, rj, rd, 12);
++}
++
++void Assembler::ori(Register rd, Register rj, int32_t ui12) {
++  GenImm(ORI, ui12, rj, rd, 12);
++}
++
++void Assembler::xori(Register rd, Register rj, int32_t ui12) {
++  GenImm(XORI, ui12, rj, rd, 12);
++}
++
++void Assembler::mul_w(Register rd, Register rj, Register rk) {
++  GenRegister(MUL_W, rk, rj, rd);
++}
++
++void Assembler::mulh_w(Register rd, Register rj, Register rk) {
++  GenRegister(MULH_W, rk, rj, rd);
++}
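lu12i_w, ori, lu32i_d and lu52i_d are the building blocks the macro assembler combines to materialize wide constants: 12 low bits, then bits 31..12, 51..32 and 63..52. A sketch of how a 64-bit immediate decomposes into those four fields (the struct and names are illustrative only):

    #include <cstdint>

    // Fields consumed by an ori / lu12i_w / lu32i_d / lu52i_d sequence.
    struct ImmParts {
      uint32_t lo12;   // ori     : bits 11..0
      uint32_t mid20;  // lu12i_w : bits 31..12
      uint32_t hi20;   // lu32i_d : bits 51..32
      uint32_t top12;  // lu52i_d : bits 63..52
    };

    ImmParts SplitImm64(uint64_t imm) {
      return {static_cast<uint32_t>(imm & 0xfff),
              static_cast<uint32_t>((imm >> 12) & 0xfffff),
              static_cast<uint32_t>((imm >> 32) & 0xfffff),
              static_cast<uint32_t>((imm >> 52) & 0xfff)};
    }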
++
++void Assembler::mulh_wu(Register rd, Register rj, Register rk) {
++  GenRegister(MULH_WU, rk, rj, rd);
++}
++
++void Assembler::mul_d(Register rd, Register rj, Register rk) {
++  GenRegister(MUL_D, rk, rj, rd);
++}
++
++void Assembler::mulh_d(Register rd, Register rj, Register rk) {
++  GenRegister(MULH_D, rk, rj, rd);
++}
++
++void Assembler::mulh_du(Register rd, Register rj, Register rk) {
++  GenRegister(MULH_DU, rk, rj, rd);
++}
++
++void Assembler::mulw_d_w(Register rd, Register rj, Register rk) {
++  GenRegister(MULW_D_W, rk, rj, rd);
++}
++
++void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) {
++  GenRegister(MULW_D_WU, rk, rj, rd);
++}
++
++void Assembler::div_w(Register rd, Register rj, Register rk) {
++  GenRegister(DIV_W, rk, rj, rd);
++}
++
++void Assembler::mod_w(Register rd, Register rj, Register rk) {
++  GenRegister(MOD_W, rk, rj, rd);
++}
++
++void Assembler::div_wu(Register rd, Register rj, Register rk) {
++  GenRegister(DIV_WU, rk, rj, rd);
++}
++
++void Assembler::mod_wu(Register rd, Register rj, Register rk) {
++  GenRegister(MOD_WU, rk, rj, rd);
++}
++
++void Assembler::div_d(Register rd, Register rj, Register rk) {
++  GenRegister(DIV_D, rk, rj, rd);
++}
++
++void Assembler::mod_d(Register rd, Register rj, Register rk) {
++  GenRegister(MOD_D, rk, rj, rd);
++}
++
++void Assembler::div_du(Register rd, Register rj, Register rk) {
++  GenRegister(DIV_DU, rk, rj, rd);
++}
++
++void Assembler::mod_du(Register rd, Register rj, Register rk) {
++  GenRegister(MOD_DU, rk, rj, rd);
++}
++
++// Shifts.
++void Assembler::sll_w(Register rd, Register rj, Register rk) {
++  GenRegister(SLL_W, rk, rj, rd);
++}
++
++void Assembler::srl_w(Register rd, Register rj, Register rk) {
++  GenRegister(SRL_W, rk, rj, rd);
++}
++
++void Assembler::sra_w(Register rd, Register rj, Register rk) {
++  GenRegister(SRA_W, rk, rj, rd);
++}
++
++void Assembler::rotr_w(Register rd, Register rj, Register rk) {
++  GenRegister(ROTR_W, rk, rj, rd);
++}
++
++void Assembler::slli_w(Register rd, Register rj, int32_t ui5) {
++  DCHECK(is_uint5(ui5));
++  GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6);
++}
++
++void Assembler::srli_w(Register rd, Register rj, int32_t ui5) {
++  DCHECK(is_uint5(ui5));
++  GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6);
++}
++
++void Assembler::srai_w(Register rd, Register rj, int32_t ui5) {
++  DCHECK(is_uint5(ui5));
++  GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6);
++}
++
++void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) {
++  DCHECK(is_uint5(ui5));
++  GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6);
++}
++
++void Assembler::sll_d(Register rd, Register rj, Register rk) {
++  GenRegister(SLL_D, rk, rj, rd);
++}
++
++void Assembler::srl_d(Register rd, Register rj, Register rk) {
++  GenRegister(SRL_D, rk, rj, rd);
++}
++
++void Assembler::sra_d(Register rd, Register rj, Register rk) {
++  GenRegister(SRA_D, rk, rj, rd);
++}
++
++void Assembler::rotr_d(Register rd, Register rj, Register rk) {
++  GenRegister(ROTR_D, rk, rj, rd);
++}
++
++void Assembler::slli_d(Register rd, Register rj, int32_t ui6) {
++  GenImm(SLLI_D, ui6, rj, rd, 6);
++}
++
++void Assembler::srli_d(Register rd, Register rj, int32_t ui6) {
++  GenImm(SRLI_D, ui6, rj, rd, 6);
++}
++
++void Assembler::srai_d(Register rd, Register rj, int32_t ui6) {
++  GenImm(SRAI_D, ui6, rj, rd, 6);
++}
++
++void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) {
++  GenImm(ROTRI_D, ui6, rj, rd, 6);
++}
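Note the ui5 + 0x20 in the 32-bit shift-immediate emitters: the .w and .d forms appear to share opcode space, with the top bit of the 6-bit field selecting the word form and leaving 5 bits of shift amount. In effect the field is built as (a sketch of the assumed encoding, not taken verbatim from the port):

    #include <cstdint>

    // Build the 6-bit shift-amount field: the .w forms force bit 5 to 1
    // (the 'ui5 + 0x20' above) and keep a 5-bit amount; .d uses all 6 bits.
    uint32_t ShiftField(uint32_t amount, bool word_form) {
      return word_form ? (0x20 | (amount & 0x1f)) : (amount & 0x3f);
    }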
++
++// Bit twiddling.
++void Assembler::ext_w_b(Register rd, Register rj) {
++  GenRegister(EXT_W_B, rj, rd);
++}
++
++void Assembler::ext_w_h(Register rd, Register rj) {
++  GenRegister(EXT_W_H, rj, rd);
++}
++
++void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); }
++
++void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); }
++
++void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); }
++
++void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); }
++
++void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); }
++
++void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); }
++
++void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); }
++
++void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); }
++
++void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) {
++  DCHECK(is_uint2(sa2));
++  GenImm(BYTEPICK_W, sa2, rk, rj, rd);
++}
++
++void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) {
++  GenImm(BYTEPICK_D, sa3, rk, rj, rd);
++}
++
++void Assembler::revb_2h(Register rd, Register rj) {
++  GenRegister(REVB_2H, rj, rd);
++}
++
++void Assembler::revb_4h(Register rd, Register rj) {
++  GenRegister(REVB_4H, rj, rd);
++}
++
++void Assembler::revb_2w(Register rd, Register rj) {
++  GenRegister(REVB_2W, rj, rd);
++}
++
++void Assembler::revb_d(Register rd, Register rj) {
++  GenRegister(REVB_D, rj, rd);
++}
++
++void Assembler::revh_2w(Register rd, Register rj) {
++  GenRegister(REVH_2W, rj, rd);
++}
++
++void Assembler::revh_d(Register rd, Register rj) {
++  GenRegister(REVH_D, rj, rd);
++}
++
++void Assembler::bitrev_4b(Register rd, Register rj) {
++  GenRegister(BITREV_4B, rj, rd);
++}
++
++void Assembler::bitrev_8b(Register rd, Register rj) {
++  GenRegister(BITREV_8B, rj, rd);
++}
++
++void Assembler::bitrev_w(Register rd, Register rj) {
++  GenRegister(BITREV_W, rj, rd);
++}
++
++void Assembler::bitrev_d(Register rd, Register rj) {
++  GenRegister(BITREV_D, rj, rd);
++}
++
++void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw,
++                          int32_t lsbw) {
++  DCHECK(is_uint5(msbw) && is_uint5(lsbw));
++  GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd);
++}
++
++void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd,
++                          int32_t lsbd) {
++  GenImm(BSTRINS_D, msbd, lsbd, rj, rd);
++}
++
++void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw,
++                           int32_t lsbw) {
++  DCHECK(is_uint5(msbw) && is_uint5(lsbw));
++  GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd);
++}
++
++void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd,
++                           int32_t lsbd) {
++  GenImm(BSTRPICK_D, msbd, lsbd, rj, rd);
++}
++
++void Assembler::maskeqz(Register rd, Register rj, Register rk) {
++  GenRegister(MASKEQZ, rk, rj, rd);
++}
++
++void Assembler::masknez(Register rd, Register rj, Register rk) {
++  GenRegister(MASKNEZ, rk, rj, rd);
++}
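bstrins and bstrpick take an inclusive bit range [msb, lsb]; the +0x20 bias on the _w forms again steals a field bit for opcode selection. As a reference model of what bstrpick_d computes (bstrins_d is the matching insert):

    #include <cstdint>

    // Reference semantics of bstrpick.d: extract bits msb..lsb of 'src'
    // (inclusive, msb >= lsb) into the low bits, zero extended.
    uint64_t BstrPickD(uint64_t src, int msb, int lsb) {
      int width = msb - lsb + 1;
      uint64_t mask =
          (width == 64) ? ~uint64_t{0} : ((uint64_t{1} << width) - 1);
      return (src >> lsb) & mask;
    }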
++
++// Memory-instructions
++void Assembler::ld_b(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_B, si12, rj, rd, 12);
++}
++
++void Assembler::ld_h(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_H, si12, rj, rd, 12);
++}
++
++void Assembler::ld_w(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_W, si12, rj, rd, 12);
++}
++
++void Assembler::ld_d(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_D, si12, rj, rd, 12);
++}
++
++void Assembler::ld_bu(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_BU, si12, rj, rd, 12);
++}
++
++void Assembler::ld_hu(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_HU, si12, rj, rd, 12);
++}
++
++void Assembler::ld_wu(Register rd, Register rj, int32_t si12) {
++  GenImm(LD_WU, si12, rj, rd, 12);
++}
++
++void Assembler::st_b(Register rd, Register rj, int32_t si12) {
++  GenImm(ST_B, si12, rj, rd, 12);
++}
++
++void Assembler::st_h(Register rd, Register rj, int32_t si12) {
++  GenImm(ST_H, si12, rj, rd, 12);
++}
++
++void Assembler::st_w(Register rd, Register rj, int32_t si12) {
++  GenImm(ST_W, si12, rj, rd, 12);
++}
++
++void Assembler::st_d(Register rd, Register rj, int32_t si12) {
++  GenImm(ST_D, si12, rj, rd, 12);
++}
++
++void Assembler::ldx_b(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_B, rk, rj, rd);
++}
++
++void Assembler::ldx_h(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_H, rk, rj, rd);
++}
++
++void Assembler::ldx_w(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_W, rk, rj, rd);
++}
++
++void Assembler::ldx_d(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_D, rk, rj, rd);
++}
++
++void Assembler::ldx_bu(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_BU, rk, rj, rd);
++}
++
++void Assembler::ldx_hu(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_HU, rk, rj, rd);
++}
++
++void Assembler::ldx_wu(Register rd, Register rj, Register rk) {
++  GenRegister(LDX_WU, rk, rj, rd);
++}
++
++void Assembler::stx_b(Register rd, Register rj, Register rk) {
++  GenRegister(STX_B, rk, rj, rd);
++}
++
++void Assembler::stx_h(Register rd, Register rj, Register rk) {
++  GenRegister(STX_H, rk, rj, rd);
++}
++
++void Assembler::stx_w(Register rd, Register rj, Register rk) {
++  GenRegister(STX_W, rk, rj, rd);
++}
++
++void Assembler::stx_d(Register rd, Register rj, Register rk) {
++  GenRegister(STX_D, rk, rj, rd);
++}
++
++void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(LDPTR_W, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(LDPTR_D, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::stptr_w(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(STPTR_W, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::stptr_d(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(STPTR_D, si14 >> 2, rj, rd, 14);
++}
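ldptr/stptr (and ll/sc below) store their 14-bit offset pre-shifted: the byte offset must be a multiple of 4 and the emitter encodes si14 >> 2, which recovers a +/-32KB reach from a 14-bit field. The encodability test the DCHECKs above perform is:

    #include <cstdint>

    // A si14<<2 offset is encodable iff it is 4-byte aligned and the
    // unscaled byte value fits in 16 signed bits (14 bits after >> 2).
    bool FitsSi14Shift2(int32_t byte_offset) {
      return (byte_offset & 0x3) == 0 && byte_offset >= -(1 << 15) &&
             byte_offset < (1 << 15);
    }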
++
++void Assembler::amswap_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMSWAP_W, rk, rj, rd);
++}
++
++void Assembler::amswap_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMSWAP_D, rk, rj, rd);
++}
++
++void Assembler::amadd_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMADD_W, rk, rj, rd);
++}
++
++void Assembler::amadd_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMADD_D, rk, rj, rd);
++}
++
++void Assembler::amand_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMAND_W, rk, rj, rd);
++}
++
++void Assembler::amand_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMAND_D, rk, rj, rd);
++}
++
++void Assembler::amor_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMOR_W, rk, rj, rd);
++}
++
++void Assembler::amor_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMOR_D, rk, rj, rd);
++}
++
++void Assembler::amxor_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMXOR_W, rk, rj, rd);
++}
++
++void Assembler::amxor_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMXOR_D, rk, rj, rd);
++}
++
++void Assembler::ammax_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_W, rk, rj, rd);
++}
++
++void Assembler::ammax_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_D, rk, rj, rd);
++}
++
++void Assembler::ammin_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_W, rk, rj, rd);
++}
++
++void Assembler::ammin_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_D, rk, rj, rd);
++}
++
++void Assembler::ammax_wu(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_WU, rk, rj, rd);
++}
++
++void Assembler::ammax_du(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_DU, rk, rj, rd);
++}
++
++void Assembler::ammin_wu(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_WU, rk, rj, rd);
++}
++
++void Assembler::ammin_du(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_DU, rk, rj, rd);
++}
++
++void Assembler::amswap_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMSWAP_DB_W, rk, rj, rd);
++}
++
++void Assembler::amswap_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMSWAP_DB_D, rk, rj, rd);
++}
++
++void Assembler::amadd_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMADD_DB_W, rk, rj, rd);
++}
++
++void Assembler::amadd_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMADD_DB_D, rk, rj, rd);
++}
++
++void Assembler::amand_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMAND_DB_W, rk, rj, rd);
++}
++
++void Assembler::amand_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMAND_DB_D, rk, rj, rd);
++}
++
++void Assembler::amor_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMOR_DB_W, rk, rj, rd);
++}
++
++void Assembler::amor_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMOR_DB_D, rk, rj, rd);
++}
++
++void Assembler::amxor_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMXOR_DB_W, rk, rj, rd);
++}
++
++void Assembler::amxor_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMXOR_DB_D, rk, rj, rd);
++}
++
++void Assembler::ammax_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_DB_W, rk, rj, rd);
++}
++
++void Assembler::ammax_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_DB_D, rk, rj, rd);
++}
++
++void Assembler::ammin_db_w(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_DB_W, rk, rj, rd);
++}
++
++void Assembler::ammin_db_d(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_DB_D, rk, rj, rd);
++}
++
++void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_DB_WU, rk, rj, rd);
++}
++
++void Assembler::ammax_db_du(Register rd, Register rk, Register rj) {
++  GenRegister(AMMAX_DB_DU, rk, rj, rd);
++}
++
++void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_DB_WU, rk, rj, rd);
++}
++
++void Assembler::ammin_db_du(Register rd, Register rk, Register rj) {
++  GenRegister(AMMIN_DB_DU, rk, rj, rd);
++}
++
++void Assembler::ll_w(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(LL_W, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::ll_d(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(LL_D, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::sc_w(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(SC_W, si14 >> 2, rj, rd, 14);
++}
++
++void Assembler::sc_d(Register rd, Register rj, int32_t si14) {
++  DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
++  GenImm(SC_D, si14 >> 2, rj, rd, 14);
++}
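The ll/sc pair is what generated code falls back on when none of the am* read-modify-write forms above fits: ll_w takes a reservation and sc_w stores only if the reservation still holds, writing 1 or 0 into its rd. A rough compare-and-swap skeleton built from these emitters; the register choices and surrounding plumbing are hypothetical, and sc_w clobbers its rd, hence the extra copy:

    // Sketch: 32-bit CAS loop; *addr is replaced by new_value only if it
    // still equals expected. All registers are caller-chosen scratch regs.
    void EmitCas32(Assembler* masm, Register addr, Register expected,
                   Register new_value, Register current, Register store_ok) {
      Label retry, done;
      masm->bind(&retry);
      masm->ll_w(current, addr, 0);         // load-linked current value
      masm->bne(current, expected, &done);  // mismatch: give up
      masm->or_(store_ok, new_value, zero_reg);  // copy; sc_w clobbers rd
      masm->sc_w(store_ok, addr, 0);        // conditional store, rd = 1/0
      masm->beqz(store_ok, &retry);         // reservation lost: retry
      masm->bind(&done);
    }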
++
++void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); }
++
++void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); }
++
++// Break / Trap instructions.
++void Assembler::break_(uint32_t code, bool break_as_stop) {
++  DCHECK(
++      (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
++      (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
++  GenImm(BREAK, code);
++}
++
++void Assembler::stop(uint32_t code) {
++  DCHECK_GT(code, kMaxWatchpointCode);
++  DCHECK_LE(code, kMaxStopCode);
++#if defined(V8_HOST_ARCH_LOONG64)
++  break_(0x4321);
++#else  // V8_HOST_ARCH_LOONG64
++  break_(code, true);
++#endif
++}
++
++void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FADD_S, fk, fj, fd);
++}
++
++void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FADD_D, fk, fj, fd);
++}
++
++void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FSUB_S, fk, fj, fd);
++}
++
++void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FSUB_D, fk, fj, fd);
++}
++
++void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMUL_S, fk, fj, fd);
++}
++
++void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMUL_D, fk, fj, fd);
++}
++
++void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FDIV_S, fk, fj, fd);
++}
++
++void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FDIV_D, fk, fj, fd);
++}
++
++void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
++                        FPURegister fa) {
++  GenRegister(FMADD_S, fa, fk, fj, fd);
++}
++
++void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
++                        FPURegister fa) {
++  GenRegister(FMADD_D, fa, fk, fj, fd);
++}
++
++void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
++                        FPURegister fa) {
++  GenRegister(FMSUB_S, fa, fk, fj, fd);
++}
++
++void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
++                        FPURegister fa) {
++  GenRegister(FMSUB_D, fa, fk, fj, fd);
++}
++
++void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
++                         FPURegister fa) {
++  GenRegister(FNMADD_S, fa, fk, fj, fd);
++}
++
++void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
++                         FPURegister fa) {
++  GenRegister(FNMADD_D, fa, fk, fj, fd);
++}
++
++void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
++                         FPURegister fa) {
++  GenRegister(FNMSUB_S, fa, fk, fj, fd);
++}
++
++void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
++                         FPURegister fa) {
++  GenRegister(FNMSUB_D, fa, fk, fj, fd);
++}
++
++void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMAX_S, fk, fj, fd);
++}
++
++void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMAX_D, fk, fj, fd);
++}
++
++void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMIN_S, fk, fj, fd);
++}
++
++void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMIN_D, fk, fj, fd);
++}
++
++void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMAXA_S, fk, fj, fd);
++}
++
++void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMAXA_D, fk, fj, fd);
++}
++
++void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMINA_S, fk, fj, fd);
++}
++
++void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FMINA_D, fk, fj, fd);
++}
++
++void Assembler::fabs_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FABS_S, fj, fd);
++}
++
++void Assembler::fabs_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FABS_D, fj, fd);
++}
++
++void Assembler::fneg_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FNEG_S, fj, fd);
++}
++
++void Assembler::fneg_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FNEG_D, fj, fd);
++}
++
++void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FSQRT_S, fj, fd);
++}
++
++void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FSQRT_D, fj, fd);
++}
++
++void Assembler::frecip_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FRECIP_S, fj, fd);
++}
++
++void Assembler::frecip_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FRECIP_D, fj, fd);
++}
++
++void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FRSQRT_S, fj, fd);
++}
++
++void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FRSQRT_D, fj, fd);
++}
++
++void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FSCALEB_S, fk, fj, fd);
++}
++
++void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FSCALEB_D, fk, fj, fd);
++}
++
++void Assembler::flogb_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FLOGB_S, fj, fd);
++}
++
++void Assembler::flogb_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FLOGB_D, fj, fd);
++}
++
++void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FCOPYSIGN_S, fk, fj, fd);
++}
++
++void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) {
++  GenRegister(FCOPYSIGN_D, fk, fj, fd);
++}
++
++void Assembler::fclass_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FCLASS_S, fj, fd);
++}
++
++void Assembler::fclass_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FCLASS_D, fj, fd);
++}
++
++void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
++                            CFRegister cd) {
++  GenCmp(FCMP_COND_S, cc, fk, fj, cd);
++}
++
++void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
++                            CFRegister cd) {
++  GenCmp(FCMP_COND_D, cc, fk, fj, cd);
++}
++
++void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FCVT_S_D, fj, fd);
++}
++
++void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FCVT_D_S, fj, fd);
++}
++
++void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) {
++  GenRegister(FFINT_S_W, fj, fd);
++}
++
++void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) {
++  GenRegister(FFINT_S_L, fj, fd);
++}
++
++void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) {
++  GenRegister(FFINT_D_W, fj, fd);
++}
++
++void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) {
++  GenRegister(FFINT_D_L, fj, fd);
++}
++
++void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINT_W_S, fj, fd);
++}
++
++void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINT_W_D, fj, fd);
++}
++
++void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINT_L_S, fj, fd);
++}
++
++void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINT_L_D, fj, fd);
++}
++
++void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRM_W_S, fj, fd);
++}
++
++void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRM_W_D, fj, fd);
++}
++
++void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRM_L_S, fj, fd);
++}
++
++void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRM_L_D, fj, fd);
++}
++
++void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRP_W_S, fj, fd);
++}
++
++void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRP_W_D, fj, fd);
++}
++
++void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRP_L_S, fj, fd);
++}
++
++void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRP_L_D, fj, fd);
++}
++
++void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRZ_W_S, fj, fd);
++}
++
++void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRZ_W_D, fj, fd);
++}
++
++void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRZ_L_S, fj, fd);
++}
++
++void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRZ_L_D, fj, fd);
++}
++
++void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRNE_W_S, fj, fd);
++}
++
++void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRNE_W_D, fj, fd);
++}
++
++void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRNE_L_S, fj, fd);
++}
++
++void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FTINTRNE_L_D, fj, fd);
++}
++
++void Assembler::frint_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FRINT_S, fj, fd);
++}
++
++void Assembler::frint_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FRINT_D, fj, fd);
++}
++
++void Assembler::fmov_s(FPURegister fd, FPURegister fj) {
++  GenRegister(FMOV_S, fj, fd);
++}
++
++void Assembler::fmov_d(FPURegister fd, FPURegister fj) {
++  GenRegister(FMOV_D, fj, fd);
++}
++
++void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj,
++                     FPURegister fk) {
++  GenSel(FSEL, ca, fk, fj, fd);
++}
++
++void Assembler::movgr2fr_w(FPURegister fd, Register rj) {
++  GenRegister(MOVGR2FR_W, rj, fd);
++}
++
++void Assembler::movgr2fr_d(FPURegister fd, Register rj) {
++  GenRegister(MOVGR2FR_D, rj, fd);
++}
++
++void Assembler::movgr2frh_w(FPURegister fd, Register rj) {
++  GenRegister(MOVGR2FRH_W, rj, fd);
++}
++
++void Assembler::movfr2gr_s(Register rd, FPURegister fj) {
++  GenRegister(MOVFR2GR_S, fj, rd);
++}
++
++void Assembler::movfr2gr_d(Register rd, FPURegister fj) {
++  GenRegister(MOVFR2GR_D, fj, rd);
++}
++
++void Assembler::movfrh2gr_s(Register rd, FPURegister fj) {
++  GenRegister(MOVFRH2GR_S, fj, rd);
++}
++
++void Assembler::movgr2fcsr(Register rj) { GenRegister(MOVGR2FCSR, rj, FCSR); }
++
++void Assembler::movfcsr2gr(Register rd) { GenRegister(MOVFCSR2GR, FCSR, rd); }
++
++void Assembler::movfr2cf(CFRegister cd, FPURegister fj) {
++  GenRegister(MOVFR2CF, fj, cd);
++}
++
++void Assembler::movcf2fr(FPURegister fd, CFRegister cj) {
++  GenRegister(MOVCF2FR, cj, fd);
++}
++
++void Assembler::movgr2cf(CFRegister cd, Register rj) {
++  GenRegister(MOVGR2CF, rj, cd);
++}
++
++void Assembler::movcf2gr(Register rd, CFRegister cj) {
++  GenRegister(MOVCF2GR, cj, rd);
++}
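fcmp_cond_s/d deposit their result in one of the condition-flag registers, which fsel (and bceqz/bcnez above) then consume, so a floating-point select is a two-step sequence. Roughly, and heavily hedged: CLT and FCC0 stand in for the real FPUCondition/CFRegister enumerators, and this assumes fsel(ca, fd, fj, fk) yields fd = ca ? fk : fj, matching the GenSel operand order; check the ISA manual before relying on it:

    // Sketch: fd = (fa < fb) ? fa : fb, routed through condition flag FCC0.
    void EmitSelectLess(Assembler* masm, FPURegister fd, FPURegister fa,
                        FPURegister fb) {
      masm->fcmp_cond_d(CLT, fa, fb, FCC0);  // FCC0 = (fa < fb)
      masm->fsel(FCC0, fd, fb, fa);          // fd = FCC0 ? fa : fb
    }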
++
++void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) {
++  GenImm(FLD_S, si12, rj, fd);
++}
++
++void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) {
++  GenImm(FLD_D, si12, rj, fd);
++}
++
++void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) {
++  GenImm(FST_S, si12, rj, fd);
++}
++
++void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) {
++  GenImm(FST_D, si12, rj, fd);
++}
++
++void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) {
++  GenRegister(FLDX_S, rk, rj, fd);
++}
++
++void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) {
++  GenRegister(FLDX_D, rk, rj, fd);
++}
++
++void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) {
++  GenRegister(FSTX_S, rk, rj, fd);
++}
++
++void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) {
++  GenRegister(FSTX_D, rk, rj, fd);
++}
++
++// ------------Memory-instructions-------------
++
++/*void Assembler::AdjustBaseAndOffset(MemOperand* src,
++                                    OffsetAccessType access_type,
++                                    int second_access_add_to_offset) {
++  // TODO should be optimized.
++  // This method is used to adjust the base register and offset pair
++  // for a load/store when the offset doesn't fit into int12_t.
++
++  bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
++  bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
++  DCHECK_LE(second_access_add_to_offset, 7);  // Must be <= 7.
++
++  // is_int12 must be passed a signed value, hence the static cast below.
++  if (is_int12(src->offset()) &&
++      (!two_accesses || is_int12(static_cast<int32_t>(
++                            src->offset() + second_access_add_to_offset)))) {
++    // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other
++    // specified value) fits into int16_t.
++    return;
++  }
++
++  DCHECK(src->rm() !=
++         at);  // Must not overwrite the register 'base' while loading 'offset'.
++
++#ifdef DEBUG
++  // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
++  uint32_t misalignment = src->offset() & (kDoubleSize - 1);
++#endif
++
++  // Do not load the whole 32-bit 'offset' if it can be represented as
++  // a sum of two 16-bit signed offsets. This can save an instruction or two.
++  // To simplify matters, only do this for a symmetric range of offsets from
++  // about -64KB to about +64KB, allowing further addition of 4 when accessing
++  // 64-bit variables with two 32-bit accesses.
++  constexpr int32_t kMinOffsetForSimpleAdjustment =
++      0x7FF8;  // Max int16_t that's a multiple of 8.
++  constexpr int32_t kMaxOffsetForSimpleAdjustment =
++      2 * kMinOffsetForSimpleAdjustment;
++
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
++    daddiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
++    src->offset_ -= kMinOffsetForSimpleAdjustment;
++  } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
++             src->offset() < 0) {
++    daddiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
++    src->offset_ += kMinOffsetForSimpleAdjustment;
++  } else if (kArchVariant == kMips64r6) {
++    // On r6 take advantage of the daui instruction, e.g.:
++    //   daui at, base, offset_high
++    //   [dahi at, 1]                       // When `offset` is close to +2GB.
++    //   lw reg_lo, offset_low(at)
++    //   [lw reg_hi, (offset_low+4)(at)]    // If misaligned 64-bit load.
++    // or when offset_low+4 overflows int16_t:
++    //   daui at, base, offset_high
++    //   daddiu at, at, 8
++    //   lw reg_lo, (offset_low-8)(at)
++    //   lw reg_hi, (offset_low-4)(at)
++    int16_t offset_low = static_cast<int16_t>(src->offset());
++    int32_t offset_low32 = offset_low;
++    int16_t offset_high = static_cast<int16_t>(src->offset() >> 16);
++    bool increment_hi16 = offset_low < 0;
++    bool overflow_hi16 = false;
++
++    if (increment_hi16) {
++      offset_high++;
++      overflow_hi16 = (offset_high == -32768);
++    }
++    daui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
++
++    if (overflow_hi16) {
++      dahi(scratch, 1);
++    }
++
++    if (two_accesses && !is_int16(static_cast<int32_t>(
++                            offset_low32 + second_access_add_to_offset))) {
++      // Avoid overflow in the 16-bit offset of the load/store instruction when
++      // adding 4.
++      daddiu(scratch, scratch, kDoubleSize);
++      offset_low32 -= kDoubleSize;
++    }
++
++    src->offset_ = offset_low32;
++  } else {
++    // Do not load the whole 32-bit 'offset' if it can be represented as
++    // a sum of three 16-bit signed offsets. This can save an instruction.
++    // To simplify matters, only do this for a symmetric range of offsets from
++    // about -96KB to about +96KB, allowing further addition of 4 when
++    // accessing 64-bit variables with two 32-bit accesses.
++    constexpr int32_t kMinOffsetForMediumAdjustment =
++        2 * kMinOffsetForSimpleAdjustment;
++    constexpr int32_t kMaxOffsetForMediumAdjustment =
++        3 * kMinOffsetForSimpleAdjustment;
++    if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
++      daddiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
++      daddiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
++      src->offset_ -= kMinOffsetForMediumAdjustment;
++    } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
++               src->offset() < 0) {
++      daddiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
++      daddiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
++      src->offset_ += kMinOffsetForMediumAdjustment;
++    } else {
++      // Now that all shorter options have been exhausted, load the full
++      // 32-bit offset.
++      int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
++      lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
++      ori(scratch, scratch, loaded_offset & kImm16Mask);  // Load 32-bit offset.
++      daddu(scratch, scratch, src->rm());
++      src->offset_ -= loaded_offset;
++    }
++  }
++  src->rm_ = scratch;
++
++  DCHECK(is_int16(src->offset()));
++  if (two_accesses) {
++    DCHECK(is_int16(
++        static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
++  }
++  DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
++}*/
++
++void Assembler::AdjustBaseAndOffset(MemOperand* src) {
++  // is_int12 must be passed a signed value, hence the static cast below.
++  if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) {
++    return;
++  }
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  if (is_uint12(static_cast<int32_t>(src->offset()))) {
++    ori(scratch, zero_reg, src->offset() & kImm12Mask);
++  } else {
++    lu12i_w(scratch, src->offset() >> 12 & 0xfffff);
++    if (src->offset() & kImm12Mask) {
++      ori(scratch, scratch, src->offset() & kImm12Mask);
++    }
++  }
++  src->index_ = scratch;
++  src->offset_ = 0;
++  // TODO: can be optimized, for example for 2 * [int12_min, int12_max]:
++  //   addi_d scratch, base, offset/2   (only one instr)
++  //   base = scratch
++  //   offset = offset - offset / 2
++}
++
++int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
++                                         intptr_t pc_delta) {
++  if (RelocInfo::IsInternalReference(rmode)) {
++    int64_t* p = reinterpret_cast<int64_t*>(pc);
++    if (*p == kEndOfJumpChain) {
++      return 0;  // Number of instructions patched.
++    }
++    *p += pc_delta;
++    return 2;  // Number of instructions patched.
++  }
++  abort();
++  /* Instr instr = instr_at(pc);
++  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
++  if (IsLui(instr)) {
++    Instr instr_lui = instr_at(pc + 0 * kInstrSize);
++    Instr instr_ori = instr_at(pc + 1 * kInstrSize);
++    Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
++    DCHECK(IsOri(instr_ori));
++    DCHECK(IsOri(instr_ori2));
++    // TODO(plind): symbolic names for the shifts.
++    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
++    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
++    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
++    // Sign extend address.
++    imm >>= 16;
++
++    if (imm == kEndOfJumpChain) {
++      return 0;  // Number of instructions patched.
++    }
++    imm += pc_delta;
++    DCHECK_EQ(imm & 3, 0);
++
++    instr_lui &= ~kImm16Mask;
++    instr_ori &= ~kImm16Mask;
++    instr_ori2 &= ~kImm16Mask;
++
++    instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
++    instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
++    instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
++    return 4;  // Number of instructions patched.
++  } else if (IsJ(instr) || IsJal(instr)) {
++    // Regular j/jal relocation.
++    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
++    imm28 += pc_delta;
++    imm28 &= kImm28Mask;
++    instr &= ~kImm26Mask;
++    DCHECK_EQ(imm28 & 3, 0);
++    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
++    instr_at_put(pc, instr | (imm26 & kImm26Mask));
++    return 1;  // Number of instructions patched.
++  } else {
++    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
++           ((instr & kJumpRawMask) == kJalRawMark));
++    // Unbox raw offset and emit j/jal.
++    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
++    // Sign extend 28-bit offset to 32-bit.
++    imm28 = (imm28 << 4) >> 4;
++    uint64_t target =
++        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
++    target &= kImm28Mask;
++    DCHECK_EQ(imm28 & 3, 0);
++    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
++    // Check markings whether to emit j or jal.
++    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
++    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
++    return 1;  // Number of instructions patched.
++  }*/
++}
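So when a displacement misses the signed 12-bit window, AdjustBaseAndOffset materializes it in a scratch register and flips the operand to the register-indexed form; the split it performs is a plain 20/12 decomposition that lu12i_w + ori can rebuild (ori is a zero-extending OR, so the bitwise split is exact):

    #include <cstdint>

    // 20/12 split matching the lu12i_w + ori pair above.
    struct OffsetParts {
      int32_t hi20;   // lu12i_w scratch, hi20
      uint32_t lo12;  // ori scratch, scratch, lo12 (skipped when zero)
    };

    OffsetParts SplitOffset(int32_t offset) {
      return {offset >> 12, static_cast<uint32_t>(offset) & 0xfff};
    }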
++
++void Assembler::GrowBuffer() {
++  // Compute new buffer size.
++  int old_size = buffer_->size();
++  int new_size = std::min(2 * old_size, old_size + 1 * MB);
++
++  // Some internal data structures overflow for very large buffers,
++  // they must ensure that kMaximalBufferSize is not too large.
++  if (new_size > kMaximalBufferSize) {
++    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
++  }
++
++  // Set up new buffer.
++  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
++  DCHECK_EQ(new_size, new_buffer->size());
++  byte* new_start = new_buffer->start();
++
++  // Copy the data.
++  intptr_t pc_delta = new_start - buffer_start_;
++  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
++  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
++  MemMove(new_start, buffer_start_, pc_offset());
++  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
++          reloc_size);
++
++  // Switch buffers.
++  buffer_ = std::move(new_buffer);
++  buffer_start_ = new_start;
++  pc_ += pc_delta;
++  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
++                               reloc_info_writer.last_pc() + pc_delta);
++
++  // Relocate runtime entries.
++  Vector<byte> instructions{buffer_start_, pc_offset()};
++  Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
++  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
++    RelocInfo::Mode rmode = it.rinfo()->rmode();
++    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
++      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
++    }
++  }
++  DCHECK(!overflow());
++}
++
++void Assembler::db(uint8_t data) {
++  if (!is_buffer_growth_blocked()) {
++    CheckBuffer();
++  }
++  EmitHelper(data);
++}
++
++void Assembler::dd(uint32_t data) {
++  if (!is_buffer_growth_blocked()) {
++    CheckBuffer();
++  }
++  EmitHelper(data);
++}
++
++void Assembler::dq(uint64_t data) {
++  if (!is_buffer_growth_blocked()) {
++    CheckBuffer();
++  }
++  EmitHelper(data);
++}
++
++void Assembler::dd(Label* label) {
++  if (!is_buffer_growth_blocked()) {
++    CheckBuffer();
++  }
++  uint64_t data;
++  if (label->is_bound()) {
++    data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
++  } else {
++    data = jump_address(label);
++    unbound_labels_count_++;
++    internal_reference_positions_.insert(label->pos());
++  }
++  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
++  EmitHelper(data);
++}
++
++void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
++  if (!ShouldRecordRelocInfo(rmode)) return;
++  // We do not try to reuse pool constants.
++  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
++  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
++  reloc_info_writer.Write(&rinfo);
++}
++
++void Assembler::BlockTrampolinePoolFor(int instructions) {
++  CheckTrampolinePoolQuick(instructions);
++  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
++}
++
++void Assembler::CheckTrampolinePool() {
++  // Some small sequences of instructions must not be broken up by the
++  // insertion of a trampoline pool; such sequences are protected by setting
++  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
++  // which are both checked here. Also, recursive calls to CheckTrampolinePool
++  // are blocked by trampoline_pool_blocked_nesting_.
++  if ((trampoline_pool_blocked_nesting_ > 0) ||
++      (pc_offset() < no_trampoline_pool_before_)) {
++    // Emission is currently blocked; make sure we try again as soon as
++    // possible.
++    if (trampoline_pool_blocked_nesting_ > 0) {
++      next_buffer_check_ = pc_offset() + kInstrSize;
++    } else {
++      next_buffer_check_ = no_trampoline_pool_before_;
++    }
++    return;
++  }
++
++  DCHECK(!trampoline_emitted_);
++  DCHECK_GE(unbound_labels_count_, 0);
++  if (unbound_labels_count_ > 0) {
++    // First we emit jump (2 instructions), then we emit trampoline pool.
++    {
++      BlockTrampolinePoolScope block_trampoline_pool(this);
++      Label after_pool;
++      b(&after_pool);
++      nop();  // TODO: remove this
++
++      int pool_start = pc_offset();
++      for (int i = 0; i < unbound_labels_count_; i++) {
++        {
++          b(&after_pool);
++          nop();  // TODO: remove this
++        }
++      }
++      nop();
++      bind(&after_pool);
++      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
++
++      trampoline_emitted_ = true;
++      // As we are only going to emit trampoline once, we need to prevent any
++      // further emission.
++      next_buffer_check_ = kMaxInt;
++    }
++  } else {
++    // Number of branches to unbound label at this point is zero, so we can
++    // move next buffer check to maximum.
++    next_buffer_check_ =
++        pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16;
++  }
++  return;
++}
++
++Address Assembler::target_address_at(Address pc) {
++  Instr instr0 = instr_at(pc);
++  Instr instr1 = instr_at(pc + 1 * kInstrSize);
++  Instr instr2 = instr_at(pc + 2 * kInstrSize);
++
++  // Interpret 3 instructions for address generated by li: See listing in
++  // Assembler::set_target_address_at() just below.
++  DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));
++
++  // Assemble the 48 bit value.
++  uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32;
++  uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12;
++  uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff);
++  int64_t addr = static_cast<int64_t>(hi20 | mid20 | low12);
++
++  // Sign extend to get canonical address.
++  addr = (addr << 16) >> 16;
++  // printf("add : 0x%lx 0x%lx 0x%lx 0x%lx\n", addr, hi20, mid20, low12);
++  return static_cast<Address>(addr);
++}
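target_address_at inverts the 3-instruction li sequence: it extracts the two 20-bit fields from lu12i_w/lu32i_d and the 12-bit field from ori, then sign-extends from bit 47 so the upper bits match the canonical-address rule. The same reassembly in isolation (a minimal sketch using the field positions assumed by the code above):

    #include <cstdint>

    // Rebuild the 48-bit address loaded by lu12i_w / ori / lu32i_d and
    // sign-extend it (bits 63..48 copy bit 47).
    int64_t Reassemble48(uint32_t instr0 /*lu12i_w*/, uint32_t instr1 /*ori*/,
                         uint32_t instr2 /*lu32i_d*/) {
      uint64_t hi20 = (uint64_t{instr2} >> 5) & 0xfffff;   // si20 field
      uint64_t mid20 = (uint64_t{instr0} >> 5) & 0xfffff;  // si20 field
      uint64_t low12 = (uint64_t{instr1} >> 10) & 0xfff;   // ui12 field
      int64_t addr =
          static_cast<int64_t>((hi20 << 32) | (mid20 << 12) | low12);
      return (addr << 16) >> 16;  // sign extend from bit 47
    }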
++
++// On loong64, a target address is stored in a 3-instruction sequence:
++//  0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
++//  1: ori(rd, rd, j.imm64_ & kImm12Mask);
++//  2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask);
++//
++// Patching the address must replace all three instructions,
++// and flush the i-cache.
++//
++// There is an optimization below, which emits a nop when the address
++// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
++// and possibly removed.
++void Assembler::set_target_value_at(Address pc, uint64_t target,
++                                    ICacheFlushMode icache_flush_mode) {
++  // There is an optimization where only 3 instructions are used to load an
++  // address in code on LOONG64 because only 48 bits of the address are
++  // effectively used. It relies on the fact that the upper [63:48] bits are
++  // not used for virtual address translation and have to be set according to
++  // the value of bit 47 in order to get a canonical address.
++#ifdef DEBUG
++  // Check we have the result from a li macro-instruction.
++  Instr instr0 = instr_at(pc);
++  Instr instr1 = instr_at(pc + kInstrSize);
++  Instr instr2 = instr_at(pc + kInstrSize * 2);
++  DCHECK(IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2));
++#endif
++
++  Instr instr = instr_at(pc);
++  uint32_t rd_code = GetRd(instr);
++  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
++
++  // Must use 3 instructions to ensure patchable code.
++  //  lu12i_w rd, middle-20.
++  //  ori     rd, rd, low-12.
++  //  lu32i_d rd, high-20.
++  *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
++  *(p + 1) =
++      ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
++  *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code;
++
++  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
++    FlushInstructionCache(pc, 3 * kInstrSize);
++  }
++}
++
++UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
++    : available_(assembler->GetScratchRegisterList()),
++      old_available_(*available_) {}
++
++UseScratchRegisterScope::~UseScratchRegisterScope() {
++  *available_ = old_available_;
++}
++
++Register UseScratchRegisterScope::Acquire() {
++  DCHECK_NOT_NULL(available_);
++  DCHECK_NE(*available_, 0);
++  int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
++  *available_ &= ~(1UL << index);
++
++  return Register::from_code(index);
++}
++
++bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_LOONG64
+diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
+new file mode 100644
+index 00000000..b3804242
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
+@@ -0,0 +1,1118 @@
++// Copyright (c) 1994-2006 Sun Microsystems Inc.
++// All Rights Reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// - Redistributions of source code must retain the above copyright notice,
++// this list of conditions and the following disclaimer.
++//
++// - Redistribution in binary form must reproduce the above copyright
++// notice, this list of conditions and the following disclaimer in the
++// documentation and/or other materials provided with the distribution.
++//
++// - Neither the name of Sun Microsystems or the names of contributors may
++// be used to endorse or promote products derived from this software without
++// specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
++// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
++// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
++// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
++// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
++// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
++// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
++// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
++// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
++// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// The original source code covered by the above license above has been
++// modified significantly by Google Inc.
++// Copyright 2012 the V8 project authors. All rights reserved.
++
++#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
++#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
++
++#include <stdio.h>
++#include <memory>
++#include <set>
++
++#include "src/codegen/assembler.h"
++#include "src/codegen/external-reference.h"
++#include "src/codegen/loong64/constants-loong64.h"
++#include "src/codegen/loong64/register-loong64.h"
++#include "src/codegen/label.h"
++#include "src/objects/contexts.h"
++#include "src/objects/smi.h"
++
++namespace v8 {
++namespace internal {
++
++class SafepointTableBuilder;
++
++// -----------------------------------------------------------------------------
++// Machine instruction Operands.
++constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
++constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
++// Class Operand represents a shifter operand in data processing instructions.
++class Operand {
++ public:
++  // Immediate.
++  V8_INLINE explicit Operand(int64_t immediate,
++                             RelocInfo::Mode rmode = RelocInfo::NONE)
++      : rm_(no_reg), rmode_(rmode) {
++    value_.immediate = immediate;
++  }
++  V8_INLINE explicit Operand(const ExternalReference& f)
++      : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
++    value_.immediate = static_cast<int64_t>(f.address());
++  }
++  V8_INLINE explicit Operand(const char* s);
++  explicit Operand(Handle<HeapObject> handle);
++  V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
++    value_.immediate = static_cast<intptr_t>(value.ptr());
++  }
++
++  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
++  static Operand EmbeddedStringConstant(const StringConstantBase* str);
++
++  // Register.
++  V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
++
++  // Return true if this is a register operand.
++  V8_INLINE bool is_reg() const;
++
++  inline int64_t immediate() const;
++
++  bool IsImmediate() const { return !rm_.is_valid(); }
++
++  HeapObjectRequest heap_object_request() const {
++    DCHECK(IsHeapObjectRequest());
++    return value_.heap_object_request;
++  }
++
++  bool IsHeapObjectRequest() const {
++    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
++    DCHECK_IMPLIES(is_heap_object_request_,
++                   rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
++                       rmode_ == RelocInfo::CODE_TARGET);
++    return is_heap_object_request_;
++  }
++
++  Register rm() const { return rm_; }
++
++  RelocInfo::Mode rmode() const { return rmode_; }
++
++ private:
++  Register rm_;
++  union Value {
++    Value() {}
++    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
++    int64_t immediate;                      // otherwise
++  } value_;                                 // valid if rm_ == no_reg
++  bool is_heap_object_request_ = false;
++  RelocInfo::Mode rmode_;
++
++  friend class Assembler;
++  friend class MacroAssembler;
++};
++
++// Class MemOperand represents a memory operand in load and store instructions.
++// 1: base_reg + off_imm( si12 | si14<<2)
++// 2: base_reg + offset_reg
++class V8_EXPORT_PRIVATE MemOperand {
++ public:
++  explicit MemOperand(Register rj, int32_t offset = 0);
++  explicit MemOperand(Register rj, Register offset = no_reg);
++  Register base() const { return base_; }
++  Register index() const { return index_; }
++  int32_t offset() const { return offset_; }
++
++  bool hasIndexReg() const { return index_ != no_reg; }
++
++ private:
++  Register base_;   // base
++  Register index_;  // index
++  int32_t offset_;  // offset
++
++  friend class Assembler;
++};
++
++class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
++ public:
++  // Create an assembler. Instructions and relocation information are emitted
++  // into a buffer, with the instructions starting from the beginning and the
++  // relocation information starting from the end of the buffer. See CodeDesc
++  // for a detailed comment on the layout (globals.h).
++  //
++  // If the provided buffer is nullptr, the assembler allocates and grows its
++  // own buffer. Otherwise it takes ownership of the provided buffer.
++  explicit Assembler(const AssemblerOptions&,
++                     std::unique_ptr<AssemblerBuffer> = {});
++
++  virtual ~Assembler() {}
++
++  // GetCode emits any pending (non-emitted) code and fills the descriptor
++  // desc.
++  static constexpr int kNoHandlerTable = 0;
++  static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
++  void GetCode(Isolate* isolate, CodeDesc* desc,
++               SafepointTableBuilder* safepoint_table_builder,
++               int handler_table_offset);
++
++  // Convenience wrapper for code without safepoint or handler tables.
++  void GetCode(Isolate* isolate, CodeDesc* desc) {
++    GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
++  }
++
++  // Unused on this architecture.
++  void MaybeEmitOutOfLineConstantPool() {}
++
++  // Label operations & relative jumps (PPUM Appendix D).
++  //
++  // Takes a branch opcode (cc) and a label (L) and generates
++  // either a backward branch or a forward branch and links it
++  // to the label fixup chain. Usage:
++  //
++  // Label L;    // unbound label
++  // j(cc, &L);  // forward branch to unbound label
++  // bind(&L);   // bind label to the current pc
++  // j(cc, &L);  // backward branch to bound label
++  // bind(&L);   // illegal: a label may be bound only once
++  //
++  // Note: The same Label can be used for forward and backward branches
++  // but it may be bound only once.
++  void bind(Label* L);  // Binds an unbound label L to current code position.
++
++  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
++
++  // Determines if Label is bound and near enough so that branch instruction
++  // can be used to reach it, instead of jump instruction.
++  // c means conditional branch, a means always branch.
++  bool is_near_c(Label* L);
++  bool is_near(Label* L, OffsetSize bits);
++  bool is_near_a(Label* L);
++
++  int BranchOffset(Instr instr);
++
++  // Returns the branch offset to the given label from the current code
++  // position. Links the label to the current position if it is still unbound.
++  // Manages the jump elimination optimization if the second parameter is true.
++  int32_t branch_offset_helper(Label* L, OffsetSize bits);
++  inline int32_t branch_offset(Label* L) {
++    return branch_offset_helper(L, OffsetSize::kOffset16);
++  }
++  inline int32_t branch_offset21(Label* L) {
++    return branch_offset_helper(L, OffsetSize::kOffset21);
++  }
++  inline int32_t branch_offset26(Label* L) {
++    return branch_offset_helper(L, OffsetSize::kOffset26);
++  }
++  inline int32_t shifted_branch_offset(Label* L) {
++    return branch_offset(L) >> 2;
++  }
++  inline int32_t shifted_branch_offset21(Label* L) {
++    return branch_offset21(L) >> 2;
++  }
++  inline int32_t shifted_branch_offset26(Label* L) {
++    return branch_offset26(L) >> 2;
++  }
++  uint64_t jump_address(Label* L);
++  uint64_t jump_offset(Label* L);
++  uint64_t branch_long_offset(Label* L);
++
++  // Puts a label's target address at the given position.
++  // The high 8 bits are set to zero.
++  void label_at_put(Label* L, int at_offset);
++
++  // Read/Modify the code target address in the branch/call instruction at pc.
++  // The isolate argument is unused (and may be nullptr) when skipping
++  // flushing.
++  static Address target_address_at(Address pc);
++  V8_INLINE static void set_target_address_at(
++      Address pc, Address target,
++      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
++    set_target_value_at(pc, target, icache_flush_mode);
++  }
++  // On LOONG64 there is no Constant Pool so we skip that parameter.
++  V8_INLINE static Address target_address_at(Address pc,
++                                             Address constant_pool) {
++    return target_address_at(pc);
++  }
++  V8_INLINE static void set_target_address_at(
++      Address pc, Address constant_pool, Address target,
++      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
++    set_target_address_at(pc, target, icache_flush_mode);
++  }
++
++  static void set_target_value_at(
++      Address pc, uint64_t target,
++      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
++
++  static void JumpLabelToJumpRegister(Address pc);
++
++  // This sets the branch destination (which gets loaded at the call address).
++  // This is for calls and branches within generated code. The serializer
++  // has already deserialized the lui/ori instructions etc.
++  inline static void deserialization_set_special_target_at(
++      Address instruction_payload, Code code, Address target);
++
++  // Get the size of the special target encoded at 'instruction_payload'.
++  inline static int deserialization_special_target_size(
++      Address instruction_payload);
++
++  // This sets the internal reference at the pc.
++  inline static void deserialization_set_target_internal_reference_at(
++      Address pc, Address target,
++      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
++
++  // Here we are patching the address in the LUI/ORI instruction pair.
++
++  // Here we are patching the address in the LUI/ORI instruction pair.
++  // These values are used in the serialization process and must be zero for
++  // LA platform, as Code, Embedded Object or External-reference pointers
++  // are split across two consecutive instructions and don't exist separately
++  // in the code, so the serializer should not step forwards in memory after
++  // a target is resolved and written.
++  static constexpr int kSpecialTargetSize = 0;
++
++  // Number of consecutive instructions used to store 32bit/64bit constant.
++  // This constant was used in RelocInfo::target_address_address() function
++  // to tell serializer address of the instruction that follows
++  // LUI/ORI instruction pair.
++  // TODO check this
++  static constexpr int kInstructionsFor64BitConstant = 4;
++
++  // Difference between address of current opcode and target address offset.
++  static constexpr int kBranchPCOffset = 0;
++
++  // Difference between address of current opcode and target address offset,
++  // when we are generating a sequence of instructions for long relative PC
++  // branches.
++  static constexpr int kLongBranchPCOffset = 0;  // 3 * kInstrSize;
++
++  // Max offset for instructions with 16-bit offset field
++  static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1;
++
++  // Max offset for instructions with 21-bit offset field
++  static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1;
++
++  // Max offset for compact branch instructions with 26-bit offset field
++  static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1;
++
++  static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
++
++  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
++
++  // ---------------------------------------------------------------------------
++  // Code generation.
++
++  // Insert the smallest number of nop instructions
++  // possible to align the pc offset to a multiple
++  // of m. m must be a power of 2 (>= 4).
++  void Align(int m);
++  // Insert the smallest number of zero bytes possible to align the pc offset
++  // to a multiple of m. m must be a power of 2 (>= 2).
++  void DataAlign(int m);
++  // Aligns code to something that's optimal for a jump target for the
++  // platform.
++  void CodeTargetAlign();
++
++  // Different nop operations are used by the code generator to detect certain
++  // states of the generated code.
++  enum NopMarkerTypes {
++    NON_MARKING_NOP = 0,
++    DEBUG_BREAK_NOP,
++    // IC markers.
++    PROPERTY_ACCESS_INLINED,
++    PROPERTY_ACCESS_INLINED_CONTEXT,
++    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
++    // Helper values.
++    LAST_CODE_MARKER,
++    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
++  };
++
++  // Type == 0 is the default non-marking nop. For LoongISA this is a
++  // andi(zero_reg, zero_reg, 0). We use rt_reg == r1 for non-zero
++  // marking, to avoid conflict with ssnop and ehb instructions.
++  void nop(unsigned int type = 0) {
++    DCHECK_LT(type, 32);
++    Register nop_rt_reg = (type == 0) ? zero_reg : t7;
++    andi(zero_reg, nop_rt_reg, type);
++  }
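
A marked nop writes to the zero register, so it has no architectural effect; the marker survives only in the andi immediate, where IsNop() (declared later in this header) can recover it. A hedged sketch, assuming assm is an Assembler and pos is a hypothetical pc offset of the emitted nop:

    assm.nop(Assembler::DEBUG_BREAK_NOP);  // emits andi(zero_reg, t7, 1)
    Instr instr = assm.instr_at(pos);
    bool marked = Assembler::IsNop(instr, Assembler::DEBUG_BREAK_NOP);
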
++
++  // --------Branch-and-jump-instructions----------
++  // We don't use likely variant of instructions.
++  void b(int32_t offset);
++  inline void b(Label* L) { b(shifted_branch_offset26(L)); }
++  void bl(int32_t offset);
++  inline void bl(Label* L) { bl(shifted_branch_offset26(L)); }
++
++  void beq(Register rj, Register rd, int32_t offset);
++  inline void beq(Register rj, Register rd, Label* L) {
++    beq(rj, rd, shifted_branch_offset(L));
++  }
++  void bne(Register rj, Register rd, int32_t offset);
++  inline void bne(Register rj, Register rd, Label* L) {
++    bne(rj, rd, shifted_branch_offset(L));
++  }
++  void blt(Register rj, Register rd, int32_t offset);
++  inline void blt(Register rj, Register rd, Label* L) {
++    blt(rj, rd, shifted_branch_offset(L));
++  }
++  void bge(Register rj, Register rd, int32_t offset);
++  inline void bge(Register rj, Register rd, Label* L) {
++    bge(rj, rd, shifted_branch_offset(L));
++  }
++  void bltu(Register rj, Register rd, int32_t offset);
++  inline void bltu(Register rj, Register rd, Label* L) {
++    bltu(rj, rd, shifted_branch_offset(L));
++  }
++  void bgeu(Register rj, Register rd, int32_t offset);
++  inline void bgeu(Register rj, Register rd, Label* L) {
++    bgeu(rj, rd, shifted_branch_offset(L));
++  }
++  void beqz(Register rj, int32_t offset);
++  inline void beqz(Register rj, Label* L) {
++    beqz(rj, shifted_branch_offset21(L));
++  }
++  void bnez(Register rj, int32_t offset);
++  inline void bnez(Register rj, Label* L) {
++    bnez(rj, shifted_branch_offset21(L));
++  }
++
++  void jirl(Register rd, Register rj, int32_t offset);
++
++  void bceqz(CFRegister cj, int32_t si21);
++  inline void bceqz(CFRegister cj, Label* L) {
++    bceqz(cj, shifted_branch_offset21(L));
++  }
++  void bcnez(CFRegister cj, int32_t si21);
++  inline void bcnez(CFRegister cj, Label* L) {
++    bcnez(cj, shifted_branch_offset21(L));
++  }
++
++  // -------Data-processing-instructions---------
++
++  // Arithmetic.
++ void add_w(Register rd, Register rj, Register rk); ++ void add_d(Register rd, Register rj, Register rk); ++ void sub_w(Register rd, Register rj, Register rk); ++ void sub_d(Register rd, Register rj, Register rk); ++ ++ void addi_w(Register rd, Register rj, int32_t si12); ++ void addi_d(Register rd, Register rj, int32_t si12); ++ ++ void addu16i_d(Register rd, Register rj, int32_t si16); ++ ++ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2); ++ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2); ++ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2); ++ ++ void lu12i_w(Register rd, int32_t si20); ++ void lu32i_d(Register rd, int32_t si20); ++ void lu52i_d(Register rd, Register rj, int32_t si12); ++ ++ void slt(Register rd, Register rj, Register rk); ++ void sltu(Register rd, Register rj, Register rk); ++ void slti(Register rd, Register rj, int32_t si12); ++ void sltui(Register rd, Register rj, int32_t si12); ++ ++ void pcaddi(Register rd, int32_t si20); ++ void pcaddu12i(Register rd, int32_t si20); ++ void pcaddu18i(Register rd, int32_t si20); ++ void pcalau12i(Register rd, int32_t si20); ++ ++ void and_(Register rd, Register rj, Register rk); ++ void or_(Register rd, Register rj, Register rk); ++ void xor_(Register rd, Register rj, Register rk); ++ void nor(Register rd, Register rj, Register rk); ++ void andn(Register rd, Register rj, Register rk); ++ void orn(Register rd, Register rj, Register rk); ++ ++ void andi(Register rd, Register rj, int32_t ui12); ++ void ori(Register rd, Register rj, int32_t ui12); ++ void xori(Register rd, Register rj, int32_t ui12); ++ ++ void mul_w(Register rd, Register rj, Register rk); ++ void mulh_w(Register rd, Register rj, Register rk); ++ void mulh_wu(Register rd, Register rj, Register rk); ++ void mul_d(Register rd, Register rj, Register rk); ++ void mulh_d(Register rd, Register rj, Register rk); ++ void mulh_du(Register rd, Register rj, Register rk); ++ ++ void mulw_d_w(Register rd, Register rj, Register rk); ++ void mulw_d_wu(Register rd, Register rj, Register rk); ++ ++ void div_w(Register rd, Register rj, Register rk); ++ void mod_w(Register rd, Register rj, Register rk); ++ void div_wu(Register rd, Register rj, Register rk); ++ void mod_wu(Register rd, Register rj, Register rk); ++ void div_d(Register rd, Register rj, Register rk); ++ void mod_d(Register rd, Register rj, Register rk); ++ void div_du(Register rd, Register rj, Register rk); ++ void mod_du(Register rd, Register rj, Register rk); ++ ++ // Shifts. ++ void sll_w(Register rd, Register rj, Register rk); ++ void srl_w(Register rd, Register rj, Register rk); ++ void sra_w(Register rd, Register rj, Register rk); ++ void rotr_w(Register rd, Register rj, Register rk); ++ ++ void slli_w(Register rd, Register rj, int32_t ui5); ++ void srli_w(Register rd, Register rj, int32_t ui5); ++ void srai_w(Register rd, Register rj, int32_t ui5); ++ void rotri_w(Register rd, Register rj, int32_t ui5); ++ ++ void sll_d(Register rd, Register rj, Register rk); ++ void srl_d(Register rd, Register rj, Register rk); ++ void sra_d(Register rd, Register rj, Register rk); ++ void rotr_d(Register rd, Register rj, Register rk); ++ ++ void slli_d(Register rd, Register rj, int32_t ui6); ++ void srli_d(Register rd, Register rj, int32_t ui6); ++ void srai_d(Register rd, Register rj, int32_t ui6); ++ void rotri_d(Register rd, Register rj, int32_t ui6); ++ ++ // Bit twiddling. 
++ void ext_w_b(Register rd, Register rj); ++ void ext_w_h(Register rd, Register rj); ++ ++ void clo_w(Register rd, Register rj); ++ void clz_w(Register rd, Register rj); ++ void cto_w(Register rd, Register rj); ++ void ctz_w(Register rd, Register rj); ++ void clo_d(Register rd, Register rj); ++ void clz_d(Register rd, Register rj); ++ void cto_d(Register rd, Register rj); ++ void ctz_d(Register rd, Register rj); ++ ++ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2); ++ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3); ++ ++ void revb_2h(Register rd, Register rj); ++ void revb_4h(Register rd, Register rj); ++ void revb_2w(Register rd, Register rj); ++ void revb_d(Register rd, Register rj); ++ ++ void revh_2w(Register rd, Register rj); ++ void revh_d(Register rd, Register rj); ++ ++ void bitrev_4b(Register rd, Register rj); ++ void bitrev_8b(Register rd, Register rj); ++ ++ void bitrev_w(Register rd, Register rj); ++ void bitrev_d(Register rd, Register rj); ++ ++ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); ++ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); ++ ++ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); ++ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); ++ ++ void maskeqz(Register rd, Register rj, Register rk); ++ void masknez(Register rd, Register rj, Register rk); ++ ++ // Memory-instructions ++ void ld_b(Register rd, Register rj, int32_t si12); ++ void ld_h(Register rd, Register rj, int32_t si12); ++ void ld_w(Register rd, Register rj, int32_t si12); ++ void ld_d(Register rd, Register rj, int32_t si12); ++ void ld_bu(Register rd, Register rj, int32_t si12); ++ void ld_hu(Register rd, Register rj, int32_t si12); ++ void ld_wu(Register rd, Register rj, int32_t si12); ++ void st_b(Register rd, Register rj, int32_t si12); ++ void st_h(Register rd, Register rj, int32_t si12); ++ void st_w(Register rd, Register rj, int32_t si12); ++ void st_d(Register rd, Register rj, int32_t si12); ++ ++ void ldx_b(Register rd, Register rj, Register rk); ++ void ldx_h(Register rd, Register rj, Register rk); ++ void ldx_w(Register rd, Register rj, Register rk); ++ void ldx_d(Register rd, Register rj, Register rk); ++ void ldx_bu(Register rd, Register rj, Register rk); ++ void ldx_hu(Register rd, Register rj, Register rk); ++ void ldx_wu(Register rd, Register rj, Register rk); ++ void stx_b(Register rd, Register rj, Register rk); ++ void stx_h(Register rd, Register rj, Register rk); ++ void stx_w(Register rd, Register rj, Register rk); ++ void stx_d(Register rd, Register rj, Register rk); ++ ++ void ldptr_w(Register rd, Register rj, int32_t si14); ++ void ldptr_d(Register rd, Register rj, int32_t si14); ++ void stptr_w(Register rd, Register rj, int32_t si14); ++ void stptr_d(Register rd, Register rj, int32_t si14); ++ ++ void amswap_w(Register rd, Register rk, Register rj); ++ void amswap_d(Register rd, Register rk, Register rj); ++ void amadd_w(Register rd, Register rk, Register rj); ++ void amadd_d(Register rd, Register rk, Register rj); ++ void amand_w(Register rd, Register rk, Register rj); ++ void amand_d(Register rd, Register rk, Register rj); ++ void amor_w(Register rd, Register rk, Register rj); ++ void amor_d(Register rd, Register rk, Register rj); ++ void amxor_w(Register rd, Register rk, Register rj); ++ void amxor_d(Register rd, Register rk, Register rj); ++ void ammax_w(Register rd, Register rk, Register rj); ++ void ammax_d(Register rd, Register rk, Register 
rj); ++ void ammin_w(Register rd, Register rk, Register rj); ++ void ammin_d(Register rd, Register rk, Register rj); ++ void ammax_wu(Register rd, Register rk, Register rj); ++ void ammax_du(Register rd, Register rk, Register rj); ++ void ammin_wu(Register rd, Register rk, Register rj); ++ void ammin_du(Register rd, Register rk, Register rj); ++ ++ void amswap_db_w(Register rd, Register rk, Register rj); ++ void amswap_db_d(Register rd, Register rk, Register rj); ++ void amadd_db_w(Register rd, Register rk, Register rj); ++ void amadd_db_d(Register rd, Register rk, Register rj); ++ void amand_db_w(Register rd, Register rk, Register rj); ++ void amand_db_d(Register rd, Register rk, Register rj); ++ void amor_db_w(Register rd, Register rk, Register rj); ++ void amor_db_d(Register rd, Register rk, Register rj); ++ void amxor_db_w(Register rd, Register rk, Register rj); ++ void amxor_db_d(Register rd, Register rk, Register rj); ++ void ammax_db_w(Register rd, Register rk, Register rj); ++ void ammax_db_d(Register rd, Register rk, Register rj); ++ void ammin_db_w(Register rd, Register rk, Register rj); ++ void ammin_db_d(Register rd, Register rk, Register rj); ++ void ammax_db_wu(Register rd, Register rk, Register rj); ++ void ammax_db_du(Register rd, Register rk, Register rj); ++ void ammin_db_wu(Register rd, Register rk, Register rj); ++ void ammin_db_du(Register rd, Register rk, Register rj); ++ ++ void ll_w(Register rd, Register rj, int32_t si14); ++ void ll_d(Register rd, Register rj, int32_t si14); ++ void sc_w(Register rd, Register rj, int32_t si14); ++ void sc_d(Register rd, Register rj, int32_t si14); ++ ++ void dbar(int32_t hint); ++ void ibar(int32_t hint); ++ ++ // Break / Trap instructions. ++ void break_(uint32_t code, bool break_as_stop = false); ++ void stop(uint32_t code = kMaxStopCode); ++ ++ // Arithmetic. 
++ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); ++ ++ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fabs_s(FPURegister fd, FPURegister fj); ++ void fabs_d(FPURegister fd, FPURegister fj); ++ void fneg_s(FPURegister fd, FPURegister fj); ++ void fneg_d(FPURegister fd, FPURegister fj); ++ ++ void fsqrt_s(FPURegister fd, FPURegister fj); ++ void fsqrt_d(FPURegister fd, FPURegister fj); ++ void frecip_s(FPURegister fd, FPURegister fj); ++ void frecip_d(FPURegister fd, FPURegister fj); ++ void frsqrt_s(FPURegister fd, FPURegister fj); ++ void frsqrt_d(FPURegister fd, FPURegister fj); ++ ++ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ void flogb_s(FPURegister fd, FPURegister fj); ++ void flogb_d(FPURegister fd, FPURegister fj); ++ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk); ++ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void fclass_s(FPURegister fd, FPURegister fj); ++ void fclass_d(FPURegister fd, FPURegister fj); ++ ++ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd); ++ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, ++ CFRegister cd); ++ ++ void fcvt_s_d(FPURegister fd, FPURegister fj); ++ void fcvt_d_s(FPURegister fd, FPURegister fj); ++ ++ void ffint_s_w(FPURegister fd, FPURegister fj); ++ void ffint_s_l(FPURegister fd, FPURegister fj); ++ void ffint_d_w(FPURegister fd, FPURegister fj); ++ void ffint_d_l(FPURegister fd, FPURegister fj); ++ void ftint_w_s(FPURegister fd, FPURegister fj); ++ void ftint_w_d(FPURegister fd, FPURegister fj); ++ void ftint_l_s(FPURegister fd, FPURegister fj); ++ void ftint_l_d(FPURegister fd, FPURegister fj); ++ ++ void ftintrm_w_s(FPURegister fd, FPURegister fj); ++ void ftintrm_w_d(FPURegister fd, FPURegister fj); ++ void ftintrm_l_s(FPURegister fd, FPURegister fj); ++ void ftintrm_l_d(FPURegister fd, 
FPURegister fj); ++ void ftintrp_w_s(FPURegister fd, FPURegister fj); ++ void ftintrp_w_d(FPURegister fd, FPURegister fj); ++ void ftintrp_l_s(FPURegister fd, FPURegister fj); ++ void ftintrp_l_d(FPURegister fd, FPURegister fj); ++ void ftintrz_w_s(FPURegister fd, FPURegister fj); ++ void ftintrz_w_d(FPURegister fd, FPURegister fj); ++ void ftintrz_l_s(FPURegister fd, FPURegister fj); ++ void ftintrz_l_d(FPURegister fd, FPURegister fj); ++ void ftintrne_w_s(FPURegister fd, FPURegister fj); ++ void ftintrne_w_d(FPURegister fd, FPURegister fj); ++ void ftintrne_l_s(FPURegister fd, FPURegister fj); ++ void ftintrne_l_d(FPURegister fd, FPURegister fj); ++ ++ void frint_s(FPURegister fd, FPURegister fj); ++ void frint_d(FPURegister fd, FPURegister fj); ++ ++ void fmov_s(FPURegister fd, FPURegister fj); ++ void fmov_d(FPURegister fd, FPURegister fj); ++ ++ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk); ++ ++ void movgr2fr_w(FPURegister fd, Register rj); ++ void movgr2fr_d(FPURegister fd, Register rj); ++ void movgr2frh_w(FPURegister fd, Register rj); ++ ++ void movfr2gr_s(Register rd, FPURegister fj); ++ void movfr2gr_d(Register rd, FPURegister fj); ++ void movfrh2gr_s(Register rd, FPURegister fj); ++ ++ void movgr2fcsr(Register rj); ++ void movfcsr2gr(Register rd); ++ ++ void movfr2cf(CFRegister cd, FPURegister fj); ++ void movcf2fr(FPURegister fd, CFRegister cj); ++ ++ void movgr2cf(CFRegister cd, Register rj); ++ void movcf2gr(Register rd, CFRegister cj); ++ ++ void fld_s(FPURegister fd, Register rj, int32_t si12); ++ void fld_d(FPURegister fd, Register rj, int32_t si12); ++ void fst_s(FPURegister fd, Register rj, int32_t si12); ++ void fst_d(FPURegister fd, Register rj, int32_t si12); ++ ++ void fldx_s(FPURegister fd, Register rj, Register rk); ++ void fldx_d(FPURegister fd, Register rj, Register rk); ++ void fstx_s(FPURegister fd, Register rj, Register rk); ++ void fstx_d(FPURegister fd, Register rj, Register rk); ++ ++ // Check the code size generated from label to here. ++ int SizeOfCodeGeneratedSince(Label* label) { ++ return pc_offset() - label->pos(); ++ } ++ ++ // Check the number of instructions generated from label to here. ++ int InstructionsGeneratedSince(Label* label) { ++ return SizeOfCodeGeneratedSince(label) / kInstrSize; ++ } ++ ++ // Class for scoping postponing the trampoline pool generation. ++ class BlockTrampolinePoolScope { ++ public: ++ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockTrampolinePool(); ++ } ++ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); ++ }; ++ ++ // Class for postponing the assembly buffer growth. Typically used for ++ // sequences of instructions that must be emitted as a unit, before ++ // buffer growth (and relocation) can occur. ++ // This blocking scope is not nestable. ++ class BlockGrowBufferScope { ++ public: ++ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { ++ assem_->StartBlockGrowBuffer(); ++ } ++ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } ++ ++ private: ++ Assembler* assem_; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); ++ }; ++ ++ // Record a deoptimization reason that can be used by a log or cpu profiler. ++ // Use --trace-deopt to enable. 
++  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
++                         int id);
++
++  static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
++                                       intptr_t pc_delta);
++
++  // Writes a single byte or word of data in the code stream. Used for
++  // inline tables, e.g., jump-tables.
++  void db(uint8_t data);
++  void dd(uint32_t data);
++  void dq(uint64_t data);
++  void dp(uintptr_t data) { dq(data); }
++  void dd(Label* label);
++
++  // Postpone the generation of the trampoline pool for the specified number of
++  // instructions.
++  void BlockTrampolinePoolFor(int instructions);
++
++  // Check if there is less than kGap bytes available in the buffer.
++  // If this is the case, we need to grow the buffer before emitting
++  // an instruction or relocation information.
++  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
++
++  // Get the number of bytes available in the buffer.
++  inline intptr_t available_space() const {
++    return reloc_info_writer.pos() - pc_;
++  }
++
++  // Read/patch instructions.
++  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
++  static void instr_at_put(Address pc, Instr instr) {
++    *reinterpret_cast<Instr*>(pc) = instr;
++  }
++  Instr instr_at(int pos) {
++    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
++  }
++  void instr_at_put(int pos, Instr instr) {
++    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
++  }
++
++  // Check if an instruction is a branch of some kind.
++  static bool IsBranch(Instr instr);
++  static bool IsB(Instr instr);
++  static bool IsBz(Instr instr);
++  static bool IsNal(Instr instr);
++
++  static bool IsBeq(Instr instr);
++  static bool IsBne(Instr instr);
++
++  static bool IsJump(Instr instr);
++  static bool IsMov(Instr instr, Register rd, Register rs);
++  static bool IsPcAddi(Instr instr, Register rd, int32_t si20);
++
++  static bool IsJ(Instr instr);
++  static bool IsLu12i_w(Instr instr);
++  static bool IsOri(Instr instr);
++  static bool IsLu32i_d(Instr instr);
++  static bool IsLu52i_d(Instr instr);
++
++  static bool IsNop(Instr instr, unsigned int type);
++  static bool IsPop(Instr instr);
++  static bool IsPush(Instr instr);
++  // static bool IsLwRegFpOffset(Instr instr);
++  // static bool IsSwRegFpOffset(Instr instr);
++  // static bool IsLwRegFpNegOffset(Instr instr);
++  // static bool IsSwRegFpNegOffset(Instr instr);
++
++  static Register GetRjReg(Instr instr);
++  static Register GetRkReg(Instr instr);
++  static Register GetRdReg(Instr instr);
++
++  static uint32_t GetRj(Instr instr);
++  static uint32_t GetRjField(Instr instr);
++  static uint32_t GetRk(Instr instr);
++  static uint32_t GetRkField(Instr instr);
++  static uint32_t GetRd(Instr instr);
++  static uint32_t GetRdField(Instr instr);
++  static uint32_t GetSa2(Instr instr);
++  static uint32_t GetSa3(Instr instr);
++  static uint32_t GetSa2Field(Instr instr);
++  static uint32_t GetSa3Field(Instr instr);
++  static uint32_t GetOpcodeField(Instr instr);
++  static uint32_t GetFunction(Instr instr);
++  static uint32_t GetFunctionField(Instr instr);
++  static uint32_t GetImmediate16(Instr instr);
++  static uint32_t GetLabelConst(Instr instr);
++
++  static bool IsAddImmediate(Instr instr);
++  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
++
++  static bool IsAndImmediate(Instr instr);
++  static bool IsEmittedConstant(Instr instr);
++
++  void CheckTrampolinePool();
++
++  inline int UnboundLabelsCount() { return unbound_labels_count_; }
++
++ protected:
++  // Helper function for memory load/store.
++ void AdjustBaseAndOffset(MemOperand* src); ++ ++ inline static void set_target_internal_reference_encoded_at(Address pc, ++ Address target); ++ ++ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } ++ ++ // Decode branch instruction at pos and return branch target pos. ++ int target_at(int pos, bool is_internal); ++ ++ // Patch branch instruction at pos to branch to given branch target pos. ++ void target_at_put(int pos, int target_pos, bool is_internal); ++ ++ // Say if we need to relocate with this mode. ++ bool MustUseReg(RelocInfo::Mode rmode); ++ ++ // Record reloc info for current pc_. ++ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); ++ ++ // Block the emission of the trampoline pool before pc_offset. ++ void BlockTrampolinePoolBefore(int pc_offset) { ++ if (no_trampoline_pool_before_ < pc_offset) ++ no_trampoline_pool_before_ = pc_offset; ++ } ++ ++ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } ++ ++ void EndBlockTrampolinePool() { ++ trampoline_pool_blocked_nesting_--; ++ if (trampoline_pool_blocked_nesting_ == 0) { ++ CheckTrampolinePoolQuick(1); ++ } ++ } ++ ++ bool is_trampoline_pool_blocked() const { ++ return trampoline_pool_blocked_nesting_ > 0; ++ } ++ ++ bool has_exception() const { return internal_trampoline_exception_; } ++ ++ bool is_trampoline_emitted() const { return trampoline_emitted_; } ++ ++ // Temporarily block automatic assembly buffer growth. ++ void StartBlockGrowBuffer() { ++ DCHECK(!block_buffer_growth_); ++ block_buffer_growth_ = true; ++ } ++ ++ void EndBlockGrowBuffer() { ++ DCHECK(block_buffer_growth_); ++ block_buffer_growth_ = false; ++ } ++ ++ bool is_buffer_growth_blocked() const { return block_buffer_growth_; } ++ ++ void CheckTrampolinePoolQuick(int extra_instructions = 0) { ++ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { ++ CheckTrampolinePool(); ++ } ++ } ++ ++ private: ++ // Avoid overflows for displacements etc. ++ static const int kMaximalBufferSize = 512 * MB; ++ ++ // Buffer size and constant pool distance are checked together at regular ++ // intervals of kBufferCheckInterval emitted bytes. ++ static constexpr int kBufferCheckInterval = 1 * KB / 2; ++ ++ // Code generation. ++ // The relocation writer's position is at least kGap bytes below the end of ++ // the generated instructions. This is so that multi-instruction sequences do ++ // not have to check for overflow. The same is true for writes of large ++ // relocation info entries. ++ static constexpr int kGap = 64; ++ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); ++ ++ // Repeated checking whether the trampoline pool should be emitted is rather ++ // expensive. By default we only check again once a number of instructions ++ // has been generated. ++ static constexpr int kCheckConstIntervalInst = 32; ++ static constexpr int kCheckConstInterval = ++ kCheckConstIntervalInst * kInstrSize; ++ ++ int next_buffer_check_; // pc offset of next buffer check. ++ ++ // Emission of the trampoline pool may be blocked in some code sequences. ++ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. ++ int no_trampoline_pool_before_; // Block emission before this pc offset. ++ ++ // Keep track of the last emitted pool to guarantee a maximal distance. ++ int last_trampoline_pool_end_; // pc offset of the end of the last pool. ++ ++ // Automatic growth of the assembly buffer may be blocked for some sequences. ++ bool block_buffer_growth_; // Block growth when true. 
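
Typical use of the trampoline-pool blocking machinery above goes through the RAII BlockTrampolinePoolScope declared earlier in this class. A minimal sketch, assuming an Assembler named assm and hypothetical immediates hi20 and lo12:

    {
      Assembler::BlockTrampolinePoolScope block_pool(&assm);
      assm.lu12i_w(t0, hi20);  // this pair must stay contiguous, so no
      assm.ori(t0, t0, lo12);  // trampoline pool may be emitted between them
    }  // leaving the scope decrements the nesting and re-checks the pool
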
++
++  // Relocation information generation.
++  // Each relocation is encoded as a variable size value.
++  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
++  RelocInfoWriter reloc_info_writer;
++
++  // The bound position, before this we cannot do instruction elimination.
++  int last_bound_pos_;
++
++  // Code emission.
++  inline void CheckBuffer();
++  void GrowBuffer();
++  inline void emit(Instr x);
++  inline void emit(uint64_t x);
++  // inline void CheckForEmitInForbiddenSlot();
++  template <typename T>
++  inline void EmitHelper(T x);
++  inline void EmitHelper(Instr x);
++
++  void GenB(Opcode opcode, Register rj, int32_t si21);  // opcode:6
++  void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq);
++  void GenB(Opcode opcode, int32_t si26);
++  void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16);
++  void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj,
++              CFRegister cd);
++  void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj,
++              FPURegister rd);
++
++  void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true);
++  void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd);
++  void GenRegister(Opcode opcode, Register rj, FPURegister fd);
++  void GenRegister(Opcode opcode, FPURegister fj, Register rd);
++  void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd);
++  void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd);
++  void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd);
++  void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd);
++  void GenRegister(Opcode opcode, Register rj, CFRegister cd);
++  void GenRegister(Opcode opcode, CFRegister cj, Register rd);
++
++  void GenRegister(Opcode opcode, Register rk, Register rj, Register rd);
++  void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
++                   FPURegister fd);
++
++  void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
++                   FPURegister fj, FPURegister fd);
++  void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd);
++
++  void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
++              Register rd);
++  void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
++              Register rd);
++  void GenImm(Opcode opcode, int32_t bit20, Register rd);
++  void GenImm(Opcode opcode, int32_t bit15);
++  void GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
++              int32_t value_bits);  // 6 | 12 | 14 | 16
++  void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd);
++
++  // Labels.
++  void print(const Label* L);
++  void bind_to(Label* L, int pos);
++  void next(Label* L, bool is_internal);
++
++  // One trampoline consists of:
++  // - space for trampoline slots,
++  // - space for labels.
++  //
++  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
++  // Space for trampoline slots precedes space for labels. Each label is of one
++  // instruction size, so total amount for labels is equal to
++  // label_count * kInstrSize.
++  class Trampoline {
++   public:
++    Trampoline() {
++      start_ = 0;
++      next_slot_ = 0;
++      free_slot_count_ = 0;
++      end_ = 0;
++    }
++    Trampoline(int start, int slot_count) {
++      start_ = start;
++      next_slot_ = start;
++      free_slot_count_ = slot_count;
++      end_ = start + slot_count * kTrampolineSlotsSize;
++    }
++    int start() { return start_; }
++    int end() { return end_; }
++    int take_slot() {
++      int trampoline_slot = kInvalidSlotPos;
++      if (free_slot_count_ <= 0) {
++        // We have run out of space on trampolines.
++        // Make sure we fail in debug mode, so we become aware of each case
++        // when this happens.
++        DCHECK(0);
++        // Internal exception will be caught.
++      } else {
++        trampoline_slot = next_slot_;
++        free_slot_count_--;
++        next_slot_ += kTrampolineSlotsSize;
++      }
++      return trampoline_slot;
++    }
++
++   private:
++    int start_;
++    int end_;
++    int next_slot_;
++    int free_slot_count_;
++  };
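
The slot bookkeeping above is plain arithmetic. With hypothetical numbers (Trampoline is private to Assembler, so this is purely illustrative), a pool of 4 slots starting at pc offset 100 behaves like this:

    Trampoline pool(100, 4);    // end() == 100 + 4 * kTrampolineSlotsSize == 132
    int s0 = pool.take_slot();  // 100
    int s1 = pool.take_slot();  // 108; two more takes exhaust the pool, after
                                // which take_slot() DCHECKs in debug builds
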
++
++  int32_t get_trampoline_entry(int32_t pos);
++  int unbound_labels_count_;
++  // After trampoline is emitted, long branches are used in generated code for
++  // the forward branches whose target offsets could be beyond reach of branch
++  // instruction. We use this information to trigger different mode of
++  // branch instruction generation, where we use jump instructions rather
++  // than regular branch instructions.
++  bool trampoline_emitted_;
++  static constexpr int kInvalidSlotPos = -1;
++
++  // Internal reference positions, required for unbounded internal reference
++  // labels.
++  std::set<int64_t> internal_reference_positions_;
++  bool is_internal_reference(Label* L) {
++    return internal_reference_positions_.find(L->pos()) !=
++           internal_reference_positions_.end();
++  }
++
++  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
++  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
++  bool prev_instr_compact_branch_ = false;
++
++  Trampoline trampoline_;
++  bool internal_trampoline_exception_;
++
++  RegList scratch_register_list_;
++
++ private:
++  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
++
++  int WriteCodeComments();
++
++  friend class RegExpMacroAssemblerMIPS;
++  friend class RelocInfo;
++  friend class BlockTrampolinePoolScope;
++  friend class EnsureSpace;
++};
++
++class EnsureSpace {
++ public:
++  explicit inline EnsureSpace(Assembler* assembler);
++};
++
++class V8_EXPORT_PRIVATE UseScratchRegisterScope {
++ public:
++  explicit UseScratchRegisterScope(Assembler* assembler);
++  ~UseScratchRegisterScope();
++
++  Register Acquire();
++  bool hasAvailable() const;
++
++ private:
++  RegList* available_;
++  RegList old_available_;
++};
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+diff --git a/deps/v8/src/codegen/loong64/constants-loong64.cc b/deps/v8/src/codegen/loong64/constants-loong64.cc
+new file mode 100644
+index 00000000..3ae0f473
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/constants-loong64.cc
+@@ -0,0 +1,100 @@
++// Copyright 2011 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#if V8_TARGET_ARCH_LOONG64
++
++#include "src/codegen/loong64/constants-loong64.h"
++
++namespace v8 {
++namespace internal {
++
++// -----------------------------------------------------------------------------
++// Registers.
++
++// These register names are defined in a way to match the native disassembler
++// formatting. See for example the command "objdump -d <binary file>".
++const char* Registers::names_[kNumSimuRegisters] = {
++    "zero_reg", "ra", "gp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
++    "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "tp",
++    "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
++
++// List of alias names which can be used when referring to LOONG64 registers.
++const Registers::RegisterAlias Registers::aliases_[] = {
++    {0, "zero"}, {23, "cp"}, {kInvalidRegister, nullptr}};
++
++const char* Registers::Name(int reg) {
++  const char* result;
++  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
++    result = names_[reg];
++  } else {
++    result = "noreg";
++  }
++  return result;
++}
++
++int Registers::Number(const char* name) {
++  // Look through the canonical names.
++  for (int i = 0; i < kNumSimuRegisters; i++) {
++    if (strcmp(names_[i], name) == 0) {
++      return i;
++    }
++  }
++
++  // Look through the alias names.
++  int i = 0;
++  while (aliases_[i].reg != kInvalidRegister) {
++    if (strcmp(aliases_[i].name, name) == 0) {
++      return aliases_[i].reg;
++    }
++    i++;
++  }
++
++  // No register with the requested name found.
++  return kInvalidRegister;
++}
++
++const char* FPURegisters::names_[kNumFPURegisters] = {
++    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
++    "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
++    "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
++
++// List of alias names which can be used when referring to LOONG64 FPU
++// registers.
++const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
++    {kInvalidRegister, nullptr}};
++
++const char* FPURegisters::Name(int creg) {
++  const char* result;
++  if ((0 <= creg) && (creg < kNumFPURegisters)) {
++    result = names_[creg];
++  } else {
++    result = "nocreg";
++  }
++  return result;
++}
++
++int FPURegisters::Number(const char* name) {
++  // Look through the canonical names.
++  for (int i = 0; i < kNumFPURegisters; i++) {
++    if (strcmp(names_[i], name) == 0) {
++      return i;
++    }
++  }
++
++  // Look through the alias names.
++  int i = 0;
++  while (aliases_[i].creg != kInvalidRegister) {
++    if (strcmp(aliases_[i].name, name) == 0) {
++      return aliases_[i].creg;
++    }
++    i++;
++  }
++
++  // No Cregister with the requested name found.
++  return kInvalidFPURegister;
++}
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_LOONG64
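
As a quick illustration of how the two lookup helpers in this file round-trip (the return values follow directly from the tables above; "bogus" is an arbitrary unknown name):

    const char* n = Registers::Name(0);      // "zero_reg"
    int a = Registers::Number("zero_reg");   // 0, via the canonical names
    int b = Registers::Number("zero");       // 0, via the aliases_ table
    int c = Registers::Number("bogus");      // kInvalidRegister (-1)
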
+diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
+new file mode 100644
+index 00000000..e94ec5dd
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/constants-loong64.h
+@@ -0,0 +1,1340 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#ifndef V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
++#define V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
++
++#include "src/base/logging.h"
++#include "src/base/macros.h"
++#include "src/common/globals.h"
++
++// UNIMPLEMENTED_ macro for LOONGISA.
++#ifdef DEBUG
++#define UNIMPLEMENTED_LOONGISA() \
++  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
++                       __FILE__, __LINE__, __func__)
++#else
++#define UNIMPLEMENTED_LOONGISA()
++#endif
++
++#define UNSUPPORTED_LOONGISA() \
++  v8::internal::PrintF("Unsupported instruction.\n")
++
++const uint32_t kLeastSignificantByteInInt32Offset = 0;
++const uint32_t kLessSignificantWordInDoublewordOffset = 0;
++
++#ifndef __STDC_FORMAT_MACROS
++#define __STDC_FORMAT_MACROS
++#endif
++#include <inttypes.h>
++
++// Defines constants and accessor classes to assemble, disassemble and
++// simulate LOONG64 instructions.
++
++namespace v8 {
++namespace internal {
++
++constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
++
++// -----------------------------------------------------------------------------
++// Registers and FPURegisters.
++
++// Number of general purpose registers.
++const int kNumRegisters = 32;
++const int kInvalidRegister = -1;
++
++// Number of registers with pc.
++const int kNumSimuRegisters = 33;
++
++// In the simulator, the PC register is simulated as the 33rd register.
++const int kPCRegister = 32;
++
++// Number of coprocessor registers.
++const int kNumFPURegisters = 32;
++const int kInvalidFPURegister = -1;
++
++// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
++// TODO fcsr0 fcsr1 fcsr2 fcsr3
++const int kFCSRRegister = 0;
++const int kInvalidFPUControlRegister = -1;
++const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
++const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
++const uint64_t kFPU64InvalidResult =
++    static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
++const int64_t kFPU64InvalidResultNegative =
++    static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
++
++// FCSR constants.
++// TODO
++const uint32_t kFCSRInexactFlagBit = 16;
++const uint32_t kFCSRUnderflowFlagBit = 17;
++const uint32_t kFCSROverflowFlagBit = 18;
++const uint32_t kFCSRDivideByZeroFlagBit = 19;
++const uint32_t kFCSRInvalidOpFlagBit = 20;
++
++const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
++const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
++const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
++const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
++const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
++
++const uint32_t kFCSRFlagMask =
++    kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
++    kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
++
++const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
++
++// Actual value of root register is offset from the root array's start
++// to take advantage of negative displacement values.
++// TODO(sigurds): Choose best value.
++constexpr int kRootRegisterBias = 256;
++
++// Helper functions for converting between register numbers and names.
++class Registers {
++ public:
++  // Return the name of the register.
++  static const char* Name(int reg);
++
++  // Lookup the register number for the name provided.
++  static int Number(const char* name);
++
++  struct RegisterAlias {
++    int reg;
++    const char* name;
++  };
++
++  static const int64_t kMaxValue = 0x7fffffffffffffffl;
++  static const int64_t kMinValue = 0x8000000000000000l;
++
++ private:
++  static const char* names_[kNumSimuRegisters];
++  static const RegisterAlias aliases_[];
++};
++
++// Helper functions for converting between register numbers and names.
++class FPURegisters {
++ public:
++  // Return the name of the register.
++  static const char* Name(int reg);
++
++  // Lookup the register number for the name provided.
++  static int Number(const char* name);
++
++  struct RegisterAlias {
++    int creg;
++    const char* name;
++  };
++
++ private:
++  static const char* names_[kNumFPURegisters];
++  static const RegisterAlias aliases_[];
++};
++
++// -----------------------------------------------------------------------------
++// Instructions encoding constants.
++
++// On LoongISA all instructions are 32 bits.
++using Instr = int32_t;
++
++// Special Software Interrupt codes when used in the presence of the LOONG64
++// simulator.
++enum SoftwareInterruptCodes {
++  // Transition to C code.
++ call_rt_redirected = 0x7fff ++}; ++ ++// On LOONG64 Simulator breakpoints can have different codes: ++// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, ++// the simulator will run through them and print the registers. ++// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() ++// instructions (see Assembler::stop()). ++// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the ++// debugger. ++const uint32_t kMaxWatchpointCode = 31; ++const uint32_t kMaxStopCode = 127; ++STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode); ++ ++// ----- Fields offset and length. ++const int kRjShift = 5; ++const int kRjBits = 5; ++const int kRkShift = 10; ++const int kRkBits = 5; ++const int kRdShift = 0; ++const int kRdBits = 5; ++const int kSaShift = 15; ++const int kSa2Bits = 2; ++const int kSa3Bits = 3; ++const int kCdShift = 0; ++const int kCdBits = 3; ++const int kCjShift = 5; ++const int kCjBits = 3; ++const int kCodeShift = 0; ++const int kCodeBits = 15; ++const int kCondShift = 15; ++const int kCondBits = 5; ++const int kUi5Shift = 10; ++const int kUi5Bits = 5; ++const int kUi6Shift = 10; ++const int kUi6Bits = 6; ++const int kUi12Shift = 10; ++const int kUi12Bits = 12; ++const int kSi12Shift = 10; ++const int kSi12Bits = 12; ++const int kSi14Shift = 10; ++const int kSi14Bits = 14; ++const int kSi16Shift = 10; ++const int kSi16Bits = 16; ++const int kSi20Shift = 5; ++const int kSi20Bits = 20; ++const int kMsbwShift = 16; ++const int kMsbwBits = 5; ++const int kLsbwShift = 10; ++const int kLsbwBits = 5; ++const int kMsbdShift = 16; ++const int kMsbdBits = 6; ++const int kLsbdShift = 10; ++const int kLsbdBits = 6; ++const int kFdShift = 0; ++const int kFdBits = 5; ++const int kFjShift = 5; ++const int kFjBits = 5; ++const int kFkShift = 10; ++const int kFkBits = 5; ++const int kFaShift = 15; ++const int kFaBits = 5; ++const int kCaShift = 15; ++const int kCaBits = 3; ++const int kHint15Shift = 0; ++const int kHint15Bits = 15; ++const int kHint5Shift = 0; ++const int kHint5Bits = 5; ++const int kOffsLowShift = 10; ++const int kOffsLowBits = 16; ++const int kOffs26HighShift = 0; ++const int kOffs26HighBits = 10; ++const int kOffs21HighShift = 0; ++const int kOffs21HighBits = 5; ++const int kImm12Shift = 0; ++const int kImm12Bits = 12; ++const int kImm16Shift = 0; ++const int kImm16Bits = 16; ++const int kImm26Shift = 0; ++const int kImm26Bits = 26; ++const int kImm28Shift = 0; ++const int kImm28Bits = 28; ++const int kImm32Shift = 0; ++const int kImm32Bits = 32; ++ ++// ----- Miscellaneous useful masks. ++// Instruction bit masks. ++const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift; ++const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift; ++const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; ++const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift; ++const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift; ++// Misc masks. 
++const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values ++const int kLoMaskOf32 = 0xffff; ++const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values ++const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48; ++const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32; ++const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16; ++ ++const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift; ++const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; ++const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; ++const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; ++ ++// ----- LOONG64 Opcodes and Function Fields. ++enum Opcode : uint32_t { ++ BEQZ = 0x10U << 26, ++ BNEZ = 0x11U << 26, ++ BCZ = 0x12U << 26, // BCEQZ & BCNEZ ++ JIRL = 0x13U << 26, ++ B = 0x14U << 26, ++ BL = 0x15U << 26, ++ BEQ = 0x16U << 26, ++ BNE = 0x17U << 26, ++ BLT = 0x18U << 26, ++ BGE = 0x19U << 26, ++ BLTU = 0x1aU << 26, ++ BGEU = 0x1bU << 26, ++ ++ ADDU16I_D = 0x4U << 26, ++ ++ LU12I_W = 0xaU << 25, ++ LU32I_D = 0xbU << 25, ++ PCADDI = 0xcU << 25, ++ PCALAU12I = 0xdU << 25, ++ PCADDU12I = 0xeU << 25, ++ PCADDU18I = 0xfU << 25, ++ ++ LL_W = 0x20U << 24, ++ SC_W = 0x21U << 24, ++ LL_D = 0x22U << 24, ++ SC_D = 0x23U << 24, ++ LDPTR_W = 0x24U << 24, ++ STPTR_W = 0x25U << 24, ++ LDPTR_D = 0x26U << 24, ++ STPTR_D = 0x27U << 24, ++ ++ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W ++ BSTRINS_W = BSTR_W, ++ BSTRPICK_W = BSTR_W, ++ BSTRINS_D = 0x2U << 22, ++ BSTRPICK_D = 0x3U << 22, ++ ++ SLTI = 0x8U << 22, ++ SLTUI = 0x9U << 22, ++ ADDI_W = 0xaU << 22, ++ ADDI_D = 0xbU << 22, ++ LU52I_D = 0xcU << 22, ++ ANDI = 0xdU << 22, ++ ORI = 0xeU << 22, ++ XORI = 0xfU << 22, ++ ++ LD_B = 0xa0U << 22, ++ LD_H = 0xa1U << 22, ++ LD_W = 0xa2U << 22, ++ LD_D = 0xa3U << 22, ++ ST_B = 0xa4U << 22, ++ ST_H = 0xa5U << 22, ++ ST_W = 0xa6U << 22, ++ ST_D = 0xa7U << 22, ++ LD_BU = 0xa8U << 22, ++ LD_HU = 0xa9U << 22, ++ LD_WU = 0xaaU << 22, ++ FLD_S = 0xacU << 22, ++ FST_S = 0xadU << 22, ++ FLD_D = 0xaeU << 22, ++ FST_D = 0xafU << 22, ++ ++ FMADD_S = 0x81U << 20, ++ FMADD_D = 0x82U << 20, ++ FMSUB_S = 0x85U << 20, ++ FMSUB_D = 0x86U << 20, ++ FNMADD_S = 0x89U << 20, ++ FNMADD_D = 0x8aU << 20, ++ FNMSUB_S = 0x8dU << 20, ++ FNMSUB_D = 0x8eU << 20, ++ FCMP_COND_S = 0xc1U << 20, ++ FCMP_COND_D = 0xc2U << 20, ++ ++ BYTEPICK_D = 0x3U << 18, ++ BYTEPICK_W = 0x2U << 18, ++ ++ FSEL = 0x340U << 18, ++ ++ ALSL = 0x1U << 18, ++ ALSL_W = ALSL, ++ ALSL_WU = ALSL, ++ ++ ALSL_D = 0xbU << 18, ++ ++ SLLI_W = 0x40U << 16, ++ SRLI_W = 0x44U << 16, ++ SRAI_W = 0x48U << 16, ++ ROTRI_W = 0x4cU << 16, ++ ++ SLLI_D = 0x41U << 16, ++ SRLI_D = 0x45U << 16, ++ SRAI_D = 0x49U << 16, ++ ROTRI_D = 0x4dU << 16, ++ ++ SLLI = 0x10U << 18, ++ SRLI = 0x11U << 18, ++ SRAI = 0x12U << 18, ++ ROTRI = 0x13U << 18, ++ ++ ADD_W = 0x20U << 15, ++ ADD_D = 0x21U << 15, ++ SUB_W = 0x22U << 15, ++ SUB_D = 0x23U << 15, ++ SLT = 0x24U << 15, ++ SLTU = 0x25U << 15, ++ MASKNEZ = 0x26U << 15, ++ MASKEQZ = 0x27U << 15, ++ NOR = 0x28U << 15, ++ AND = 0x29U << 15, ++ OR = 0x2aU << 15, ++ XOR = 0x2bU << 15, ++ ORN = 0x2cU << 15, ++ ANDN = 0x2dU << 15, ++ SLL_W = 0x2eU << 15, ++ SRL_W = 0x2fU << 15, ++ SRA_W = 0x30U << 15, ++ SLL_D = 0x31U << 15, ++ SRL_D = 0x32U << 15, ++ SRA_D = 0x33U << 15, ++ ROTR_W = 0x36U << 15, ++ ROTR_D = 0x37U << 15, ++ MUL_W = 0x38U << 15, ++ MULH_W = 0x39U << 15, ++ MULH_WU = 0x3aU << 15, ++ MUL_D = 0x3bU << 15, ++ MULH_D = 0x3cU << 15, ++ MULH_DU = 0x3dU << 15, ++ MULW_D_W = 0x3eU << 15, ++ MULW_D_WU = 0x3fU << 15, ++ 
++ DIV_W = 0x40U << 15, ++ MOD_W = 0x41U << 15, ++ DIV_WU = 0x42U << 15, ++ MOD_WU = 0x43U << 15, ++ DIV_D = 0x44U << 15, ++ MOD_D = 0x45U << 15, ++ DIV_DU = 0x46U << 15, ++ MOD_DU = 0x47U << 15, ++ ++ BREAK = 0x54U << 15, ++ ++ FADD_S = 0x201U << 15, ++ FADD_D = 0x202U << 15, ++ FSUB_S = 0x205U << 15, ++ FSUB_D = 0x206U << 15, ++ FMUL_S = 0x209U << 15, ++ FMUL_D = 0x20aU << 15, ++ FDIV_S = 0x20dU << 15, ++ FDIV_D = 0x20eU << 15, ++ FMAX_S = 0x211U << 15, ++ FMAX_D = 0x212U << 15, ++ FMIN_S = 0x215U << 15, ++ FMIN_D = 0x216U << 15, ++ FMAXA_S = 0x219U << 15, ++ FMAXA_D = 0x21aU << 15, ++ FMINA_S = 0x21dU << 15, ++ FMINA_D = 0x21eU << 15, ++ FSCALEB_S = 0x221U << 15, ++ FSCALEB_D = 0x222U << 15, ++ FCOPYSIGN_S = 0x225U << 15, ++ FCOPYSIGN_D = 0x226U << 15, ++ ++ LDX_B = 0x7000U << 15, ++ LDX_H = 0x7008U << 15, ++ LDX_W = 0x7010U << 15, ++ LDX_D = 0x7018U << 15, ++ STX_B = 0x7020U << 15, ++ STX_H = 0x7028U << 15, ++ STX_W = 0x7030U << 15, ++ STX_D = 0x7038U << 15, ++ LDX_BU = 0x7040U << 15, ++ LDX_HU = 0x7048U << 15, ++ LDX_WU = 0x7050U << 15, ++ FLDX_S = 0x7060U << 15, ++ FLDX_D = 0x7068U << 15, ++ FSTX_S = 0x7070U << 15, ++ FSTX_D = 0x7078U << 15, ++ ++ AMSWAP_W = 0x70c0U << 15, ++ AMSWAP_D = 0x70c1U << 15, ++ AMADD_W = 0x70c2U << 15, ++ AMADD_D = 0x70c3U << 15, ++ AMAND_W = 0x70c4U << 15, ++ AMAND_D = 0x70c5U << 15, ++ AMOR_W = 0x70c6U << 15, ++ AMOR_D = 0x70c7U << 15, ++ AMXOR_W = 0x70c8U << 15, ++ AMXOR_D = 0x70c9U << 15, ++ AMMAX_W = 0x70caU << 15, ++ AMMAX_D = 0x70cbU << 15, ++ AMMIN_W = 0x70ccU << 15, ++ AMMIN_D = 0x70cdU << 15, ++ AMMAX_WU = 0x70ceU << 15, ++ AMMAX_DU = 0x70cfU << 15, ++ AMMIN_WU = 0x70d0U << 15, ++ AMMIN_DU = 0x70d1U << 15, ++ AMSWAP_DB_W = 0x70d2U << 15, ++ AMSWAP_DB_D = 0x70d3U << 15, ++ AMADD_DB_W = 0x70d4U << 15, ++ AMADD_DB_D = 0x70d5U << 15, ++ AMAND_DB_W = 0x70d6U << 15, ++ AMAND_DB_D = 0x70d7U << 15, ++ AMOR_DB_W = 0x70d8U << 15, ++ AMOR_DB_D = 0x70d9U << 15, ++ AMXOR_DB_W = 0x70daU << 15, ++ AMXOR_DB_D = 0x70dbU << 15, ++ AMMAX_DB_W = 0x70dcU << 15, ++ AMMAX_DB_D = 0x70ddU << 15, ++ AMMIN_DB_W = 0x70deU << 15, ++ AMMIN_DB_D = 0x70dfU << 15, ++ AMMAX_DB_WU = 0x70e0U << 15, ++ AMMAX_DB_DU = 0x70e1U << 15, ++ AMMIN_DB_WU = 0x70e2U << 15, ++ AMMIN_DB_DU = 0x70e3U << 15, ++ ++ DBAR = 0x70e4U << 15, ++ IBAR = 0x70e5U << 15, ++ ++ CLO_W = 0X4U << 10, ++ CLZ_W = 0X5U << 10, ++ CTO_W = 0X6U << 10, ++ CTZ_W = 0X7U << 10, ++ CLO_D = 0X8U << 10, ++ CLZ_D = 0X9U << 10, ++ CTO_D = 0XaU << 10, ++ CTZ_D = 0XbU << 10, ++ REVB_2H = 0XcU << 10, ++ REVB_4H = 0XdU << 10, ++ REVB_2W = 0XeU << 10, ++ REVB_D = 0XfU << 10, ++ REVH_2W = 0X10U << 10, ++ REVH_D = 0X11U << 10, ++ BITREV_4B = 0X12U << 10, ++ BITREV_8B = 0X13U << 10, ++ BITREV_W = 0X14U << 10, ++ BITREV_D = 0X15U << 10, ++ EXT_W_H = 0X16U << 10, ++ EXT_W_B = 0X17U << 10, ++ ++ FABS_S = 0X4501U << 10, ++ FABS_D = 0X4502U << 10, ++ FNEG_S = 0X4505U << 10, ++ FNEG_D = 0X4506U << 10, ++ FLOGB_S = 0X4509U << 10, ++ FLOGB_D = 0X450aU << 10, ++ FCLASS_S = 0X450dU << 10, ++ FCLASS_D = 0X450eU << 10, ++ FSQRT_S = 0X4511U << 10, ++ FSQRT_D = 0X4512U << 10, ++ FRECIP_S = 0X4515U << 10, ++ FRECIP_D = 0X4516U << 10, ++ FRSQRT_S = 0X4519U << 10, ++ FRSQRT_D = 0X451aU << 10, ++ FMOV_S = 0X4525U << 10, ++ FMOV_D = 0X4526U << 10, ++ MOVGR2FR_W = 0X4529U << 10, ++ MOVGR2FR_D = 0X452aU << 10, ++ MOVGR2FRH_W = 0X452bU << 10, ++ MOVFR2GR_S = 0X452dU << 10, ++ MOVFR2GR_D = 0X452eU << 10, ++ MOVFRH2GR_S = 0X452fU << 10, ++ MOVGR2FCSR = 0X4530U << 10, ++ MOVFCSR2GR = 0X4532U << 10, ++ MOVFR2CF = 0X4534U << 10, ++ MOVGR2CF = 0X4536U << 10, ++ 
++ FCVT_S_D = 0x4646U << 10, ++ FCVT_D_S = 0x4649U << 10, ++ FTINTRM_W_S = 0x4681U << 10, ++ FTINTRM_W_D = 0x4682U << 10, ++ FTINTRM_L_S = 0x4689U << 10, ++ FTINTRM_L_D = 0x468aU << 10, ++ FTINTRP_W_S = 0x4691U << 10, ++ FTINTRP_W_D = 0x4692U << 10, ++ FTINTRP_L_S = 0x4699U << 10, ++ FTINTRP_L_D = 0x469aU << 10, ++ FTINTRZ_W_S = 0x46a1U << 10, ++ FTINTRZ_W_D = 0x46a2U << 10, ++ FTINTRZ_L_S = 0x46a9U << 10, ++ FTINTRZ_L_D = 0x46aaU << 10, ++ FTINTRNE_W_S = 0x46b1U << 10, ++ FTINTRNE_W_D = 0x46b2U << 10, ++ FTINTRNE_L_S = 0x46b9U << 10, ++ FTINTRNE_L_D = 0x46baU << 10, ++ FTINT_W_S = 0x46c1U << 10, ++ FTINT_W_D = 0x46c2U << 10, ++ FTINT_L_S = 0x46c9U << 10, ++ FTINT_L_D = 0x46caU << 10, ++ FFINT_S_W = 0x4744U << 10, ++ FFINT_S_L = 0x4746U << 10, ++ FFINT_D_W = 0x4748U << 10, ++ FFINT_D_L = 0x474aU << 10, ++ FRINT_S = 0x4791U << 10, ++ FRINT_D = 0x4792U << 10, ++ ++ MOVCF2FR = 0x4535U << 10, ++ MOVCF2GR = 0x4537U << 10 ++}; ++ ++// ----- Emulated conditions. ++// On LOONG64 we use this enum to abstract from conditional branch instructions. ++// The 'U' prefix is used to specify unsigned comparisons. ++enum Condition { ++ // Any value < 0 is considered no_condition. ++ kNoCondition = -1, ++ overflow = 0, ++ no_overflow = 1, ++ Uless = 2, ++ Ugreater_equal = 3, ++ Uless_equal = 4, ++ Ugreater = 5, ++ equal = 6, ++ not_equal = 7, // Unordered or Not Equal. ++ negative = 8, ++ positive = 9, ++ parity_even = 10, ++ parity_odd = 11, ++ less = 12, ++ greater_equal = 13, ++ less_equal = 14, ++ greater = 15, ++ ueq = 16, // Unordered or Equal. ++ ogl = 17, // Ordered and Not Equal. ++ cc_always = 18, ++ ++ // Aliases. ++ carry = Uless, ++ not_carry = Ugreater_equal, ++ zero = equal, ++ eq = equal, ++ not_zero = not_equal, ++ ne = not_equal, ++ nz = not_equal, ++ sign = negative, ++ not_sign = positive, ++ mi = negative, ++ pl = positive, ++ hi = Ugreater, ++ ls = Uless_equal, ++ ge = greater_equal, ++ lt = less, ++ gt = greater, ++ le = less_equal, ++ hs = Ugreater_equal, ++ lo = Uless, ++ al = cc_always, ++ ult = Uless, ++ uge = Ugreater_equal, ++ ule = Uless_equal, ++ ugt = Ugreater, ++ cc_default = kNoCondition ++}; ++ ++// Returns the equivalent of !cc. ++// Negation of the default kNoCondition (-1) results in a non-default ++// no_condition value (-2). As long as tests for no_condition check ++// for condition < 0, this will work as expected. ++inline Condition NegateCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ return static_cast(cc ^ 1); ++} ++ ++inline Condition NegateFpuCondition(Condition cc) { ++ DCHECK(cc != cc_always); ++ switch (cc) { ++ case ult: ++ return ge; ++ case ugt: ++ return le; ++ case uge: ++ return lt; ++ case ule: ++ return gt; ++ case lt: ++ return uge; ++ case gt: ++ return ule; ++ case ge: ++ return ult; ++ case le: ++ return ugt; ++ case eq: ++ return ne; ++ case ne: ++ return eq; ++ case ueq: ++ return ogl; ++ case ogl: ++ return ueq; ++ default: ++ return cc; ++ } ++} ++ ++// ----- Coprocessor conditions. ++enum FPUCondition { ++ kNoFPUCondition = -1, ++ ++ CAF = 0x00, // False. ++ SAF = 0x01, // False. 
++  CLT = 0x02,  // Less Than quiet
++  // SLT = 0x03,   // Less Than signaling
++  CEQ = 0x04,
++  SEQ = 0x05,
++  CLE = 0x06,
++  SLE = 0x07,
++  CUN = 0x08,
++  SUN = 0x09,
++  CULT = 0x0a,
++  SULT = 0x0b,
++  CUEQ = 0x0c,
++  SUEQ = 0x0d,
++  CULE = 0x0e,
++  SULE = 0x0f,
++  CNE = 0x10,
++  SNE = 0x11,
++  COR = 0x14,
++  SOR = 0x15,
++  CUNE = 0x18,
++  SUNE = 0x19,
++};
++
++const uint32_t kFPURoundingModeShift = 8;
++const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
++
++// FPU rounding modes.
++enum FPURoundingMode {
++  RN = 0b00 << kFPURoundingModeShift,  // Round to Nearest.
++  RZ = 0b01 << kFPURoundingModeShift,  // Round towards zero.
++  RP = 0b10 << kFPURoundingModeShift,  // Round towards Plus Infinity.
++  RM = 0b11 << kFPURoundingModeShift,  // Round towards Minus Infinity.
++
++  // Aliases.
++  kRoundToNearest = RN,
++  kRoundToZero = RZ,
++  kRoundToPlusInf = RP,
++  kRoundToMinusInf = RM,
++
++  mode_round = RN,
++  mode_ceil = RP,
++  mode_floor = RM,
++  mode_trunc = RZ
++};
++
++enum CheckForInexactConversion {
++  kCheckForInexactConversion,
++  kDontCheckForInexactConversion
++};
++
++enum class MaxMinKind : int { kMin = 0, kMax = 1 };
++
++// -----------------------------------------------------------------------------
++// Hints.
++
++// Branch hints are not used on the LOONG64. They are defined so that they can
++// appear in shared function signatures, but will be ignored in LOONG64
++// implementations.
++enum Hint { no_hint = 0 };
++
++inline Hint NegateHint(Hint hint) { return no_hint; }
++
++// -----------------------------------------------------------------------------
++// Specific instructions, constants, and masks.
++// These constants are declared in assembler-loong64.cc, as they use named
++// registers and other constants.
++
++// addi_d(sp, sp, 8) aka Pop() operation or part of Pop(r)
++// operations as post-increment of sp.
++extern const Instr kPopInstruction;
++// addi_d(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
++extern const Instr kPushInstruction;
++// St_d(r, MemOperand(sp, 0))
++extern const Instr kPushRegPattern;
++// Ld_d(r, MemOperand(sp, 0))
++extern const Instr kPopRegPattern;
++// extern const Instr kLwRegFpOffsetPattern;
++// extern const Instr kSwRegFpOffsetPattern;
++// extern const Instr kLwRegFpNegOffsetPattern;
++// extern const Instr kSwRegFpNegOffsetPattern;
++// A mask for the Rk register for push, pop, lw, sw instructions.
++extern const Instr kRtMask;
++// extern const Instr kLwSwInstrTypeMask;
++// extern const Instr kLwSwInstrArgumentMask;
++// extern const Instr kLwSwOffsetMask;
++
++// Break 0xfffff, reserved for redirected real time call.
++const Instr rtCallRedirInstr = BREAK | call_rt_redirected;
++// A nop instruction. (Encoding of addi_w 0 0 0).
++const Instr nopInstr = ADDI_W;
++
++constexpr uint8_t kInstrSize = 4;
++constexpr uint8_t kInstrSizeLog2 = 2;
++
++class InstructionBase {
++ public:
++  enum {
++    // On LoongISA PC cannot actually be directly accessed. We behave as if
++    // PC was always the value of the current instruction being executed.
++    kPCReadOffset = 0
++  };
++
++  enum Type {
++    kOp6Type,
++    kOp7Type,
++    kOp8Type,
++    kOp10Type,
++    kOp12Type,
++    kOp14Type,
++    kOp17Type,
++    kOp22Type,
++    kUnsupported = -1
++  };
++
++  // Get the raw instruction bits.
++  inline Instr InstructionBits() const {
++    return *reinterpret_cast<const Instr*>(this);
++  }
++
++  // Set the raw instruction bits to value.
++ inline void SetInstructionBits(Instr value) { ++ *reinterpret_cast(this) = value; ++ } ++ ++ // Read one particular bit out of the instruction bits. ++ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } ++ ++ // Read a bit field out of the instruction bits. ++ inline int Bits(int hi, int lo) const { ++ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); ++ } ++ ++ // Safe to call within InstructionType(). ++ inline int RjFieldRawNoAssert() const { ++ return InstructionBits() & kRjFieldMask; ++ } ++ ++ // Get the encoding type of the instruction. ++ inline Type InstructionType() const; ++ ++ protected: ++ InstructionBase() {} ++}; ++ ++template ++class InstructionGetters : public T { ++ public: ++ inline int RjValue() const { ++ return this->Bits(kRjShift + kRjBits - 1, kRjShift); ++ } ++ ++ inline int RkValue() const { ++ return this->Bits(kRkShift + kRkBits - 1, kRkShift); ++ } ++ ++ inline int RdValue() const { ++ return this->Bits(kRdShift + kRdBits - 1, kRdShift); ++ } ++ ++ inline int Sa2Value() const { ++ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift); ++ } ++ ++ inline int Sa3Value() const { ++ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift); ++ } ++ ++ inline int Ui5Value() const { ++ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift); ++ } ++ ++ inline int Ui6Value() const { ++ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift); ++ } ++ ++ inline int Ui12Value() const { ++ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift); ++ } ++ ++ inline int LsbwValue() const { ++ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift); ++ } ++ ++ inline int MsbwValue() const { ++ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift); ++ } ++ ++ inline int LsbdValue() const { ++ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift); ++ } ++ ++ inline int MsbdValue() const { ++ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift); ++ } ++ ++ inline int CondValue() const { ++ return this->Bits(kCondShift + kCondBits - 1, kCondShift); ++ } ++ ++ inline int Si12Value() const { ++ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift); ++ } ++ ++ inline int Si14Value() const { ++ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift); ++ } ++ ++ inline int Si16Value() const { ++ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift); ++ } ++ ++ inline int Si20Value() const { ++ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift); ++ } ++ ++ inline int FdValue() const { ++ return this->Bits(kFdShift + kFdBits - 1, kFdShift); ++ } ++ ++ inline int FaValue() const { ++ return this->Bits(kFaShift + kFaBits - 1, kFaShift); ++ } ++ ++ inline int FjValue() const { ++ return this->Bits(kFjShift + kFjBits - 1, kFjShift); ++ } ++ ++ inline int FkValue() const { ++ return this->Bits(kFkShift + kFkBits - 1, kFkShift); ++ } ++ ++ inline int CjValue() const { ++ return this->Bits(kCjShift + kCjBits - 1, kCjShift); ++ } ++ ++ inline int CdValue() const { ++ return this->Bits(kCdShift + kCdBits - 1, kCdShift); ++ } ++ ++ inline int CaValue() const { ++ return this->Bits(kCaShift + kCaBits - 1, kCaShift); ++ } ++ ++ inline int CodeValue() const { ++ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift); ++ } ++ ++ inline int Hint5Value() const { ++ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift); ++ } ++ ++ inline int Hint15Value() const { ++ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift); ++ } ++ ++ inline int Offs16Value() const { ++ return this->Bits(kOffsLowShift + kOffsLowBits - 1, 
kOffsLowShift); ++ } ++ ++ inline int Offs21Value() const { ++ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); ++ int high = ++ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift); ++ return ((high << kOffsLowBits) + low); ++ } ++ ++ inline int Offs26Value() const { ++ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift); ++ int high = ++ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift); ++ return ((high << kOffsLowBits) + low); ++ } ++ ++ inline int RjFieldRaw() const { ++ return this->InstructionBits() & kRjFieldMask; ++ } ++ ++ inline int RkFieldRaw() const { ++ return this->InstructionBits() & kRkFieldMask; ++ } ++ ++ inline int RdFieldRaw() const { ++ return this->InstructionBits() & kRdFieldMask; ++ } ++ ++ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); } ++ ++ /*TODO*/ ++ inline int32_t Imm12Value() const { abort(); } ++ ++ inline int32_t Imm14Value() const { abort(); } ++ ++ inline int32_t Imm16Value() const { abort(); } ++ ++ // Say if the instruction 'links'. e.g. jal, bal. ++ bool IsLinkingInstruction() const; ++ // Say if the instruction is a break or a trap. ++ bool IsTrap() const; ++}; ++ ++class Instruction : public InstructionGetters { ++ public: ++ // Instructions are read of out a code stream. The only way to get a ++ // reference to an instruction is to convert a pointer. There is no way ++ // to allocate or create instances of class Instruction. ++ // Use the At(pc) function to create references to Instruction. ++ static Instruction* At(byte* pc) { ++ return reinterpret_cast(pc); ++ } ++ ++ private: ++ // We need to prevent the creation of instances of class Instruction. ++ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); ++}; ++ ++// ----------------------------------------------------------------------------- ++// LOONG64 assembly various constants. ++ ++// C/C++ argument slots size. ++const int kCArgSlotCount = 0; ++ ++const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2; ++ ++const int kInvalidStackOffset = -1; ++ ++static const int kNegOffset = 0x00008000; ++ ++InstructionBase::Type InstructionBase::InstructionType() const { ++ InstructionBase::Type kType = kUnsupported; ++ ++ // Check for kOp6Type ++ switch (Bits(31, 26) << 26) { ++ case ADDU16I_D: ++ case BEQZ: ++ case BNEZ: ++ case BCZ: ++ case JIRL: ++ case B: ++ case BL: ++ case BEQ: ++ case BNE: ++ case BLT: ++ case BGE: ++ case BLTU: ++ case BGEU: ++ kType = kOp6Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp7Type ++ switch (Bits(31, 25) << 25) { ++ case LU12I_W: ++ case LU32I_D: ++ case PCADDI: ++ case PCALAU12I: ++ case PCADDU12I: ++ case PCADDU18I: ++ kType = kOp7Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp8Type ++ switch (Bits(31, 24) << 24) { ++ case LDPTR_W: ++ case STPTR_W: ++ case LDPTR_D: ++ case STPTR_D: ++ case LL_W: ++ case SC_W: ++ case LL_D: ++ case SC_D: ++ kType = kOp8Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp10Type ++ switch (Bits(31, 22) << 22) { ++ case BSTR_W: { ++ // If Bit(21) = 0, then the Opcode is not BSTR_W. 
++ if (Bit(21) == 0) ++ kType = kUnsupported; ++ else ++ kType = kOp10Type; ++ break; ++ } ++ case BSTRINS_D: ++ case BSTRPICK_D: ++ case SLTI: ++ case SLTUI: ++ case ADDI_W: ++ case ADDI_D: ++ case LU52I_D: ++ case ANDI: ++ case ORI: ++ case XORI: ++ case LD_B: ++ case LD_H: ++ case LD_W: ++ case LD_D: ++ case ST_B: ++ case ST_H: ++ case ST_W: ++ case ST_D: ++ case LD_BU: ++ case LD_HU: ++ case LD_WU: ++ case FLD_S: ++ case FST_S: ++ case FLD_D: ++ case FST_D: ++ kType = kOp10Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp12Type ++ switch (Bits(31, 20) << 20) { ++ case FMADD_S: ++ case FMADD_D: ++ case FMSUB_S: ++ case FMSUB_D: ++ case FNMADD_S: ++ case FNMADD_D: ++ case FNMSUB_S: ++ case FNMSUB_D: ++ case FCMP_COND_S: ++ case FCMP_COND_D: ++ case FSEL: ++ kType = kOp12Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp14Type ++ switch (Bits(31, 18) << 18) { ++ case ALSL: ++ case BYTEPICK_W: ++ case BYTEPICK_D: ++ case ALSL_D: ++ case SLLI: ++ case SRLI: ++ case SRAI: ++ case ROTRI: ++ kType = kOp14Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp17Type ++ switch (Bits(31, 15) << 15) { ++ case ADD_W: ++ case ADD_D: ++ case SUB_W: ++ case SUB_D: ++ case SLT: ++ case SLTU: ++ case MASKEQZ: ++ case MASKNEZ: ++ case NOR: ++ case AND: ++ case OR: ++ case XOR: ++ case ORN: ++ case ANDN: ++ case SLL_W: ++ case SRL_W: ++ case SRA_W: ++ case SLL_D: ++ case SRL_D: ++ case SRA_D: ++ case ROTR_D: ++ case ROTR_W: ++ case MUL_W: ++ case MULH_W: ++ case MULH_WU: ++ case MUL_D: ++ case MULH_D: ++ case MULH_DU: ++ case MULW_D_W: ++ case MULW_D_WU: ++ case DIV_W: ++ case MOD_W: ++ case DIV_WU: ++ case MOD_WU: ++ case DIV_D: ++ case MOD_D: ++ case DIV_DU: ++ case MOD_DU: ++ case BREAK: ++ case FADD_S: ++ case FADD_D: ++ case FSUB_S: ++ case FSUB_D: ++ case FMUL_S: ++ case FMUL_D: ++ case FDIV_S: ++ case FDIV_D: ++ case FMAX_S: ++ case FMAX_D: ++ case FMIN_S: ++ case FMIN_D: ++ case FMAXA_S: ++ case FMAXA_D: ++ case FMINA_S: ++ case FMINA_D: ++ case LDX_B: ++ case LDX_H: ++ case LDX_W: ++ case LDX_D: ++ case STX_B: ++ case STX_H: ++ case STX_W: ++ case STX_D: ++ case LDX_BU: ++ case LDX_HU: ++ case LDX_WU: ++ case FLDX_S: ++ case FLDX_D: ++ case FSTX_S: ++ case FSTX_D: ++ case AMSWAP_W: ++ case AMSWAP_D: ++ case AMADD_W: ++ case AMADD_D: ++ case AMAND_W: ++ case AMAND_D: ++ case AMOR_W: ++ case AMOR_D: ++ case AMXOR_W: ++ case AMXOR_D: ++ case AMMAX_W: ++ case AMMAX_D: ++ case AMMIN_W: ++ case AMMIN_D: ++ case AMMAX_WU: ++ case AMMAX_DU: ++ case AMMIN_WU: ++ case AMMIN_DU: ++ case AMSWAP_DB_W: ++ case AMSWAP_DB_D: ++ case AMADD_DB_W: ++ case AMADD_DB_D: ++ case AMAND_DB_W: ++ case AMAND_DB_D: ++ case AMOR_DB_W: ++ case AMOR_DB_D: ++ case AMXOR_DB_W: ++ case AMXOR_DB_D: ++ case AMMAX_DB_W: ++ case AMMAX_DB_D: ++ case AMMIN_DB_W: ++ case AMMIN_DB_D: ++ case AMMAX_DB_WU: ++ case AMMAX_DB_DU: ++ case AMMIN_DB_WU: ++ case AMMIN_DB_DU: ++ case DBAR: ++ case IBAR: ++ case FSCALEB_S: ++ case FSCALEB_D: ++ case FCOPYSIGN_S: ++ case FCOPYSIGN_D: ++ kType = kOp17Type; ++ break; ++ default: ++ kType = kUnsupported; ++ } ++ } ++ ++ if (kType == kUnsupported) { ++ // Check for kOp22Type ++ switch (Bits(31, 10) << 10) { ++ case CLZ_W: ++ case CTZ_W: ++ case CLZ_D: ++ case CTZ_D: ++ case REVB_2H: ++ case REVB_4H: ++ case REVB_2W: ++ case REVB_D: ++ case REVH_2W: ++ case REVH_D: ++ case BITREV_4B: ++ case BITREV_8B: ++ case 
BITREV_W:
++      case BITREV_D:
++      case EXT_W_B:
++      case EXT_W_H:
++      case FABS_S:
++      case FABS_D:
++      case FNEG_S:
++      case FNEG_D:
++      case FSQRT_S:
++      case FSQRT_D:
++      case FMOV_S:
++      case FMOV_D:
++      case MOVGR2FR_W:
++      case MOVGR2FR_D:
++      case MOVGR2FRH_W:
++      case MOVFR2GR_S:
++      case MOVFR2GR_D:
++      case MOVFRH2GR_S:
++      case MOVGR2FCSR:
++      case MOVFCSR2GR:
++      case FCVT_S_D:
++      case FCVT_D_S:
++      case FTINTRM_W_S:
++      case FTINTRM_W_D:
++      case FTINTRM_L_S:
++      case FTINTRM_L_D:
++      case FTINTRP_W_S:
++      case FTINTRP_W_D:
++      case FTINTRP_L_S:
++      case FTINTRP_L_D:
++      case FTINTRZ_W_S:
++      case FTINTRZ_W_D:
++      case FTINTRZ_L_S:
++      case FTINTRZ_L_D:
++      case FTINTRNE_W_S:
++      case FTINTRNE_W_D:
++      case FTINTRNE_L_S:
++      case FTINTRNE_L_D:
++      case FTINT_W_S:
++      case FTINT_W_D:
++      case FTINT_L_S:
++      case FTINT_L_D:
++      case FFINT_S_W:
++      case FFINT_S_L:
++      case FFINT_D_W:
++      case FFINT_D_L:
++      case FRINT_S:
++      case FRINT_D:
++      case MOVFR2CF:
++      case MOVCF2FR:
++      case MOVGR2CF:
++      case MOVCF2GR:
++      case FRECIP_S:
++      case FRECIP_D:
++      case FRSQRT_S:
++      case FRSQRT_D:
++      case FCLASS_S:
++      case FCLASS_D:
++      case FLOGB_S:
++      case FLOGB_D:
++      case CLO_W:
++      case CTO_W:
++      case CLO_D:
++      case CTO_D:
++        kType = kOp22Type;
++        break;
++      default:
++        kType = kUnsupported;
++    }
++  }
++
++  return kType;
++}
++
++// -----------------------------------------------------------------------------
++// Instructions.
++
++template <class T>
++bool InstructionGetters<T>::IsTrap() const {
++  return true;
++}
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+diff --git a/deps/v8/src/codegen/loong64/cpu-loong64.cc b/deps/v8/src/codegen/loong64/cpu-loong64.cc
+new file mode 100644
+index 00000000..4b5dc7c9
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/cpu-loong64.cc
+@@ -0,0 +1,38 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// CPU specific code for LoongArch independent of OS goes here.
++
++#include <sys/syscall.h>
++#include <unistd.h>
++
++#if V8_TARGET_ARCH_LOONG64
++
++#include "src/codegen/cpu-features.h"
++
++namespace v8 {
++namespace internal {
++
++void CpuFeatures::FlushICache(void* start, size_t size) {
++#if !defined(USE_SIMULATOR)
++  // Nothing to do, flushing no instructions.
++  if (size == 0) {
++    return;
++  }
++
++#if defined(ANDROID) && !defined(__LP64__)
++  // Bionic cacheflush can typically run in userland, avoiding kernel call.
++  char* end = reinterpret_cast<char*>(start) + size;
++  cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
++             0);
++#else   // ANDROID
++  asm("ibar 0\n");
++#endif  // ANDROID
++#endif  // !USE_SIMULATOR.
++}
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_LOONG64
+diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc b/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc
+new file mode 100644
+index 00000000..579b1b0f
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64.cc
+@@ -0,0 +1,356 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#if V8_TARGET_ARCH_LOONG64
++
++#include "src/codegen/interface-descriptors.h"
++
++#include "src/execution/frames.h"
++
++namespace v8 {
++namespace internal {
++
++const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
++
++void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
++    CallInterfaceDescriptorData* data, int register_parameter_count) {
++  const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
++  CHECK_LE(static_cast<size_t>(register_parameter_count),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(register_parameter_count,
++                                   default_stub_registers);
++}
++
++// On MIPS it is not allowed to use odd numbered floating point registers
++// (e.g. f1, f3, etc.) for parameters. This can happen if we use
++// DefaultInitializePlatformSpecific to assign float registers for parameters.
++// E.g. if the fourth parameter goes to a float register, f7 would be assigned
++// for that parameter (a3 cast to int is 7).
++bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
++  return reg.code() % 2 == 0;
++}
++
++void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2, a3};
++  CHECK_EQ(static_cast<size_t>(kParameterCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
++}
++
++void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2};
++  CHECK_EQ(static_cast<size_t>(kParameterCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
++}
++
++void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
++  CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
++                                   default_stub_registers);
++}
++
++void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2};
++  CHECK_EQ(static_cast<size_t>(kParameterCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
++}
++
++void RecordWriteDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
++
++  data->RestrictAllocatableRegisters(default_stub_registers,
++                                     arraysize(default_stub_registers));
++
++  CHECK_LE(static_cast<size_t>(kParameterCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
++}
++
++void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
++
++  data->RestrictAllocatableRegisters(default_stub_registers,
++                                     arraysize(default_stub_registers));
++
++  CHECK_LE(static_cast<size_t>(kParameterCount),
++           arraysize(default_stub_registers));
++  data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
++}
++
++const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
++  return a1;
++}
++const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; }
++
++const Register LoadDescriptor::ReceiverRegister() { return a1; }
++const Register LoadDescriptor::NameRegister() { return a2; }
++const Register LoadDescriptor::SlotRegister() { return a0; }
++
++const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
++
++const Register StoreDescriptor::ReceiverRegister() { return a1; }
++const Register StoreDescriptor::NameRegister() { return a2; }
++const Register StoreDescriptor::ValueRegister() { return a0; }
++const Register StoreDescriptor::SlotRegister() { return a4; }
++
++const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
++
++const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
++const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
++const Register StoreTransitionDescriptor::MapRegister() { return a5; }
++
++const Register ApiGetterDescriptor::HolderRegister() { return a0; }
++const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
++
++const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; } ++const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; } ++ ++// static ++const Register TypeConversionDescriptor::ArgumentRegister() { return a0; } ++ ++void TypeofDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ Register registers[] = {a3}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallTrampolineDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: target ++ // a0: number of arguments ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallFunctionTemplateDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : function template info ++ // a0 : number of arguments (on the stack, not including receiver) ++ Register registers[] = {a1, a0}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a2 : the object to spread ++ Register registers[] = {a1, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void CallWithArrayLikeDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1 : the target to call ++ // a2 : the arguments list ++ Register registers[] = {a1, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a4 : arguments list length (untagged) ++ // a2 : arguments list (FixedArray) ++ Register registers[] = {a1, a3, a0, a4, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a1: the target to call ++ // a3: new target ++ // a0: number of arguments ++ // a2: start index (to support rest parameters) ++ Register registers[] = {a1, a3, a0, a2}; ++ data->InitializePlatformSpecific(arraysize(registers), registers); ++} ++ ++void ConstructWithSpreadDescriptor::InitializePlatformSpecific( ++ CallInterfaceDescriptorData* data) { ++ // a0 : number of arguments (on the stack, not including receiver) ++ // a1 : the target to call ++ // a3 : the new target ++ // a2 : the object to spread ++ Register registers[] = {a1, a3, a0, a2}; ++ 
data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // a1 : the target to call
++  // a3 : the new target
++  // a2 : the arguments list
++  Register registers[] = {a1, a3, a2};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void ConstructStubDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // a1: target
++  // a3: new target
++  // a0: number of arguments
++  // a2: allocation site or undefined
++  Register registers[] = {a1, a3, a0, a2};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void AbortDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {a0};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // register state
++  data->InitializePlatformSpecific(0, nullptr);
++}
++
++void CompareDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {a1, a0};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void BinaryOpDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {a1, a0};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a1,  // JSFunction
++      a3,  // the new target
++      a0,  // actual number of arguments
++      a2,  // expected number of arguments
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void ApiCallbackDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a1,  // kApiFunctionAddress
++      a2,  // kArgc
++      a3,  // kCallData
++      a0,  // kHolder
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void InterpreterDispatchDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
++      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a0,  // argument count (not including receiver)
++      a2,  // address of first argument
++      a1   // the target callable to be called
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a0,  // argument count (not including receiver)
++      a4,  // address of the first argument
++      a1,  // constructor to call
++      a3,  // new target
++      a2,  // allocation site feedback if available, undefined otherwise
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void ResumeGeneratorDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a0,  // the value to pass to the generator
++      a1   // the JSGeneratorObject to resume
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {
++      a1,  // loaded new FP
++  };
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  Register registers[] = {a0, a1};
++  data->InitializePlatformSpecific(arraysize(registers), registers);
++}
++
++void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // TODO(v8:8888): Implement on this platform.
++  DefaultInitializePlatformSpecific(data, 4);
++}
++
++void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // TODO(v8:8888): Implement on this platform.
++  DefaultInitializePlatformSpecific(data, 4);
++}
++
++void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // TODO(v8:8888): Implement on this platform.
++  DefaultInitializePlatformSpecific(data, 4);
++}
++
++void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
++    CallInterfaceDescriptorData* data) {
++  // TODO(v8:8888): Implement on this platform.
++  DefaultInitializePlatformSpecific(data, 3);
++}
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_TARGET_ARCH_LOONG64
+diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+new file mode 100644
+index 00000000..69fd5618
+--- /dev/null
++++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+@@ -0,0 +1,4050 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#include <limits.h>  // For LONG_MIN, LONG_MAX.
++
++#if V8_TARGET_ARCH_LOONG64
++
++#include "src/base/bits.h"
++#include "src/base/division-by-constant.h"
++#include "src/codegen/assembler-inl.h"
++#include "src/codegen/callable.h"
++#include "src/codegen/code-factory.h"
++#include "src/codegen/external-reference-table.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/codegen/register-configuration.h"
++#include "src/debug/debug.h"
++#include "src/execution/frames-inl.h"
++#include "src/heap/memory-chunk.h"
++#include "src/init/bootstrapper.h"
++#include "src/logging/counters.h"
++#include "src/objects/heap-number.h"
++#include "src/runtime/runtime.h"
++#include "src/snapshot/embedded/embedded-data.h"
++#include "src/snapshot/snapshot.h"
++#include "src/wasm/wasm-code-manager.h"
++
++// Satisfy cpplint check, but don't include platform-specific header. It is
++// included recursively via macro-assembler.h.
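++// (The #if 0 block below keeps the include path visible to cpplint while the
++// preprocessor never actually compiles it.)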
++#if 0 ++#include "src/codegen/loong64/macro-assembler-loong64.h" ++#endif ++ ++namespace v8 { ++namespace internal { ++ ++static inline bool IsZero(const Operand& rk) { ++ if (rk.is_reg()) { ++ return rk.rm() == zero_reg; ++ } else { ++ return rk.immediate() == 0; ++ } ++} ++ ++int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1, ++ Register exclusion2, ++ Register exclusion3) const { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPush(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ if (fp_mode == kSaveFPRegs) { ++ MultiPushFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ return bytes; ++} ++ ++int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ++ Register exclusion2, Register exclusion3) { ++ int bytes = 0; ++ if (fp_mode == kSaveFPRegs) { ++ MultiPopFPU(kCallerSavedFPU); ++ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize; ++ } ++ ++ RegList exclusions = 0; ++ if (exclusion1 != no_reg) { ++ exclusions |= exclusion1.bit(); ++ if (exclusion2 != no_reg) { ++ exclusions |= exclusion2.bit(); ++ if (exclusion3 != no_reg) { ++ exclusions |= exclusion3.bit(); ++ } ++ } ++ } ++ ++ RegList list = kJSCallerSaved & ~exclusions; ++ MultiPop(list); ++ bytes += NumRegs(list) * kPointerSize; ++ ++ return bytes; ++} ++ ++void TurboAssembler::LoadRoot(Register destination, RootIndex index) { ++ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); ++} ++ ++void TurboAssembler::PushCommonFrame(Register marker_reg) { ++ if (marker_reg.is_valid()) { ++ Push(ra, fp, marker_reg); ++ Add_d(fp, sp, Operand(kPointerSize)); ++ } else { ++ Push(ra, fp); ++ mov(fp, sp); ++ } ++} ++ ++void TurboAssembler::PushStandardFrame(Register function_reg) { ++ int offset = -StandardFrameConstants::kContextOffset; ++ if (function_reg.is_valid()) { ++ Push(ra, fp, cp, function_reg); ++ offset += kPointerSize; ++ } else { ++ Push(ra, fp, cp); ++ } ++ Add_d(fp, sp, Operand(offset)); ++} ++ ++int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { ++ // The registers are pushed starting with the highest encoding, ++ // which means that lowest encodings are closest to the stack pointer. ++ return kSafepointRegisterStackIndexMap[reg_code]; ++} ++ ++// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) ++// The register 'object' contains a heap object pointer. The heap object ++// tag is shifted away. 
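++// Note: RecordWriteField is the write-barrier half of a field store; it is
++// expected to run after 'value' has already been stored at 'offset' inside
++// 'object', with 'dst' serving as a scratch register for the slot address.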
++void MacroAssembler::RecordWriteField(Register object, int offset,
++                                      Register value, Register dst,
++                                      RAStatus ra_status,
++                                      SaveFPRegsMode save_fp,
++                                      RememberedSetAction remembered_set_action,
++                                      SmiCheck smi_check) {
++  DCHECK(!AreAliased(value, dst, t8, object));
++  // First, check if a write barrier is even needed. The tests below
++  // catch stores of Smis.
++  Label done;
++
++  // Skip the barrier if writing a smi.
++  if (smi_check == INLINE_SMI_CHECK) {
++    JumpIfSmi(value, &done);
++  }
++
++  // Although the object register is tagged, the offset is relative to the
++  // start of the object, so the offset must be a multiple of kPointerSize.
++  DCHECK(IsAligned(offset, kPointerSize));
++
++  Add_d(dst, object, Operand(offset - kHeapObjectTag));
++  if (emit_debug_code()) {
++    BlockTrampolinePoolScope block_trampoline_pool(this);
++    Label ok;
++    And(t8, dst, Operand(kPointerSize - 1));
++    Branch(&ok, eq, t8, Operand(zero_reg));
++    stop();
++    bind(&ok);
++  }
++
++  RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
++              OMIT_SMI_CHECK);
++
++  bind(&done);
++
++  // Clobber clobbered input registers when running with the debug-code flag
++  // turned on to provoke errors.
++  if (emit_debug_code()) {
++    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
++    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
++  }
++}
++
++void TurboAssembler::SaveRegisters(RegList registers) {
++  DCHECK_GT(NumRegs(registers), 0);
++  RegList regs = 0;
++  for (int i = 0; i < Register::kNumRegisters; ++i) {
++    if ((registers >> i) & 1u) {
++      regs |= Register::from_code(i).bit();
++    }
++  }
++  MultiPush(regs);
++}
++
++void TurboAssembler::RestoreRegisters(RegList registers) {
++  DCHECK_GT(NumRegs(registers), 0);
++  RegList regs = 0;
++  for (int i = 0; i < Register::kNumRegisters; ++i) {
++    if ((registers >> i) & 1u) {
++      regs |= Register::from_code(i).bit();
++    }
++  }
++  MultiPop(regs);
++}
++
++void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
++                                             SaveFPRegsMode fp_mode) {
++  EphemeronKeyBarrierDescriptor descriptor;
++  RegList registers = descriptor.allocatable_registers();
++
++  SaveRegisters(registers);
++
++  Register object_parameter(
++      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
++  Register slot_parameter(descriptor.GetRegisterParameter(
++      EphemeronKeyBarrierDescriptor::kSlotAddress));
++  Register fp_mode_parameter(
++      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
++
++  Push(object);
++  Push(address);
++
++  Pop(slot_parameter);
++  Pop(object_parameter);
++
++  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
++  Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
++       RelocInfo::CODE_TARGET);
++  RestoreRegisters(registers);
++}
++
++void TurboAssembler::CallRecordWriteStub(
++    Register object, Register address,
++    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
++  CallRecordWriteStub(
++      object, address, remembered_set_action, fp_mode,
++      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
++      kNullAddress);
++}
++
++void TurboAssembler::CallRecordWriteStub(
++    Register object, Register address,
++    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
++    Address wasm_target) {
++  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
++                      Handle<Code>::null(), wasm_target);
++}
++
++void TurboAssembler::CallRecordWriteStub(
++    Register object, Register address,
++    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
++    Handle<Code> code_target, Address wasm_target) {
++  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
++  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
++  // i.e. always emit remember set and save FP registers in RecordWriteStub. If
++  // large performance regression is observed, we should use these values to
++  // avoid unnecessary work.
++
++  RecordWriteDescriptor descriptor;
++  RegList registers = descriptor.allocatable_registers();
++
++  SaveRegisters(registers);
++  Register object_parameter(
++      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
++  Register slot_parameter(
++      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
++  Register remembered_set_parameter(
++      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
++  Register fp_mode_parameter(
++      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
++
++  Push(object);
++  Push(address);
++
++  Pop(slot_parameter);
++  Pop(object_parameter);
++
++  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
++  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
++  if (code_target.is_null()) {
++    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
++  } else {
++    Call(code_target, RelocInfo::CODE_TARGET);
++  }
++
++  RestoreRegisters(registers);
++}
++
++// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
++// The register 'object' contains a heap object pointer. The heap object
++// tag is shifted away.
++void MacroAssembler::RecordWrite(Register object, Register address,
++                                 Register value, RAStatus ra_status,
++                                 SaveFPRegsMode fp_mode,
++                                 RememberedSetAction remembered_set_action,
++                                 SmiCheck smi_check) {
++  DCHECK(!AreAliased(object, address, value));
++
++  if (emit_debug_code()) {
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    Ld_d(scratch, MemOperand(address, 0));
++    Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
++           Operand(value));
++  }
++
++  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
++       !FLAG_incremental_marking) ||
++      FLAG_disable_write_barriers) {
++    return;
++  }
++
++  // First, check if a write barrier is even needed. The tests below
++  // catch stores of smis and stores into the young generation.
++  Label done;
++
++  if (smi_check == INLINE_SMI_CHECK) {
++    DCHECK_EQ(0, kSmiTag);
++    JumpIfSmi(value, &done);
++  }
++
++  CheckPageFlag(value,
++                value,  // Used as scratch.
++                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
++  CheckPageFlag(object,
++                value,  // Used as scratch.
++                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
++
++  // Record the actual write.
++  if (ra_status == kRAHasNotBeenSaved) {
++    push(ra);
++  }
++  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
++  if (ra_status == kRAHasNotBeenSaved) {
++    pop(ra);
++  }
++
++  bind(&done);
++
++  // Clobber clobbered registers when running with the debug-code flag
++  // turned on to provoke errors.
++  if (emit_debug_code()) {
++    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
++    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
++  }
++}
++
++// ---------------------------------------------------------------------------
++// Instruction macros.
++
++void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    add_w(rd, rj, rk.rm());
++  } else {
++    if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      addi_w(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
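++      // (li can expand to a multi-instruction sequence for relocated or
++      // wide immediates, see li_optimized further below, so the operand is
++      // first materialized in a scratch register.)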
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      add_w(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    add_d(rd, rj, rk.rm());
++  } else {
++    if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      addi_d(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      add_d(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    sub_w(rd, rj, rk.rm());
++  } else {
++    DCHECK(is_int32(rk.immediate()));
++    if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
++      addi_w(rd, rj,
++             static_cast<int32_t>(
++                 -rk.immediate()));  // No subi_w instr, use addi_w(x, y, -imm).
++    } else {
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) {
++        // Use load -imm and addu when loading -imm generates one instruction.
++        li(scratch, -rk.immediate());
++        add_w(rd, rj, scratch);
++      } else {
++        // li handles the relocation.
++        li(scratch, rk);
++        sub_w(rd, rj, scratch);
++      }
++    }
++  }
++}
++
++void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    sub_d(rd, rj, rk.rm());
++  } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
++    addi_d(rd, rj,
++           static_cast<int32_t>(
++               -rk.immediate()));  // No subi_d instr, use addi_d(x, y, -imm).
++  } else {
++    DCHECK(rj != t7);
++    int li_count = InstrCountForLi64Bit(rk.immediate());
++    int li_neg_count = InstrCountForLi64Bit(-rk.immediate());
++    if (li_neg_count < li_count && !MustUseReg(rk.rmode())) {
++      // Use load -imm and add_d when loading -imm generates one instruction.
++      DCHECK(rk.immediate() != std::numeric_limits<int64_t>::min());
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      li(scratch, Operand(-rk.immediate()));
++      add_d(rd, rj, scratch);
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      li(scratch, rk);
++      sub_d(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mul_w(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mul_w(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mulh_w(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mulh_w(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mulh_wu(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mulh_wu(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mul_d(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mul_d(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mulh_d(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mulh_d(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    div_w(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    div_w(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mod_w(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mod_w(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mod_wu(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mod_wu(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    div_d(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    div_d(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    div_wu(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    div_wu(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    div_du(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    div_du(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mod_d(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mod_d(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    mod_du(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    mod_du(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    and_(rd, rj, rk.rm());
++  } else {
++    if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      andi(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      and_(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    or_(rd, rj, rk.rm());
++  } else {
++    if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      ori(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      or_(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    xor_(rd, rj, rk.rm());
++  } else {
++    if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      xori(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      Register scratch = temps.Acquire();
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      xor_(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    nor(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    nor(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    andn(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    andn(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    orn(rd, rj, rk.rm());
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    DCHECK(rj != scratch);
++    li(scratch, rk);
++    orn(rd, rj, scratch);
++  }
++}
++
++void TurboAssembler::Neg(Register rj, const Operand& rk) {
++  DCHECK(rk.is_reg());
++  sub_d(rj, zero_reg, rk.rm());
++}
++
++void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    slt(rd, rj, rk.rm());
++  } else {
++    if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      slti(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      // TODO why??
++      BlockTrampolinePoolScope block_trampoline_pool(this);
++      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      slt(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    sltu(rd, rj, rk.rm());
++  } else {
++    if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
++      sltui(rd, rj, static_cast<int32_t>(rk.immediate()));
++    } else {
++      // li handles the relocation.
++      UseScratchRegisterScope temps(this);
++      BlockTrampolinePoolScope block_trampoline_pool(this);
++      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
++      DCHECK(rj != scratch);
++      li(scratch, rk);
++      sltu(rd, rj, scratch);
++    }
++  }
++}
++
++void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
++  if (rk.is_reg()) {
++    slt(rd, rk.rm(), rj);
++  } else {
++    // li handles the relocation.
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.hasAvailable() ?
temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ slt(rd, scratch, rj); ++ } ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sltu(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ sltu(rd, scratch, rj); ++ } ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) { ++ Slt(rd, rj, rk); ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { ++ Sltu(rd, rj, rk); ++ xori(rd, rd, 1); ++} ++ ++void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ slt(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ slt(rd, scratch, rj); ++ } ++} ++ ++void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ sltu(rd, rk.rm(), rj); ++ } else { ++ // li handles the relocation. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ DCHECK(rj != scratch); ++ li(scratch, rk); ++ sltu(rd, scratch, rj); ++ } ++} ++ ++void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ rotr_w(rd, rj, rk.rm()); ++ } else { ++ int64_t ror_value = rk.immediate() % 32; ++ if (ror_value < 0) { ++ ror_value += 32; ++ } ++ rotri_w(rd, rj, ror_value); ++ } ++} ++ ++void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { ++ if (rk.is_reg()) { ++ rotr_d(rd, rj, rk.rm()); ++ } else { ++ int64_t dror_value = rk.immediate() % 64; ++ if (dror_value < 0) dror_value += 64; ++ rotri_d(rd, rj, dror_value); ++ } ++} ++ ++void MacroAssembler::Pref(int32_t hint, const MemOperand& rj) { ++ // TODO ++ // pref(hint); ++} ++ ++void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch) { ++ DCHECK(sa >= 1 && sa <= 31); ++ if (sa <= 4) { ++ alsl_w(rd, rj, rk, sa); ++ } else { ++ Register tmp = rd == rk ? scratch : rd; ++ DCHECK(tmp != rk); ++ slli_w(tmp, rj, sa); ++ add_w(rd, rk, tmp); ++ } ++} ++ ++void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch) { ++ DCHECK(sa >= 1 && sa <= 31); ++ if (sa <= 4) { ++ alsl_d(rd, rj, rk, sa); ++ } else { ++ Register tmp = rd == rk ? 
scratch : rd;
++    DCHECK(tmp != rk);
++    slli_d(tmp, rj, sa);
++    add_d(rd, rk, tmp);
++  }
++}
++
++// ------------Pseudo-instructions-------------
++
++// Change endianness
++void TurboAssembler::ByteSwapSigned(Register dest, Register src,
++                                    int operand_size) {
++  DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
++  if (operand_size == 2) {
++    revb_2h(dest, src);
++    ext_w_h(dest, dest);
++  } else if (operand_size == 4) {
++    revb_2w(dest, src);
++    slli_w(dest, dest, 0);
++  } else {
++    revb_d(dest, src);
++  }
++}
++
++void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
++                                      int operand_size) {
++  DCHECK(operand_size == 2 || operand_size == 4);
++  if (operand_size == 2) {
++    revb_2h(dest, src);
++    bstrins_d(dest, zero_reg, 63, 16);
++  } else {
++    revb_2w(dest, src);
++    bstrins_d(dest, zero_reg, 63, 32);
++  }
++}
++
++void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    ldx_b(rd, source.base(), source.index());
++  } else {
++    ld_b(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    ldx_bu(rd, source.base(), source.index());
++  } else {
++    ld_bu(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    stx_b(rd, source.base(), source.index());
++  } else {
++    st_b(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    ldx_h(rd, source.base(), source.index());
++  } else {
++    ld_h(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    ldx_hu(rd, source.base(), source.index());
++  } else {
++    ld_hu(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);
++  if (source.hasIndexReg()) {
++    stx_h(rd, source.base(), source.index());
++  } else {
++    st_h(rd, source.base(), source.offset());
++  }
++}
++
++void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
++  MemOperand source = rj;
++  AdjustBaseAndOffset(&source);  // TODO ldptr_w ??
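++  // After AdjustBaseAndOffset the operand is either a base plus an offset
++  // that fits the ld_w immediate field, or a base plus index register pair
++  // that the ldx_w form below consumes.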
++ if (source.hasIndexReg()) { ++ ldx_w(rd, source.base(), source.index()); ++ } else { ++ ld_w(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_wu(rd, source.base(), source.index()); ++ } else { ++ ld_wu(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_w(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_w(rd, source.base(), source.index()); ++ } else { ++ st_w(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ ldx_d(rd, source.base(), source.index()); ++ } else { ++ ld_d(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::St_d(Register rd, const MemOperand& rj) { ++ MemOperand source = rj; ++ AdjustBaseAndOffset(&source); ++ if (source.hasIndexReg()) { ++ stx_d(rd, source.base(), source.index()); ++ } else { ++ st_d(rd, source.base(), source.offset()); ++ } ++} ++ ++void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fldx_s(fd, tmp.base(), tmp.index()); ++ } else { ++ fld_s(fd, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fstx_s(fs, tmp.base(), tmp.index()); ++ } else { ++ fst_s(fs, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fldx_d(fd, tmp.base(), tmp.index()); ++ } else { ++ fld_d(fd, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { ++ MemOperand tmp = src; ++ AdjustBaseAndOffset(&tmp); ++ if (tmp.hasIndexReg()) { ++ fstx_d(fs, tmp.base(), tmp.index()); ++ } else { ++ fst_d(fs, tmp.base(), tmp.offset()); ++ } ++} ++ ++void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ ll_w(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ ll_w(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ ll_d(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ ll_d(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { ++ DCHECK(!rj.hasIndexReg()); ++ bool is_one_instruction = is_int14(rj.offset()); ++ if (is_one_instruction) { ++ sc_w(rd, rj.base(), rj.offset()); ++ } else { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, rj.offset()); ++ add_d(scratch, scratch, rj.base()); ++ sc_w(rd, scratch, 0); ++ } ++} ++ ++void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { ++ 
DCHECK(!rj.hasIndexReg());
++  bool is_one_instruction = is_int14(rj.offset());
++  if (is_one_instruction) {
++    sc_d(rd, rj.base(), rj.offset());
++  } else {
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    li(scratch, rj.offset());
++    add_d(scratch, scratch, rj.base());
++    sc_d(rd, scratch, 0);
++  }
++}
++
++void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
++  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
++  // non-isolate-independent code. In many cases it might be cheaper than
++  // embedding the relocatable value.
++  if (root_array_available_ && options().isolate_independent_code) {
++    IndirectLoadConstant(dst, value);
++    return;
++  }
++  li(dst, Operand(value), mode);
++}
++
++void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
++  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
++  // non-isolate-independent code. In many cases it might be cheaper than
++  // embedding the relocatable value.
++  if (root_array_available_ && options().isolate_independent_code) {
++    IndirectLoadExternalReference(dst, value);
++    return;
++  }
++  li(dst, Operand(value), mode);
++}
++
++void TurboAssembler::li(Register dst, const StringConstantBase* string,
++                        LiFlags mode) {
++  li(dst, Operand::EmbeddedStringConstant(string), mode);
++}
++
++static inline int InstrCountForLiLower32Bit(int64_t value) {
++  if (is_int12(static_cast<int32_t>(value)) ||
++      is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
++    return 1;
++  } else {
++    return 2;
++  }
++}
++
++void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
++  if (is_int12(static_cast<int32_t>(j.immediate()))) {
++    addi_d(rd, zero_reg, j.immediate());
++  } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
++    ori(rd, zero_reg, j.immediate() & kImm12Mask);
++  } else {
++    lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
++    if (j.immediate() & kImm12Mask) {
++      ori(rd, rd, j.immediate() & kImm12Mask);
++    }
++  }
++}
++
++int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
++  if (is_int32(value)) {
++    return InstrCountForLiLower32Bit(value);
++  } else if (is_int52(value)) {
++    return InstrCountForLiLower32Bit(value) + 1;
++  } else if ((value & 0xffffffffL) == 0) {
++    // 32 LSBs (Least Significant Bits) all set to zero.
++    uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
++    uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
++    if (tzc >= 20) {
++      return 1;
++    } else if (tzc + lzc > 12) {
++      return 2;
++    } else {
++      return 3;
++    }
++  } else {
++    int64_t imm21 = (value >> 31) & 0x1fffffL;
++    if (imm21 != 0x1fffffL && imm21 != 0) {
++      return InstrCountForLiLower32Bit(value) + 2;
++    } else {
++      return InstrCountForLiLower32Bit(value) + 1;
++    }
++  }
++  UNREACHABLE();
++  return INT_MAX;
++}
++
++// All changes to if...else conditions here must be added to
++// InstrCountForLi64Bit as well.
++void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
++  DCHECK(!j.is_reg());
++  DCHECK(!MustUseReg(j.rmode()));
++  DCHECK(mode == OPTIMIZE_SIZE);
++  int64_t imm = j.immediate();
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  // Normal load of an immediate value which does not need Relocation Info.
++  if (is_int32(imm)) {
++    LiLower32BitHelper(rd, j);
++  } else if (is_int52(imm)) {
++    LiLower32BitHelper(rd, j);
++    lu32i_d(rd, imm >> 32 & 0xfffff);
++  } else if ((imm & 0xffffffffL) == 0) {
++    // 32 LSBs (Least Significant Bits) all set to zero.
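++    // For instance (illustrative values only): imm = 0x0012300000000000 has
++    // tzc = 12 and lzc = 11 for the upper word, so tzc + lzc > 12 holds and
++    // the two-instruction lu12i_w + slli_d path below is taken.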
++ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32); ++ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32); ++ if (tzc >= 20) { ++ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask); ++ } else if (tzc + lzc > 12) { ++ int32_t mask = (1 << (32 - tzc)) - 1; ++ lu12i_w(rd, imm >> (tzc + 32) & mask); ++ slli_d(rd, rd, tzc + 20); ++ } else { ++ xor_(rd, rd, rd); ++ lu32i_d(rd, imm >> 32 & 0xfffff); ++ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); ++ } ++ } else { ++ int64_t imm21 = (imm >> 31) & 0x1fffffL; ++ LiLower32BitHelper(rd, j); ++ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff); ++ lu52i_d(rd, rd, imm >> 52 & kImm12Mask); ++ } ++} ++ ++void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { ++ DCHECK(!j.is_reg()); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { ++ li_optimized(rd, j, mode); ++ } else if (MustUseReg(j.rmode())) { ++ int64_t immediate; ++ if (j.IsHeapObjectRequest()) { ++ RequestHeapObject(j.heap_object_request()); ++ immediate = 0; ++ } else { ++ immediate = j.immediate(); ++ } ++ ++ RecordRelocInfo(j.rmode(), immediate); ++ lu12i_w(rd, immediate >> 12 & 0xfffff); ++ ori(rd, rd, immediate & kImm12Mask); ++ lu32i_d(rd, immediate >> 32 & 0xfffff); ++ } else if (mode == ADDRESS_LOAD) { ++ // We always need the same number of instructions as we may need to patch ++ // this code to load another value which may need all 3 instructions. ++ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); ++ ori(rd, rd, j.immediate() & kImm12Mask); ++ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); ++ } else { // mode == CONSTANT_SIZE - always emit the same instruction ++ // sequence. ++ lu12i_w(rd, j.immediate() >> 12 & 0xfffff); ++ ori(rd, rd, j.immediate() & kImm12Mask); ++ lu32i_d(rd, j.immediate() >> 32 & 0xfffff); ++ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask); ++ } ++} ++ ++void TurboAssembler::MultiPush(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs1 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs2 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ DCHECK_EQ(regs1 & regs3, 0); ++ DCHECK_EQ(regs2 & regs3, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs1 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs2 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs3 & (1 << i)) != 0) { ++ stack_offset -= kPointerSize; ++ St_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ 
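++
++// Illustrative pairing (assuming the usual RegList bit masks): a sequence
++// MultiPush(a0.bit() | a1.bit()); ... MultiPop(a0.bit() | a1.bit()); saves
++// and then restores both registers, since the push loops walk the register
++// numbers downwards while the pop loops below walk them upwards.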
++void TurboAssembler::MultiPop(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs2 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs1 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { ++ DCHECK_EQ(regs1 & regs2, 0); ++ DCHECK_EQ(regs1 & regs3, 0); ++ DCHECK_EQ(regs2 & regs3, 0); ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs3 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs2 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs1 & (1 << i)) != 0) { ++ Ld_d(ToRegister(i), MemOperand(sp, stack_offset)); ++ stack_offset += kPointerSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::MultiPushFPU(RegList regs) { ++ int16_t num_to_push = base::bits::CountPopulation(regs); ++ int16_t stack_offset = num_to_push * kDoubleSize; ++ ++ Sub_d(sp, sp, Operand(stack_offset)); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ if ((regs & (1 << i)) != 0) { ++ stack_offset -= kDoubleSize; ++ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ } ++ } ++} ++ ++void TurboAssembler::MultiPopFPU(RegList regs) { ++ int16_t stack_offset = 0; ++ ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ if ((regs & (1 << i)) != 0) { ++ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset)); ++ stack_offset += kDoubleSize; ++ } ++ } ++ addi_d(sp, sp, stack_offset); ++} ++ ++void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, ++ uint16_t lsbw) { ++ DCHECK_LT(lsbw, msbw); ++ DCHECK_LT(lsbw, 32); ++ DCHECK_LT(msbw, 32); ++ bstrpick_w(rk, rj, msbw, lsbw); ++} ++ ++void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, ++ uint16_t lsbw) { ++ DCHECK_LT(lsbw, msbw); ++ DCHECK_LT(lsbw, 64); ++ DCHECK_LT(msbw, 64); ++ bstrpick_d(rk, rj, msbw, lsbw); ++} ++ ++void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } ++ ++void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } ++ ++void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { ++ // Move the data from fs to t8. ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ movfr2gr_s(t8, fj); ++ Ffint_d_uw(fd, t8); ++} ++ ++void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ ++ // Convert rj to a FP value in fd. ++ DCHECK(rj != t7); ++ ++ // Zero extend int32 in rj. 
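++  // (Bstrpick_d below copies bits 31..0 of rj into t7 and zero-extends.)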
++ Bstrpick_d(t7, rj, 31, 0); ++ movgr2fr_d(fd, t7); ++ ffint_d_l(fd, fd); ++} ++ ++void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_d_ul(fd, t8); ++} ++ ++void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ ++ DCHECK(rj != t7); ++ ++ Label msb_clear, conversion_done; ++ ++ Branch(&msb_clear, ge, rj, Operand(zero_reg)); ++ ++ // Rj >= 2^63 ++ andi(t7, rj, 1); ++ srli_d(rj, rj, 1); ++ or_(t7, t7, rj); ++ movgr2fr_d(fd, t7); ++ ffint_d_l(fd, fd); ++ fadd_d(fd, fd, fd); ++ Branch(&conversion_done); ++ ++ bind(&msb_clear); ++ // Rs < 2^63, we can do simple conversion. ++ movgr2fr_d(fd, rj); ++ ffint_d_l(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_s_uw(fd, t8); ++} ++ ++void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ DCHECK(rj != t7); ++ ++ // Zero extend int32 in rj. ++ bstrpick_d(t7, rj, 31, 0); ++ movgr2fr_d(fd, t7); ++ ffint_s_l(fd, fd); ++} ++ ++void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Move the data from fs to t8. ++ movfr2gr_d(t8, fj); ++ Ffint_s_ul(fd, t8); ++} ++ ++void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Convert rj to a FP value in fd. ++ ++ DCHECK(rj != t7); ++ ++ Label positive, conversion_done; ++ ++ Branch(&positive, ge, rj, Operand(zero_reg)); ++ ++ // Rs >= 2^31. ++ andi(t7, rj, 1); ++ srli_d(rj, rj, 1); ++ or_(t7, t7, rj); ++ movgr2fr_d(fd, t7); ++ ffint_s_l(fd, fd); ++ fadd_s(fd, fd, fd); ++ Branch(&conversion_done); ++ ++ bind(&positive); ++ // Rs < 2^31, we can do simple conversion. ++ movgr2fr_d(fd, rj); ++ ffint_s_l(fd, fd); ++ ++ bind(&conversion_done); ++} ++ ++void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) { ++ ftintrne_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) { ++ ftintrm_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) { ++ ftintrp_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) { ++ ftintrz_l_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Load to GPR. ++ movfr2gr_d(t8, fj); ++ // Reset sign bit. 
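++  // (Clearing bit 63 with the 0x7FFFFFFFFFFFFFFF mask below is fabs on the
++  // raw bit pattern.)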
++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x7FFFFFFFFFFFFFFFl); ++ and_(t8, t8, scratch1); ++ } ++ movgr2fr_d(scratch, t8); ++ Ftintrz_l_d(fd, scratch); ++} ++ ++void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_uw_d(t8, fj, scratch); ++ movgr2fr_w(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, ++ FPURegister scratch) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_uw_s(t8, fj, scratch); ++ movgr2fr_w(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_ul_d(t8, fj, scratch, result); ++ movgr2fr_d(fd, t8); ++} ++ ++void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Ftintrz_ul_s(t8, fj, scratch, result); ++ movgr2fr_d(fd, t8); ++} ++ ++void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) { ++ ftintrz_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) { ++ ftintrne_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) { ++ ftintrm_w_d(fd, fj); ++} ++ ++void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) { ++ ftintrp_w_d(fd, fj); ++} ++ ++void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, ++ FPURegister scratch) { ++ DCHECK(fj != scratch); ++ DCHECK(rd != t7); ++ ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x41E00000); ++ movgr2fr_w(scratch, zero_reg); ++ movgr2frh_w(scratch, scratch1); ++ } ++ // Test if scratch > fd. ++ // If fd < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF64(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fd, then trunc it to rs ++ // and add 2^31 to rj. ++ fsub_d(scratch, fj, scratch); ++ ftintrz_w_d(scratch, scratch); ++ movfr2gr_s(rd, scratch); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_w_d(scratch, fj); ++ movfr2gr_s(rd, scratch); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, ++ FPURegister scratch) { ++ DCHECK(fj != scratch); ++ DCHECK(rd != t7); ++ { ++ // Load 2^31 into scratch as its float representation. ++ UseScratchRegisterScope temps(this); ++ Register scratch1 = temps.Acquire(); ++ li(scratch1, 0x4F000000); ++ movgr2fr_w(scratch, scratch1); ++ } ++ // Test if scratch > fs. ++ // If fs < 2^31 we can convert it normally. ++ Label simple_convert; ++ CompareF32(fj, scratch, CLT); ++ BranchTrueShortF(&simple_convert); ++ ++ // First we subtract 2^31 from fs, then trunc it to rd ++ // and add 2^31 to rd. ++ fsub_s(scratch, fj, scratch); ++ ftintrz_w_s(scratch, scratch); ++ movfr2gr_s(rd, scratch); ++ Or(rd, rd, 1 << 31); ++ ++ Label done; ++ Branch(&done); ++ // Simple conversion. ++ bind(&simple_convert); ++ ftintrz_w_s(scratch, fj); ++ movfr2gr_s(rd, scratch); ++ ++ bind(&done); ++} ++ ++void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, ++ FPURegister scratch, Register result) { ++ DCHECK(fj != scratch); ++ DCHECK(result.is_valid() ? 
!AreAliased(rd, result, t7) : !AreAliased(rd, t7));
++
++  Label simple_convert, done, fail;
++  if (result.is_valid()) {
++    mov(result, zero_reg);
++    Move(scratch, -1.0);
++    // If fd <= -1 or unordered, then the conversion fails.
++    CompareF64(fj, scratch, CLE);
++    BranchTrueShortF(&fail);
++    CompareIsNanF64(fj, scratch);
++    BranchTrueShortF(&fail);
++  }
++
++  // Load 2^63 into scratch as its double representation.
++  li(t7, 0x43E0000000000000);
++  movgr2fr_d(scratch, t7);
++
++  // Test if scratch > fj.
++  // If fj < 2^63 we can convert it normally.
++  CompareF64(fj, scratch, CLT);
++  BranchTrueShortF(&simple_convert);
++
++  // First we subtract 2^63 from fj, then trunc it to rd
++  // and add 2^63 to rd.
++  fsub_d(scratch, fj, scratch);
++  ftintrz_l_d(scratch, scratch);
++  movfr2gr_d(rd, scratch);
++  Or(rd, rd, Operand(1UL << 63));
++  Branch(&done);
++
++  // Simple conversion.
++  bind(&simple_convert);
++  ftintrz_l_d(scratch, fj);
++  movfr2gr_d(rd, scratch);
++
++  bind(&done);
++  if (result.is_valid()) {
++    // The conversion failed if the result is negative.
++    {
++      UseScratchRegisterScope temps(this);
++      Register scratch1 = temps.Acquire();
++      addi_d(scratch1, zero_reg, -1);
++      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
++      movfr2gr_d(result, scratch);
++      xor_(result, result, scratch1);
++    }
++    Slt(result, zero_reg, result);
++  }
++
++  bind(&fail);
++}
++
++void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
++                                  FPURegister scratch, Register result) {
++  DCHECK(fj != scratch);
++  DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
++
++  Label simple_convert, done, fail;
++  if (result.is_valid()) {
++    mov(result, zero_reg);
++    Move(scratch, -1.0f);
++    // If fd <= -1 or unordered, then the conversion fails.
++    CompareF32(fj, scratch, CLE);
++    BranchTrueShortF(&fail);
++    CompareIsNanF32(fj, scratch);
++    BranchTrueShortF(&fail);
++  }
++
++  {
++    // Load 2^63 into scratch as its float representation.
++    UseScratchRegisterScope temps(this);
++    Register scratch1 = temps.Acquire();
++    li(scratch1, 0x5F000000);
++    movgr2fr_w(scratch, scratch1);
++  }
++
++  // Test if scratch > fj.
++  // If fj < 2^63 we can convert it normally.
++  CompareF32(fj, scratch, CLT);
++  BranchTrueShortF(&simple_convert);
++
++  // First we subtract 2^63 from fj, then trunc it to rd
++  // and add 2^63 to rd.
++  fsub_s(scratch, fj, scratch);
++  ftintrz_l_s(scratch, scratch);
++  movfr2gr_d(rd, scratch);
++  Or(rd, rd, Operand(1UL << 63));
++  Branch(&done);
++
++  // Simple conversion.
++  bind(&simple_convert);
++  ftintrz_l_s(scratch, fj);
++  movfr2gr_d(rd, scratch);
++
++  bind(&done);
++  if (result.is_valid()) {
++    // The conversion failed if the result is negative or unordered.
++    {
++      UseScratchRegisterScope temps(this);
++      Register scratch1 = temps.Acquire();
++      addi_d(scratch1, zero_reg, -1);
++      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
++      movfr2gr_d(result, scratch);
++      xor_(result, result, scratch1);
++    }
++    Slt(result, zero_reg, result);
++  }
++
++  bind(&fail);
++}
++
++void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
++                                 FPURoundingMode mode) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  Register scratch = t8;
++  movfcsr2gr(scratch);
++  li(t7, Operand(mode));
++  movgr2fcsr(t7);
++  frint_d(dst, src);
++  movgr2fcsr(scratch);
++}
++
++void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
++  RoundDouble(dst, src, mode_floor);
++}
++
++void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
++  RoundDouble(dst, src, mode_ceil);
++}
++
++void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
++  RoundDouble(dst, src, mode_trunc);
++}
++
++void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
++  RoundDouble(dst, src, mode_round);
++}
++
++void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
++                                FPURoundingMode mode) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  Register scratch = t8;
++  movfcsr2gr(scratch);
++  li(t7, Operand(mode));
++  movgr2fcsr(t7);
++  frint_s(dst, src);
++  movgr2fcsr(scratch);
++}
++
++void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
++  RoundFloat(dst, src, mode_floor);
++}
++
++void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
++  RoundFloat(dst, src, mode_ceil);
++}
++
++void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
++  RoundFloat(dst, src, mode_trunc);
++}
++
++void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
++  RoundFloat(dst, src, mode_round);
++}
++
++void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
++                              FPUCondition cc, CFRegister cd, bool f32) {
++  if (f32) {
++    fcmp_cond_s(cc, cmp1, cmp2, cd);
++  } else {
++    fcmp_cond_d(cc, cmp1, cmp2, cd);
++  }
++}
++
++void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
++                                   CFRegister cd, bool f32) {
++  CompareF(cmp1, cmp2, CUN, cd, f32);
++}
++
++void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
++  bcnez(cj, target);
++}
++
++void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
++  bceqz(cj, target);
++}
++
++void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
++  // TODO: this can be optimized.
++  bool long_branch = target->is_bound()
++                         ? !is_near(target, OffsetSize::kOffset21)
++                         : is_trampoline_emitted();
++  if (long_branch) {
++    Label skip;
++    BranchFalseShortF(&skip, cj);
++    Branch(target);
++    bind(&skip);
++  } else {
++    BranchTrueShortF(target, cj);
++  }
++}
++
++void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
++  bool long_branch = target->is_bound()
++                         ? !is_near(target, OffsetSize::kOffset21)
++                         : is_trampoline_emitted();
++  if (long_branch) {
++    Label skip;
++    BranchTrueShortF(&skip, cj);
++    Branch(target);
++    bind(&skip);
++  } else {
++    BranchFalseShortF(target, cj);
++  }
++}
++
++void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  DCHECK(src_low != scratch);
++  movfrh2gr_s(scratch, dst);
++  movgr2fr_w(dst, src_low);
++  movgr2frh_w(dst, scratch);
++}
++
++void TurboAssembler::Move(FPURegister dst, uint32_t src) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  li(scratch, Operand(static_cast<int32_t>(src)));
++  movgr2fr_w(dst, scratch);
++}
++
++void TurboAssembler::Move(FPURegister dst, uint64_t src) {
++  // Handle special values first.
++  if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
++    fmov_d(dst, kDoubleRegZero);
++  } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
++    Neg_d(dst, kDoubleRegZero);
++  } else {
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    li(scratch, Operand(static_cast<int64_t>(src)));
++    movgr2fr_d(dst, scratch);
++    if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
++  }
++}
++
++void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  maskeqz(scratch, rj, rk);
++  masknez(rd, rd, rk);
++  or_(rd, rd, scratch);
++}
++
++void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  masknez(scratch, rj, rk);
++  maskeqz(rd, rd, rk);
++  or_(rd, rd, scratch);
++}
++
++void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
++                                         const Operand& rk, Condition cond) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  switch (cond) {
++    case cc_always:
++      mov(rd, zero_reg);
++      break;
++    case eq:
++      if (rj == zero_reg) {
++        if (rk.is_reg()) {
++          LoadZeroIfConditionZero(rd, rk.rm());
++        } else {
++          if (rk.immediate() == 0) {
++            mov(rd, zero_reg);
++          } else {
++            // nop();
++          }
++        }
++      } else if (IsZero(rk)) {
++        LoadZeroIfConditionZero(rd, rj);
++      } else {
++        Sub_d(t7, rj, rk);
++        LoadZeroIfConditionZero(rd, t7);
++      }
++      break;
++    case ne:
++      if (rj == zero_reg) {
++        if (rk.is_reg()) {
++          LoadZeroIfConditionNotZero(rd, rk.rm());
++        } else {
++          if (rk.immediate() != 0) {
++            mov(rd, zero_reg);
++          } else {
++            // nop();
++          }
++        }
++      } else if (IsZero(rk)) {
++        LoadZeroIfConditionNotZero(rd, rj);
++      } else {
++        Sub_d(t7, rj, rk);
++        LoadZeroIfConditionNotZero(rd, t7);
++      }
++      break;
++
++    // Signed comparison.
++    case greater:
++      Sgt(t7, rj, rk);
++      LoadZeroIfConditionNotZero(rd, t7);
++      break;
++    case greater_equal:
++      Sge(t7, rj, rk);
++      LoadZeroIfConditionNotZero(rd, t7);
++      // rj >= rk
++      break;
++    case less:
++      Slt(t7, rj, rk);
++      LoadZeroIfConditionNotZero(rd, t7);
++      // rj < rk
++      break;
++    case less_equal:
++      Sle(t7, rj, rk);
++      LoadZeroIfConditionNotZero(rd, t7);
++      // rj <= rk
++      break;
++
++    // Unsigned comparison.
++ case Ugreater: ++ Sgtu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj > rk ++ break; ++ ++ case Ugreater_equal: ++ Sgeu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj >= rk ++ break; ++ case Uless: ++ Sltu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj < rk ++ break; ++ case Uless_equal: ++ Sleu(t7, rj, rk); ++ LoadZeroIfConditionNotZero(rd, t7); ++ // rj <= rk ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, ++ Register condition) { ++ maskeqz(dest, dest, condition); ++} ++ ++void TurboAssembler::LoadZeroIfConditionZero(Register dest, ++ Register condition) { ++ masknez(dest, dest, condition); ++} ++ ++void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ movcf2gr(scratch, cc); ++ LoadZeroIfConditionNotZero(dest, scratch); ++} ++ ++void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ movcf2gr(scratch, cc); ++ LoadZeroIfConditionZero(dest, scratch); ++} ++ ++void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } ++ ++void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } ++ ++void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); } ++ ++void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } ++ ++// TODO: Optimize like arm64, use simd instruction ++void TurboAssembler::Popcnt_w(Register rd, Register rj) { ++ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel ++ // ++ // A generalization of the best bit counting method to integers of ++ // bit-widths up to 128 (parameterized by type T) is this: ++ // ++ // v = v - ((v >> 1) & (T)~(T)0/3); // temp ++ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp ++ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp ++ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count ++ // ++ // For comparison, for 32-bit quantities, this algorithm can be executed ++ // using 20 MIPS instructions (the calls to LoadConst32() generate two ++ // machine instructions each for the values being used in this algorithm). ++ // A(n unrolled) loop-based algorithm requires 25 instructions. ++ // ++ // For a 64-bit operand this can be performed in 24 instructions compared ++ // to a(n unrolled) loop based algorithm which requires 38 instructions. ++ // ++ // There are algorithms which are faster in the cases where very few ++ // bits are set but the algorithm here attempts to minimize the total ++ // number of instructions executed even when a large number of bits ++ // are set. 
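++  //
++  // Worked example (illustrative only), with v updated at each step:
++  // for v = 0x000000F0,
++  //   v - ((v >> 1) & 0x55555555)                 == 0x000000A0
++  //   (v & 0x33333333) + ((v >> 2) & 0x33333333)  == 0x00000040
++  //   (v + (v >> 4)) & 0x0F0F0F0F                 == 0x00000004
++  //   (v * 0x01010101) >> 24                      == 4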
++ int32_t B0 = 0x55555555; // (T)~(T)0/3 ++ int32_t B1 = 0x33333333; // (T)~(T)0/15*3 ++ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 ++ int32_t value = 0x01010101; // (T)~(T)0/255 ++ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE ++ ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t8; ++ srli_w(scratch, rj, 1); ++ li(scratch2, B0); ++ And(scratch, scratch, scratch2); ++ Sub_w(scratch, rj, scratch); ++ li(scratch2, B1); ++ And(rd, scratch, scratch2); ++ srli_w(scratch, scratch, 2); ++ And(scratch, scratch, scratch2); ++ Add_w(scratch, rd, scratch); ++ srli_w(rd, scratch, 4); ++ Add_w(rd, rd, scratch); ++ li(scratch2, B2); ++ And(rd, rd, scratch2); ++ li(scratch, value); ++ Mul_w(rd, rd, scratch); ++ srli_w(rd, rd, shift); ++} ++ ++void TurboAssembler::Popcnt_d(Register rd, Register rj) { ++ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 ++ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 ++ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15 ++ int64_t value = 0x0101010101010101l; // (T)~(T)0/255 ++ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE ++ ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t8; ++ srli_d(scratch, rj, 1); ++ li(scratch2, B0); ++ And(scratch, scratch, scratch2); ++ Sub_d(scratch, rj, scratch); ++ li(scratch2, B1); ++ And(rd, scratch, scratch2); ++ srli_d(scratch, scratch, 2); ++ And(scratch, scratch, scratch2); ++ Add_d(scratch, rd, scratch); ++ srli_d(rd, scratch, 4); ++ Add_d(rd, rd, scratch); ++ li(scratch2, B2); ++ And(rd, rd, scratch2); ++ li(scratch, value); ++ Mul_d(rd, rd, scratch); ++ srli_d(rd, rd, shift); ++} ++ ++void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, ++ int size, bool sign_extend) { ++ sra_d(dest, source, pos); ++ bstrpick_d(dest, dest, size - 1, 0); ++ if (sign_extend) { ++ switch (size) { ++ case 8: ++ ext_w_b(dest, dest); ++ break; ++ case 16: ++ ext_w_h(dest, dest); ++ break; ++ case 32: ++ // sign-extend word ++ slli_w(dest, dest, 0); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++void TurboAssembler::InsertBits(Register dest, Register source, Register pos, ++ int size) { ++ Rotr_d(dest, dest, pos); ++ bstrins_d(dest, source, size - 1, 0); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Sub_d(scratch, zero_reg, pos); ++ Rotr_d(dest, dest, scratch); ++ } ++} ++ ++void MacroAssembler::EmitFPUTruncate( ++ FPURoundingMode rounding_mode, Register result, DoubleRegister double_input, ++ Register scratch, DoubleRegister double_scratch, Register except_flag, ++ CheckForInexactConversion check_inexact) { ++ break_(3); ++} ++ ++void TurboAssembler::TryInlineTruncateDoubleToI(Register result, ++ DoubleRegister double_input, ++ Label* done) { ++ DoubleRegister single_scratch = kScratchDoubleReg.low(); ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.Acquire(); ++ Register scratch2 = t7; ++ ++ // Clear cumulative exception flags and save the FCSR. ++ /* movfcsr2gr(scratch2, FCSR); ++ movgr2fcsr(FCSR, zero_reg); ++ // Try a conversion to a signed integer. ++ ftintrz_w_d(single_scratch, double_input); ++ movfr2gr_w(result, single_scratch); ++ // Retrieve and restore the FCSR. 
++ movfcsr2gr(scratch, FCSR); ++ movgr2fcsr(FCSR, scratch2); ++ // Check for overflow and NaNs. ++ And(scratch, scratch, ++ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | ++ kFCSRInvalidOpFlagMask); ++ // If we had no exceptions we are done. ++ Branch(done, eq, scratch, Operand(zero_reg));*/ ++ ++ CompareIsNanF64(double_input, double_input); ++ Move(result, zero_reg); ++ bcnez(FCC0, done); ++ ftintrz_l_d(single_scratch, double_input); ++ movfr2gr_d(scratch2, single_scratch); ++ li(scratch, 1L << 63); ++ Xor(scratch, scratch, scratch2); ++ rotri_d(scratch2, scratch, 1); ++ movfr2gr_s(result, single_scratch); ++ Branch(done, ne, scratch, Operand(scratch2)); ++} ++ ++void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, ++ Register result, ++ DoubleRegister double_input, ++ StubCallMode stub_mode) { ++ Label done; ++ ++ TryInlineTruncateDoubleToI(result, double_input, &done); ++ ++ // If we fell through then inline version didn't succeed - call stub instead. ++ Sub_d(sp, sp, ++ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack. ++ St_d(ra, MemOperand(sp, kSystemPointerSize)); ++ Fst_d(double_input, MemOperand(sp, 0)); ++ ++ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { ++ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); ++ } else { ++ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); ++ } ++ ++ Pop(ra, result); ++ bind(&done); ++} ++ ++// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. ++#define BRANCH_ARGS_CHECK(cond, rj, rk) \ ++ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \ ++ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg))) ++ ++void TurboAssembler::Branch(Label* L, bool need_link) { ++ int offset = GetOffset(L, OffsetSize::kOffset26); ++ if (need_link) { ++ bl(offset); ++ } else { ++ b(offset); ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rj, ++ const Operand& rk, bool need_link) { ++ if (L->is_bound()) { ++ BRANCH_ARGS_CHECK(cond, rj, rk); ++ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rj, rk, need_link); ++ Branch(L, need_link); ++ bind(&skip); ++ } else { ++ Branch(L); ++ } ++ } ++ } else { ++ if (is_trampoline_emitted()) { ++ if (cond != cc_always) { ++ Label skip; ++ Condition neg_cond = NegateCondition(cond); ++ BranchShort(&skip, neg_cond, rj, rk, need_link); ++ Branch(L, need_link); ++ bind(&skip); ++ } else { ++ Branch(L); ++ } ++ } else { ++ BranchShort(L, cond, rj, rk, need_link); ++ } ++ } ++} ++ ++void TurboAssembler::Branch(Label* L, Condition cond, Register rj, ++ RootIndex index) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(L, cond, rj, Operand(scratch)); ++} ++ ++int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) { ++ return branch_offset_helper(L, bits) >> 2; ++} ++ ++Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, ++ Register scratch) { ++ Register r2 = no_reg; ++ if (rk.is_reg()) { ++ r2 = rk.rm(); ++ } else { ++ r2 = scratch; ++ li(r2, rk); ++ } ++ ++ return r2; ++} ++ ++bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, ++ Register rj, const Operand& rk, ++ bool need_link) { ++ UseScratchRegisterScope temps(this); ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register scratch = temps.hasAvailable() ? 
temps.Acquire() : t8;
++
++  // Be careful to always use shifted_branch_offset only just before the
++  // branch instruction, as the location will be remembered for patching the
++  // target.
++  {
++    BlockTrampolinePoolScope block_trampoline_pool(this);
++    int offset = 0;
++    switch (cond) {
++      case cc_always:
++        if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
++        offset = GetOffset(L, OffsetSize::kOffset26);
++        if (need_link) {
++          bl(offset);
++        } else {
++          b(offset);
++        }
++        break;
++      case eq:
++        if (rk.is_reg() && rj.code() == rk.rm().code()) {
++          // beq is used here to make the code patchable. Otherwise b should
++          // be used, which has no condition field and so is not patchable.
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
++          if (need_link) pcaddi(ra, 2);
++          offset = GetOffset(L, OffsetSize::kOffset16);
++          beq(rj, rj, offset);
++        } else if (IsZero(rk)) {
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
++          if (need_link) pcaddi(ra, 2);
++          offset = GetOffset(L, OffsetSize::kOffset21);
++          beqz(rj, offset);
++        } else {
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
++          if (need_link) pcaddi(ra, 2);
++          // We don't want any other register but scratch clobbered.
++          Register sc = GetRkAsRegisterHelper(rk, scratch);
++          offset = GetOffset(L, OffsetSize::kOffset16);
++          beq(rj, sc, offset);
++        }
++        break;
++      case ne:
++        if (rk.is_reg() && rj.code() == rk.rm().code()) {
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
++          if (need_link) pcaddi(ra, 2);
++          // bne is used here to make the code patchable. Otherwise we
++          // should not generate any instruction.
++          offset = GetOffset(L, OffsetSize::kOffset16);
++          bne(rj, rj, offset);
++        } else if (IsZero(rk)) {
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
++          if (need_link) pcaddi(ra, 2);
++          offset = GetOffset(L, OffsetSize::kOffset21);
++          bnez(rj, offset);
++        } else {
++          if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
++          if (need_link) pcaddi(ra, 2);
++          // We don't want any other register but scratch clobbered.
++          Register sc = GetRkAsRegisterHelper(rk, scratch);
++          offset = GetOffset(L, OffsetSize::kOffset16);
++          bne(rj, sc, offset);
++        }
++        break;
++
++      // Signed comparison.
++      case greater:
++        // rj > rk
++        if (rk.is_reg() && rj.code() == rk.rm().code()) {
++          // No code needs to be emitted.
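++          // (rj > rj can never be true, so the branch is never taken.)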
++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(zero_reg, rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(sc, rj, offset); ++ } ++ break; ++ case greater_equal: ++ // rj >= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(rj, zero_reg, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(rj, sc, offset); ++ } ++ break; ++ case less: ++ // rj < rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(rj, zero_reg, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ blt(rj, sc, offset); ++ } ++ break; ++ case less_equal: ++ // rj <= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(zero_reg, rj, offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bge(sc, rj, offset); ++ } ++ break; ++ ++ // Unsigned comparison. ++ case Ugreater: ++ // rj > rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (rj == zero_reg) { ++ // No code needs to be emitted. 
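++          // (0 >u rk can never be true, so the branch is never taken.)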
++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bltu(sc, rj, offset); ++ } ++ break; ++ case Ugreater_equal: ++ // rj >= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (rj == zero_reg) { ++ // No code needs to be emitted. ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bgeu(rj, sc, offset); ++ } ++ break; ++ case Uless: ++ // rj < rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ // No code needs to be emitted. ++ } else if (IsZero(rk)) { ++ // No code needs to be emitted. ++ } else if (rj == zero_reg) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false; ++ if (need_link) pcaddi(ra, 2); ++ Register sc = GetRkAsRegisterHelper(rk, scratch); ++ DCHECK(rj != sc); ++ offset = GetOffset(L, OffsetSize::kOffset16); ++ bltu(rj, sc, offset); ++ } ++ break; ++ case Uless_equal: ++ // rj <= rk ++ if (rk.is_reg() && rj.code() == rk.rm().code()) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (rj == zero_reg) { ++ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false; ++ if (need_link) pcaddi(ra, 2); ++ offset = GetOffset(L, OffsetSize::kOffset26); ++ b(offset); ++ } else if (IsZero(rk)) { ++ // No code needs to be emitted. 
++      } else {
++        if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
++        if (need_link) pcaddi(ra, 2);
++        Register sc = GetRkAsRegisterHelper(rk, scratch);
++        DCHECK(rj != sc);
++        offset = GetOffset(L, OffsetSize::kOffset16);
++        bgeu(sc, rj, offset);
++      }
++      break;
++      default:
++        UNREACHABLE();
++    }
++  }
++  return true;
++}
++
++void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
++                                 const Operand& rk, bool need_link) {
++  BRANCH_ARGS_CHECK(cond, rj, rk);
++  bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
++  DCHECK(result);
++  USE(result);
++}
++
++void TurboAssembler::LoadFromConstantsTable(Register destination,
++                                            int constant_index) {
++  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
++  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
++  Ld_d(destination,
++       FieldMemOperand(destination, FixedArray::kHeaderSize +
++                                        constant_index * kPointerSize));
++}
++
++void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
++  Ld_d(destination, MemOperand(kRootRegister, offset));
++}
++
++void TurboAssembler::LoadRootRegisterOffset(Register destination,
++                                            intptr_t offset) {
++  if (offset == 0) {
++    Move(destination, kRootRegister);
++  } else {
++    Add_d(destination, kRootRegister, Operand(offset));
++  }
++}
++
++void TurboAssembler::Jump(Register target, Condition cond, Register rj,
++                          const Operand& rk) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  if (cond == cc_always) {
++    jirl(zero_reg, target, 0);
++  } else {
++    BRANCH_ARGS_CHECK(cond, rj, rk);
++    Label skip;
++    Branch(&skip, NegateCondition(cond), rj, rk);
++    jirl(zero_reg, target, 0);
++    bind(&skip);
++  }
++}
++
++void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
++                          Condition cond, Register rj, const Operand& rk) {
++  Label skip;
++  if (cond != cc_always) {
++    Branch(&skip, NegateCondition(cond), rj, rk);
++  }
++  {
++    BlockTrampolinePoolScope block_trampoline_pool(this);
++    li(t7, Operand(target, rmode));
++    jirl(zero_reg, t7, 0);
++    bind(&skip);
++  }
++}
++
++void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
++                          Register rj, const Operand& rk) {
++  DCHECK(!RelocInfo::IsCodeTarget(rmode));
++  Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
++}
++
++void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
++                          Condition cond, Register rj, const Operand& rk) {
++  DCHECK(RelocInfo::IsCodeTarget(rmode));
++
++  int builtin_index = Builtins::kNoBuiltinId;
++  bool target_is_isolate_independent_builtin =
++      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
++      Builtins::IsIsolateIndependent(builtin_index);
++
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  if (root_array_available_ && options().isolate_independent_code) {
++    int offset = code->builtin_index() * kSystemPointerSize +
++                 IsolateData::builtin_entry_table_offset();
++    Ld_d(t7, MemOperand(kRootRegister, offset));
++    Jump(t7, cond, rj, rk);
++    return;
++  } else if (options().inline_offheap_trampolines &&
++             target_is_isolate_independent_builtin) {
++    // Inline the trampoline.
++    RecordCommentForOffHeapTrampoline(builtin_index);
++    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
++    EmbeddedData d = EmbeddedData::FromBlob();
++    Address entry = d.InstructionStartOfBuiltin(builtin_index);
++    li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
++    Jump(t7, cond, rj, rk);
++    return;
++  }
++
++  Jump(static_cast<intptr_t>(code.address()), rmode, cond, rj, rk);
++}
++
++void TurboAssembler::Jump(const ExternalReference& reference) {
++  li(t7, reference);
++  Jump(t7);
++}
++
++// Note: To call gcc-compiled C code on LoongArch, you must call through
++// t[0-8].
++void TurboAssembler::Call(Register target, Condition cond, Register rj,
++                          const Operand& rk) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  if (cond == cc_always) {
++    jirl(ra, target, 0);
++  } else {
++    BRANCH_ARGS_CHECK(cond, rj, rk);
++    Label skip;
++    Branch(&skip, NegateCondition(cond), rj, rk);
++    jirl(ra, target, 0);
++    bind(&skip);
++  }
++}
++
++void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
++                                     unsigned higher_limit,
++                                     Label* on_in_range) {
++  if (lower_limit != 0) {
++    UseScratchRegisterScope temps(this);
++    Register scratch = temps.Acquire();
++    Sub_d(scratch, value, Operand(lower_limit));
++    Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
++  } else {
++    Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
++  }
++}
++
++void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
++                          Register rj, const Operand& rk) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  li(t7, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
++  Call(t7, cond, rj, rk);
++}
++
++void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
++                          Condition cond, Register rj, const Operand& rk) {
++  BlockTrampolinePoolScope block_trampoline_pool(this);
++  int builtin_index = Builtins::kNoBuiltinId;
++  bool target_is_isolate_independent_builtin =
++      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
++      Builtins::IsIsolateIndependent(builtin_index);
++
++  if (root_array_available_ && options().isolate_independent_code) {
++    int offset = code->builtin_index() * kSystemPointerSize +
++                 IsolateData::builtin_entry_table_offset();
++    LoadRootRelative(t7, offset);
++    Call(t7, cond, rj, rk);
++    return;
++  } else if (options().inline_offheap_trampolines &&
++             target_is_isolate_independent_builtin) {
++    // Inline the trampoline.
++    RecordCommentForOffHeapTrampoline(builtin_index);
++    CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
++    EmbeddedData d = EmbeddedData::FromBlob();
++    Address entry = d.InstructionStartOfBuiltin(builtin_index);
++    li(t7, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
++    Call(t7, cond, rj, rk);
++    return;
++  }
++
++  DCHECK(RelocInfo::IsCodeTarget(rmode));
++  DCHECK(code->IsExecutable());
++  Call(code.address(), rmode, cond, rj, rk);
++}
++
++void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
++  STATIC_ASSERT(kSystemPointerSize == 8);
++  STATIC_ASSERT(kSmiTagSize == 1);
++  STATIC_ASSERT(kSmiTag == 0);
++
++  // The builtin_index register contains the builtin index as a Smi.
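++  // The entry address is then computed as (sketch of the layout assumed
++  // here): kRootRegister + builtin_entry_table_offset()
++  //        + builtin_index * kSystemPointerSize.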
++  SmiUntag(builtin_index, builtin_index);
++  Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2,
++         t7);
++  Ld_d(builtin_index,
++       MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
++}
++
++void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
++  LoadEntryFromBuiltinIndex(builtin_index);
++  Call(builtin_index);
++}
++
++void TurboAssembler::PatchAndJump(Address target) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  pcaddi(scratch, 4);
++  Ld_d(t7, MemOperand(scratch, 0));
++  jirl(zero_reg, t7, 0);
++  nop();
++  DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
++  *reinterpret_cast<uint64_t*>(pc_) = target;  // pc_ should be aligned.
++  pc_ += sizeof(uint64_t);
++}
++
++void TurboAssembler::StoreReturnAddressAndCall(Register target) {
++  // This generates the final instruction sequence for calls to C functions
++  // once an exit frame has been constructed.
++  //
++  // Note that this assumes the caller code (i.e. the Code object currently
++  // being generated) is immovable or that the callee function cannot trigger
++  // GC, since the callee function will return to it.
++
++  Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
++  static constexpr int kNumInstructionsToJump = 2;
++  Label find_ra;
++  // Adjust the value in ra to point to the correct return location, 2nd
++  // instruction past the real call into C code (the jirl), and push it.
++  // This is the return address of the exit frame.
++  pcaddi(ra, kNumInstructionsToJump + 1);
++  bind(&find_ra);
++
++  // This spot was reserved in EnterExitFrame.
++  St_d(ra, MemOperand(sp, 0));
++  // Stack is still aligned.
++
++  // TODO: could one of a0 -- a7 be used as the jirl target?
++  jirl(zero_reg, target, 0);
++  // Make sure the stored 'ra' points to this position.
++  DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
++}
++
++void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
++  Jump(ra, cond, rj, rk);
++}
++
++void TurboAssembler::DropAndRet(int drop) {
++  DCHECK(is_int16(drop * kPointerSize));
++  addi_d(sp, sp, drop * kPointerSize);
++  Ret();
++}
++
++void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
++                                const Operand& r2) {
++  // Both Drop and Ret need to be conditional.
++  Label skip;
++  if (cond != cc_always) {
++    Branch(&skip, NegateCondition(cond), r1, r2);
++  }
++
++  Drop(drop);
++  Ret();
++
++  if (cond != cc_always) {
++    bind(&skip);
++  }
++}
++
++void TurboAssembler::Drop(int count, Condition cond, Register reg,
++                          const Operand& op) {
++  if (count <= 0) {
++    return;
++  }
++
++  Label skip;
++
++  if (cond != al) {
++    Branch(&skip, NegateCondition(cond), reg, op);
++  }
++
++  Add_d(sp, sp, Operand(count * kPointerSize));
++
++  if (cond != al) {
++    bind(&skip);
++  }
++}
++
++void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
++  if (scratch == no_reg) {
++    Xor(reg1, reg1, Operand(reg2));
++    Xor(reg2, reg2, Operand(reg1));
++    Xor(reg1, reg1, Operand(reg2));
++  } else {
++    mov(scratch, reg1);
++    mov(reg1, reg2);
++    mov(reg2, scratch);
++  }
++}
++
++void TurboAssembler::Call(Label* target) { Branch(target, true); }
++
++void TurboAssembler::Push(Smi smi) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  li(scratch, Operand(smi));
++  push(scratch);
++}
++
++void TurboAssembler::Push(Handle<HeapObject> handle) {
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  li(scratch, Operand(handle));
++  push(scratch);
++}
++
++void MacroAssembler::MaybeDropFrames() {
++  // Check whether we need to drop frames to restart a function on the stack.
++  li(a1, ExternalReference::debug_restart_fp_address(isolate()));
++  Ld_d(a1, MemOperand(a1, 0));
++  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
++       ne, a1, Operand(zero_reg));
++}
++
++// ---------------------------------------------------------------------------
++// Exception handling.
++
++void MacroAssembler::PushStackHandler() {
++  // Adjust this code if not the case.
++  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
++  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
++
++  Push(Smi::zero());  // Padding.
++
++  // Link the current handler as the next handler.
++  li(t2,
++     ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
++  Ld_d(t1, MemOperand(t2, 0));
++  push(t1);
++
++  // Set this new handler as the current one.
++  St_d(sp, MemOperand(t2, 0));
++}
++
++void MacroAssembler::PopStackHandler() {
++  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
++  pop(a1);
++  Add_d(sp, sp,
++        Operand(
++            static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
++  UseScratchRegisterScope temps(this);
++  Register scratch = temps.Acquire();
++  li(scratch,
++     ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
++  St_d(a1, MemOperand(scratch, 0));
++}
++
++void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
++                                        const DoubleRegister src) {
++  fsub_d(dst, src, kDoubleRegZero);
++}
++
++void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
++  Move(dst, f0);  // Reg f0 is the LoongArch return value.
++}
++
++void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
++  Move(dst, f0);  // Reg f0 is the LoongArch first argument value.
++} ++ ++void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(f0, src); } ++ ++void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(f0, src); } ++ ++void TurboAssembler::MovToFloatParameters(DoubleRegister src1, ++ DoubleRegister src2) { ++ const DoubleRegister fparg2 = f1; ++ if (src2 == f0) { ++ DCHECK(src1 != fparg2); ++ Move(fparg2, src2); ++ Move(f0, src1); ++ } else { ++ Move(f0, src1); ++ Move(fparg2, src2); ++ } ++} ++ ++// ----------------------------------------------------------------------------- ++// JavaScript invokes. ++ ++void TurboAssembler::PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, ++ Register scratch0, Register scratch1) { ++ // Calculate the end of destination area where we will put the arguments ++ // after we drop current frame. We add kPointerSize to count the receiver ++ // argument which is not included into formal parameters count. ++ Register dst_reg = scratch0; ++ Alsl_d(dst_reg, caller_args_count, fp, kPointerSizeLog2, t7); ++ Add_d(dst_reg, dst_reg, ++ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); ++ ++ Register src_reg = caller_args_count; ++ // Calculate the end of source area. +kPointerSize is for the receiver. ++ Alsl_d(src_reg, callee_args_count, sp, kPointerSizeLog2, t7); ++ Add_d(src_reg, src_reg, Operand(kPointerSize)); ++ ++ if (FLAG_debug_code) { ++ Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg, ++ Operand(dst_reg)); ++ } ++ ++ // Restore caller's frame pointer and return address now as they will be ++ // overwritten by the copying loop. ++ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ ++ // Now copy callee arguments to the caller frame going backwards to avoid ++ // callee arguments corruption (source and destination areas could overlap). ++ ++ // Both src_reg and dst_reg are pointing to the word after the one to copy, ++ // so they must be pre-decremented in the loop. ++ Register tmp_reg = scratch1; ++ Label loop, entry; ++ Branch(&entry); ++ bind(&loop); ++ Sub_d(src_reg, src_reg, Operand(kPointerSize)); ++ Sub_d(dst_reg, dst_reg, Operand(kPointerSize)); ++ Ld_d(tmp_reg, MemOperand(src_reg, 0)); ++ St_d(tmp_reg, MemOperand(dst_reg, 0)); ++ bind(&entry); ++ Branch(&loop, ne, sp, Operand(src_reg)); ++ ++ // Leave current frame. ++ mov(sp, dst_reg); ++} ++ ++void MacroAssembler::InvokePrologue(Register expected_parameter_count, ++ Register actual_parameter_count, ++ Label* done, InvokeFlag flag) { ++ Label regular_invoke; ++ ++ // Check whether the expected and actual arguments count match. The registers ++ // are set up according to contract with ArgumentsAdaptorTrampoline: ++ // a0: actual arguments count ++ // a1: function (passed through to callee) ++ // a2: expected arguments count ++ ++ // The code below is made a lot easier because the calling code already sets ++ // up actual and expected registers according to the contract. 
++
++ DCHECK_EQ(actual_parameter_count, a0);
++ DCHECK_EQ(expected_parameter_count, a2);
++
++ Branch(&regular_invoke, eq, expected_parameter_count,
++ Operand(actual_parameter_count));
++
++ Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
++ if (flag == CALL_FUNCTION) {
++ Call(adaptor);
++ Branch(done);
++ } else {
++ Jump(adaptor, RelocInfo::CODE_TARGET);
++ }
++
++ bind(&regular_invoke);
++}
++
++void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
++ Register expected_parameter_count,
++ Register actual_parameter_count) {
++ Label skip_hook;
++
++ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
++ Ld_b(t0, MemOperand(t0, 0));
++ Branch(&skip_hook, eq, t0, Operand(zero_reg));
++
++ {
++ // Load receiver to pass it later to DebugOnFunctionCall hook.
++ Alsl_d(t0, actual_parameter_count, sp, kPointerSizeLog2, t7);
++ Ld_d(t0, MemOperand(t0, 0));
++ FrameScope frame(this,
++ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
++ SmiTag(expected_parameter_count);
++ Push(expected_parameter_count);
++
++ SmiTag(actual_parameter_count);
++ Push(actual_parameter_count);
++
++ if (new_target.is_valid()) {
++ Push(new_target);
++ }
++ // TODO: MultiPush/Pop
++ Push(fun);
++ Push(fun);
++ Push(t0);
++ CallRuntime(Runtime::kDebugOnFunctionCall);
++ Pop(fun);
++ if (new_target.is_valid()) {
++ Pop(new_target);
++ }
++
++ Pop(actual_parameter_count);
++ SmiUntag(actual_parameter_count);
++
++ Pop(expected_parameter_count);
++ SmiUntag(expected_parameter_count);
++ }
++ bind(&skip_hook);
++}
++
++void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
++ Register expected_parameter_count,
++ Register actual_parameter_count,
++ InvokeFlag flag) {
++ // You can't call a function without a valid frame.
++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
++ DCHECK_EQ(function, a1);
++ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
++
++ // On function call, call into the debugger if necessary.
++ CheckDebugHook(function, new_target, expected_parameter_count,
++ actual_parameter_count);
++
++ // Clear the new.target register if not given.
++ if (!new_target.is_valid()) {
++ LoadRoot(a3, RootIndex::kUndefinedValue);
++ }
++
++ Label done;
++ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
++ // We call indirectly through the code field in the function to
++ // allow recompilation to take effect without changing any of the
++ // call sites.
++ Register code = kJavaScriptCallCodeStartRegister;
++ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
++ if (flag == CALL_FUNCTION) {
++ CallCodeObject(code);
++ } else {
++ DCHECK(flag == JUMP_FUNCTION);
++ JumpCodeObject(code);
++ }
++
++ // Continue here if InvokePrologue does handle the invocation due to
++ // mismatched parameter counts.
++ bind(&done);
++}
++
++void MacroAssembler::InvokeFunctionWithNewTarget(
++ Register function, Register new_target, Register actual_parameter_count,
++ InvokeFlag flag) {
++ // You can't call a function without a valid frame.
++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
++
++ // Contract with called JS functions requires that function is passed in a1.
++ DCHECK_EQ(function, a1); ++ Register expected_parameter_count = a2; ++ Register temp_reg = t0; ++ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ // The argument count is stored as uint16_t ++ Ld_hu(expected_parameter_count, ++ FieldMemOperand(temp_reg, ++ SharedFunctionInfo::kFormalParameterCountOffset)); ++ ++ InvokeFunctionCode(a1, new_target, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++void MacroAssembler::InvokeFunction(Register function, ++ Register expected_parameter_count, ++ Register actual_parameter_count, ++ InvokeFlag flag) { ++ // You can't call a function without a valid frame. ++ DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); ++ ++ // Contract with called JS functions requires that function is passed in a1. ++ DCHECK_EQ(function, a1); ++ ++ // Get the function and setup the context. ++ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); ++ ++ InvokeFunctionCode(a1, no_reg, expected_parameter_count, ++ actual_parameter_count, flag); ++} ++ ++// --------------------------------------------------------------------------- ++// Support functions. ++ ++void MacroAssembler::GetObjectType(Register object, Register map, ++ Register type_reg) { ++ LoadMap(map, object); ++ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); ++} ++ ++// ----------------------------------------------------------------------------- ++// Runtime calls. ++ ++void TurboAssembler::AdddOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ add_d(scratch, left, right_reg); ++ xor_(overflow, scratch, left); ++ xor_(t7, scratch, right_reg); ++ and_(overflow, overflow, t7); ++ mov(dst, scratch); ++ } else { ++ add_d(dst, left, right_reg); ++ xor_(overflow, dst, left); ++ xor_(t7, dst, right_reg); ++ and_(overflow, overflow, t7); ++ } ++} ++ ++void TurboAssembler::SubdOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ DCHECK(left != scratch && right_reg != scratch && dst != scratch && ++ overflow != scratch); ++ DCHECK(overflow != left && overflow != right_reg); ++ ++ if (dst == left || dst == right_reg) { ++ Sub_d(scratch, left, right_reg); ++ xor_(overflow, left, scratch); ++ xor_(t7, left, right_reg); ++ and_(overflow, overflow, t7); ++ mov(dst, scratch); ++ } else { ++ sub_d(dst, left, right_reg); ++ xor_(overflow, left, dst); ++ xor_(t7, left, right_reg); ++ and_(overflow, overflow, t7); ++ } ++} ++ ++void TurboAssembler::MulOverflow(Register dst, Register left, ++ const Operand& right, Register overflow) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ Register right_reg = no_reg; ++ Register scratch = t8; ++ if (!right.is_reg()) { ++ li(t7, Operand(right)); ++ right_reg = t7; ++ } else { ++ right_reg = right.rm(); ++ } ++ ++ 
DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
++ overflow != scratch);
++ DCHECK(overflow != left && overflow != right_reg);
++
++ if (dst == left || dst == right_reg) {
++ Mul_w(scratch, left, right_reg);
++ Mulh_w(overflow, left, right_reg);
++ mov(dst, scratch);
++ } else {
++ Mul_w(dst, left, right_reg);
++ Mulh_w(overflow, left, right_reg);
++ }
++
++ srai_d(scratch, dst, 32);
++ xor_(overflow, overflow, scratch);
++}
++
++void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
++ SaveFPRegsMode save_doubles) {
++ // All parameters are on the stack. a0 has the return value after call.
++
++ // If the expected number of arguments of the runtime function is
++ // constant, we check that the actual number of arguments match the
++ // expectation.
++ CHECK(f->nargs < 0 || f->nargs == num_arguments);
++
++ // TODO(1236192): Most runtime routines don't need the number of
++ // arguments passed in because it is constant. At some point we
++ // should remove this need and make the runtime routine entry code
++ // smarter.
++ PrepareCEntryArgs(num_arguments);
++ PrepareCEntryFunction(ExternalReference::Create(f));
++ Handle<Code> code =
++ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
++ Call(code, RelocInfo::CODE_TARGET);
++}
++
++void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
++ const Runtime::Function* function = Runtime::FunctionForId(fid);
++ DCHECK_EQ(1, function->result_size);
++ if (function->nargs >= 0) {
++ PrepareCEntryArgs(function->nargs);
++ }
++ JumpToExternalReference(ExternalReference::Create(fid));
++}
++
++void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
++ bool builtin_exit_frame) {
++ PrepareCEntryFunction(builtin);
++ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
++ kArgvOnStack, builtin_exit_frame);
++ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
++}
++
++void MacroAssembler::JumpToInstructionStream(Address entry) {
++ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
++ Jump(kOffHeapTrampolineRegister);
++}
++
++void MacroAssembler::LoadWeakValue(Register out, Register in,
++ Label* target_if_cleared) {
++ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
++
++ And(out, in, Operand(~kWeakHeapObjectMask));
++}
++
++void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
++ Register scratch1, Register scratch2) {
++ DCHECK_GT(value, 0);
++ if (FLAG_native_code_counters && counter->Enabled()) {
++ // This operation has to be exactly 32-bit wide in case the external
++ // reference table redirects the counter to a uint32_t dummy_stats_counter_
++ // field.
++ li(scratch2, ExternalReference::Create(counter));
++ Ld_w(scratch1, MemOperand(scratch2, 0));
++ Add_w(scratch1, scratch1, Operand(value));
++ St_w(scratch1, MemOperand(scratch2, 0));
++ }
++}
++
++void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
++ Register scratch1, Register scratch2) {
++ DCHECK_GT(value, 0);
++ if (FLAG_native_code_counters && counter->Enabled()) {
++ // This operation has to be exactly 32-bit wide in case the external
++ // reference table redirects the counter to a uint32_t dummy_stats_counter_
++ // field.
++ li(scratch2, ExternalReference::Create(counter));
++ Ld_w(scratch1, MemOperand(scratch2, 0));
++ Sub_w(scratch1, scratch1, Operand(value));
++ St_w(scratch1, MemOperand(scratch2, 0));
++ }
++}
++
++// -----------------------------------------------------------------------------
++// Debugging.
++
++void TurboAssembler::Trap() { stop(); }
++void TurboAssembler::DebugBreak() { stop(); }
++
++void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
++ Operand rk) {
++ if (emit_debug_code()) Check(cc, reason, rs, rk);
++}
++
++void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
++ Operand rk) {
++ Label L;
++ Branch(&L, cc, rj, rk);
++ Abort(reason);
++ // Will not return here.
++ bind(&L);
++}
++
++void TurboAssembler::Abort(AbortReason reason) {
++ Label abort_start;
++ bind(&abort_start);
++#ifdef DEBUG
++ const char* msg = GetAbortReason(reason);
++ RecordComment("Abort message: ");
++ RecordComment(msg);
++#endif
++
++ // Avoid emitting call to builtin if requested.
++ if (trap_on_abort()) {
++ stop();
++ return;
++ }
++
++ if (should_abort_hard()) {
++ // We don't care if we constructed a frame. Just pretend we did.
++ FrameScope assume_frame(this, StackFrame::NONE);
++ PrepareCallCFunction(0, a0);
++ li(a0, Operand(static_cast<int>(reason)));
++ CallCFunction(ExternalReference::abort_with_reason(), 1);
++ return;
++ }
++
++ Move(a0, Smi::FromInt(static_cast<int>(reason)));
++
++ // Disable stub call restrictions to always allow calls to abort.
++ if (!has_frame()) {
++ // We don't actually want to generate a pile of code for this, so just
++ // claim there is a stack frame, without generating one.
++ FrameScope scope(this, StackFrame::NONE);
++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
++ } else {
++ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
++ }
++ // Will not return here.
++ if (is_trampoline_pool_blocked()) {
++ // If the calling code cares about the exact number of
++ // instructions generated, we insert padding here to keep the size
++ // of the Abort macro constant.
++ // Currently in debug mode with debug_code enabled the number of
++ // generated instructions is 10, so we use this as a maximum value.
++ static const int kExpectedAbortInstructions = 10; ++ int abort_instructions = InstructionsGeneratedSince(&abort_start); ++ DCHECK_LE(abort_instructions, kExpectedAbortInstructions); ++ while (abort_instructions++ < kExpectedAbortInstructions) { ++ nop(); ++ } ++ } ++} ++ ++void MacroAssembler::LoadMap(Register destination, Register object) { ++ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset)); ++} ++ ++void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { ++ LoadMap(dst, cp); ++ Ld_d(dst, FieldMemOperand( ++ dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); ++ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index))); ++} ++ ++void TurboAssembler::StubPrologue(StackFrame::Type type) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(type))); ++ PushCommonFrame(scratch); ++} ++ ++void TurboAssembler::Prologue() { PushStandardFrame(a1); } ++ ++void TurboAssembler::EnterFrame(StackFrame::Type type) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ int stack_offset = -3 * kPointerSize; ++ const int fp_offset = 1 * kPointerSize; ++ addi_d(sp, sp, stack_offset); ++ stack_offset = -stack_offset - kPointerSize; ++ St_d(ra, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ St_d(fp, MemOperand(sp, stack_offset)); ++ stack_offset -= kPointerSize; ++ li(t7, Operand(StackFrame::TypeToMarker(type))); ++ St_d(t7, MemOperand(sp, stack_offset)); ++ // Adjust FP to point to saved FP. ++ DCHECK_EQ(stack_offset, 0); ++ Add_d(fp, sp, Operand(fp_offset)); ++} ++ ++void TurboAssembler::LeaveFrame(StackFrame::Type type) { ++ addi_d(sp, fp, 2 * kPointerSize); ++ Ld_d(ra, MemOperand(fp, 1 * kPointerSize)); ++ Ld_d(fp, MemOperand(fp, 0 * kPointerSize)); ++} ++ ++void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, ++ StackFrame::Type frame_type) { ++ DCHECK(frame_type == StackFrame::EXIT || ++ frame_type == StackFrame::BUILTIN_EXIT); ++ ++ // Set up the frame structure on the stack. ++ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); ++ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); ++ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); ++ ++ // This is how the stack will look: ++ // fp + 2 (==kCallerSPDisplacement) - old stack's end ++ // [fp + 1 (==kCallerPCOffset)] - saved old ra ++ // [fp + 0 (==kCallerFPOffset)] - saved old fp ++ // [fp - 1 StackFrame::EXIT Smi ++ // [fp - 2 (==kSPOffset)] - sp of the called function ++ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the ++ // new stack (will contain saved ra) ++ ++ // Save registers and reserve room for saved entry sp. ++ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); ++ St_d(ra, MemOperand(sp, 3 * kPointerSize)); ++ St_d(fp, MemOperand(sp, 2 * kPointerSize)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ li(scratch, Operand(StackFrame::TypeToMarker(frame_type))); ++ St_d(scratch, MemOperand(sp, 1 * kPointerSize)); ++ } ++ // Set up new frame pointer. ++ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); ++ ++ if (emit_debug_code()) { ++ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++ } ++ ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Save the frame pointer and the context in top. 
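++ // ("Top" here means the isolate-level IsolateAddressId slots; the saved C
++ // entry frame pointer in kCEntryFPAddress is what lets the stack iterator
++ // walk from C frames back to the calling JS frames.)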
++ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, ++ isolate())); ++ St_d(fp, MemOperand(t8, 0)); ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ St_d(cp, MemOperand(t8, 0)); ++ } ++ ++ const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); ++ if (save_doubles) { ++ // The stack is already aligned to 0 modulo 8 for stores with sdc1. ++ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; ++ int space = kNumOfSavedRegisters * kDoubleSize; ++ Sub_d(sp, sp, Operand(space)); ++ // Remember: we only need to save every 2nd double FPU value. ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(2 * i); ++ Fst_d(reg, MemOperand(sp, i * kDoubleSize)); ++ } ++ } ++ ++ // Reserve place for the return address, stack space and an optional slot ++ // (used by DirectCEntry to hold the return value if a struct is ++ // returned) and align the frame preparing for calling the runtime function. ++ DCHECK_GE(stack_space, 0); ++ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize)); ++ if (frame_alignment > 0) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ And(sp, sp, Operand(-frame_alignment)); // Align stack. ++ } ++ ++ // Set the exit frame sp value to point just before the return address ++ // location. ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ addi_d(scratch, sp, kPointerSize); ++ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); ++} ++ ++void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, ++ bool do_return, ++ bool argument_count_is_length) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ // Optionally restore all double registers. ++ if (save_doubles) { ++ // Remember: we only need to restore every 2nd double FPU value. ++ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2; ++ Sub_d(t8, fp, ++ Operand(ExitFrameConstants::kFixedFrameSizeFromFp + ++ kNumOfSavedRegisters * kDoubleSize)); ++ for (int i = 0; i < kNumOfSavedRegisters; i++) { ++ FPURegister reg = FPURegister::from_code(2 * i); ++ Fld_d(reg, MemOperand(t8, i * kDoubleSize)); ++ } ++ } ++ ++ // Clear top frame. ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); ++ St_d(zero_reg, MemOperand(t8, 0)); ++ ++ // Restore current context from top and clear it in debug mode. ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ Ld_d(cp, MemOperand(t8, 0)); ++ ++#ifdef DEBUG ++ li(t8, ++ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); ++ St_d(a3, MemOperand(t8, 0)); ++#endif ++ ++ // Pop the arguments, restore registers, and return. ++ mov(sp, fp); // Respect ABI stack constraint. ++ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); ++ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); ++ ++ if (argument_count.is_valid()) { ++ if (argument_count_is_length) { ++ add_d(sp, sp, argument_count); ++ } else { ++ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8); ++ } ++ } ++ ++ addi_d(sp, sp, 2 * kPointerSize); ++ if (do_return) { ++ Ret(); ++ } ++} ++ ++int TurboAssembler::ActivationFrameAlignment() { ++#if V8_HOST_ARCH_LOONG64 ++ // Running on the real platform. Use the alignment as mandated by the local ++ // environment. ++ // Note: This will break if we ever start generating snapshots on one Mips ++ // platform for another Mips platform with a different alignment. 
++ return base::OS::ActivationFrameAlignment(); ++#else // V8_HOST_ARCH_LOONG64 ++ // If we are using the simulator then we should always align to the expected ++ // alignment. As the simulator is used to generate snapshots we do not know ++ // if the target platform will need alignment, so this is controlled from a ++ // flag. ++ return FLAG_sim_stack_alignment; ++#endif // V8_HOST_ARCH_LOONG64 ++} ++ ++void MacroAssembler::AssertStackIsAligned() { ++ if (emit_debug_code()) { ++ const int frame_alignment = ActivationFrameAlignment(); ++ const int frame_alignment_mask = frame_alignment - 1; ++ ++ if (frame_alignment > kPointerSize) { ++ Label alignment_as_expected; ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, sp, frame_alignment_mask); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort re-entering here. ++ stop(); ++ bind(&alignment_as_expected); ++ } ++ } ++} ++ ++void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { ++ if (SmiValuesAre32Bits()) { ++ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset()))); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ Ld_w(dst, src); ++ SmiUntag(dst); ++ } ++} ++ ++void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, ++ Register scratch) { ++ DCHECK_EQ(0, kSmiTag); ++ andi(scratch, value, kSmiTagMask); ++ Branch(smi_label, eq, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, ++ Register scratch) { ++ DCHECK_EQ(0, kSmiTag); ++ andi(scratch, value, kSmiTagMask); ++ Branch(not_smi_label, ne, scratch, Operand(zero_reg)); ++} ++ ++void MacroAssembler::AssertNotSmi(Register object) { ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, object, kSmiTagMask); ++ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertSmi(Register object) { ++ if (emit_debug_code()) { ++ STATIC_ASSERT(kSmiTag == 0); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ andi(scratch, object, kSmiTagMask); ++ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertConstructor(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8, ++ Operand(zero_reg)); ++ ++ LoadMap(t8, object); ++ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset)); ++ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask)); ++ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg)); ++ } ++} ++ ++void MacroAssembler::AssertFunction(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, ++ Operand(zero_reg)); ++ GetObjectType(object, t8, t8); ++ Check(eq, AbortReason::kOperandIsNotAFunction, t8, ++ Operand(JS_FUNCTION_TYPE)); ++ } ++} ++ ++void MacroAssembler::AssertBoundFunction(Register object) { ++ if (emit_debug_code()) { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, 
AbortReason::kOperandIsASmiAndNotABoundFunction, t8, ++ Operand(zero_reg)); ++ GetObjectType(object, t8, t8); ++ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8, ++ Operand(JS_BOUND_FUNCTION_TYPE)); ++ } ++} ++ ++void MacroAssembler::AssertGeneratorObject(Register object) { ++ if (!emit_debug_code()) return; ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ STATIC_ASSERT(kSmiTag == 0); ++ SmiTst(object, t8); ++ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8, ++ Operand(zero_reg)); ++ ++ GetObjectType(object, t8, t8); ++ ++ Label done; ++ ++ // Check if JSGeneratorObject ++ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE)); ++ ++ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) ++ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); ++ ++ // Check if JSAsyncGeneratorObject ++ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); ++ ++ Abort(AbortReason::kOperandIsNotAGeneratorObject); ++ ++ bind(&done); ++} ++ ++void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, ++ Register scratch) { ++ if (emit_debug_code()) { ++ Label done_checking; ++ AssertNotSmi(object); ++ LoadRoot(scratch, RootIndex::kUndefinedValue); ++ Branch(&done_checking, eq, object, Operand(scratch)); ++ GetObjectType(object, scratch, scratch); ++ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, ++ Operand(ALLOCATION_SITE_TYPE)); ++ bind(&done_checking); ++ } ++} ++ ++void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmax_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_s(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF32(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmin_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_s(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. ++ CompareIsNanF64(src1, src2); ++ BranchTrueF(out_of_line); ++ ++ fmax_d(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, ++ FPURegister src2) { ++ fadd_d(dst, src1, src2); ++} ++ ++void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, ++ FPURegister src2, Label* out_of_line) { ++ if (src1 == src2) { ++ Move_d(dst, src1); ++ return; ++ } ++ ++ // Check if one of operands is NaN. 
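++ // (As in the Float32 variants above, the *OutOfLine helpers rely on the
++ // FPU add purely to produce the canonical quiet NaN once an input is
++ // known to be a NaN.)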
++ CompareIsNanF64(src1, src2);
++ BranchTrueF(out_of_line);
++
++ fmin_d(dst, src1, src2);
++}
++
++void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
++ FPURegister src2) {
++ fadd_d(dst, src1, src2);
++}
++
++static const int kRegisterPassedArguments = 8;
++
++int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
++ int num_double_arguments) {
++ int stack_passed_words = 0;
++ num_reg_arguments += 2 * num_double_arguments;
++
++ // Up to eight simple arguments are passed in registers a0..a7.
++ if (num_reg_arguments > kRegisterPassedArguments) {
++ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
++ }
++ stack_passed_words += kCArgSlotCount;
++ return stack_passed_words;
++}
++
++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
++ int num_double_arguments,
++ Register scratch) {
++ int frame_alignment = ActivationFrameAlignment();
++
++ // Up to eight simple arguments are passed in registers a0..a7. Remaining
++ // arguments are pushed on the stack (the arg slot calculation is handled
++ // by CalculateStackPassedWords()).
++ int stack_passed_arguments =
++ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
++ if (frame_alignment > kPointerSize) {
++ // Make stack end at alignment and make room for the stack arguments
++ // and the original value of sp.
++ mov(scratch, sp);
++ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
++ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
++ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
++ } else {
++ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
++ }
++}
++
++void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
++ Register scratch) {
++ PrepareCallCFunction(num_reg_arguments, 0, scratch);
++}
++
++void TurboAssembler::CallCFunction(ExternalReference function,
++ int num_reg_arguments,
++ int num_double_arguments) {
++ BlockTrampolinePoolScope block_trampoline_pool(this);
++ li(t7, function);
++ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
++}
++
++void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
++ int num_double_arguments) {
++ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
++}
++
++void TurboAssembler::CallCFunction(ExternalReference function,
++ int num_arguments) {
++ CallCFunction(function, num_arguments, 0);
++}
++
++void TurboAssembler::CallCFunction(Register function, int num_arguments) {
++ CallCFunction(function, num_arguments, 0);
++}
++
++void TurboAssembler::CallCFunctionHelper(Register function,
++ int num_reg_arguments,
++ int num_double_arguments) {
++ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
++ DCHECK(has_frame());
++ // Make sure that the stack is aligned before calling a C function unless
++ // running in the simulator. The simulator has its own alignment check which
++ // provides more information.
++ // The argument slots are presumed to have been set up by
++ // PrepareCallCFunction. The C function is called through t7 below.
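++ // (The alignment check that follows is debug-only: PrepareCallCFunction
++ // already rounded sp down with bstrins_d above, so a misaligned sp here
++ // would indicate a macro-assembler bug rather than a caller error.)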
++ ++#if V8_HOST_ARCH_LOONG64 ++ if (emit_debug_code()) { ++ int frame_alignment = base::OS::ActivationFrameAlignment(); ++ int frame_alignment_mask = frame_alignment - 1; ++ if (frame_alignment > kPointerSize) { ++ DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); ++ Label alignment_as_expected; ++ { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ And(scratch, sp, Operand(frame_alignment_mask)); ++ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); ++ } ++ // Don't use Check here, as it will call Runtime_Abort possibly ++ // re-entering here. ++ stop(); ++ bind(&alignment_as_expected); ++ } ++ } ++#endif // V8_HOST_ARCH_LOONG64 ++ ++ // Just call directly. The function called cannot cause a GC, or ++ // allow preemption, so the return address in the link register ++ // stays correct. ++ { ++ BlockTrampolinePoolScope block_trampoline_pool(this); ++ if (function != t7) { ++ mov(t7, function); ++ function = t7; ++ } ++ ++ // Save the frame pointer and PC so that the stack layout remains iterable, ++ // even without an ExitFrame which normally exists between JS and C frames. ++ // 't' registers are caller-saved so this is safe as a scratch register. ++ Register pc_scratch = t1; ++ Register scratch = t2; ++ DCHECK(!AreAliased(pc_scratch, scratch, function)); ++ ++ pcaddi(pc_scratch, 1); ++ ++ // See x64 code for reasoning about how to address the isolate data fields. ++ if (root_array_available()) { ++ St_d(pc_scratch, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_pc_offset())); ++ St_d(fp, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); ++ St_d(pc_scratch, MemOperand(scratch, 0)); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ St_d(fp, MemOperand(scratch, 0)); ++ } ++ ++ Call(function); ++ ++ // We don't unset the PC; the FP is the source of truth. 
++ if (root_array_available()) { ++ St_d(zero_reg, MemOperand(kRootRegister, ++ IsolateData::fast_c_call_caller_fp_offset())); ++ } else { ++ DCHECK_NOT_NULL(isolate()); ++ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); ++ St_d(zero_reg, MemOperand(scratch, 0)); ++ } ++ } ++ ++ int stack_passed_arguments = ++ CalculateStackPassedWords(num_reg_arguments, num_double_arguments); ++ ++ if (base::OS::ActivationFrameAlignment() > kPointerSize) { ++ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); ++ } else { ++ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize)); ++ } ++} ++ ++#undef BRANCH_ARGS_CHECK ++ ++void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, ++ Condition cc, Label* condition_met) { ++ And(scratch, object, Operand(~kPageAlignmentMask)); ++ Ld_d(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); ++ And(scratch, scratch, Operand(mask)); ++ Branch(condition_met, cc, scratch, Operand(zero_reg)); ++} ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, ++ Register reg4, Register reg5, ++ Register reg6) { ++ RegList regs = 0; ++ if (reg1.is_valid()) regs |= reg1.bit(); ++ if (reg2.is_valid()) regs |= reg2.bit(); ++ if (reg3.is_valid()) regs |= reg3.bit(); ++ if (reg4.is_valid()) regs |= reg4.bit(); ++ if (reg5.is_valid()) regs |= reg5.bit(); ++ if (reg6.is_valid()) regs |= reg6.bit(); ++ ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { ++ int code = config->GetAllocatableGeneralCode(i); ++ Register candidate = Register::from_code(code); ++ if (regs & candidate.bit()) continue; ++ return candidate; ++ } ++ UNREACHABLE(); ++} ++ ++void TurboAssembler::ComputeCodeStartAddress(Register dst) { ++ // TODO: range check, add Pcadd macro function? ++ pcaddi(dst, -pc_offset() >> 2); ++} ++ ++void TurboAssembler::ResetSpeculationPoisonRegister() { ++ li(kSpeculationPoisonRegister, -1); ++} ++ ++void TurboAssembler::CallForDeoptimization(Address target, int deopt_id, ++ Label* exit, DeoptimizeKind kind) { ++ USE(exit, kind); ++ NoRootArrayScope no_root_array(this); ++ ++ // Save the deopt id in kRootRegister (we don't need the roots array from now ++ // on). ++ DCHECK_LE(deopt_id, 0xFFFF); ++ li(kRootRegister, deopt_id); ++ Call(target, RelocInfo::RUNTIME_ENTRY); ++} ++ ++void TurboAssembler::LoadCodeObjectEntry(Register destination, ++ Register code_object) { ++ // Code objects are called differently depending on whether we are generating ++ // builtin code (which will later be embedded into the binary) or compiling ++ // user JS code at runtime. ++ // * Builtin code runs in --jitless mode and thus must not call into on-heap ++ // Code targets. Instead, we dispatch through the builtins entry table. ++ // * Codegen at runtime does not have this restriction and we can use the ++ // shorter, branchless instruction sequence. The assumption here is that ++ // targets are usually generated code and not builtin Code objects. ++ if (options().isolate_independent_code) { ++ DCHECK(root_array_available()); ++ Label if_code_is_off_heap, out; ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ ++ DCHECK(!AreAliased(destination, scratch)); ++ DCHECK(!AreAliased(code_object, scratch)); ++ ++ // Check whether the Code object is an off-heap trampoline. If so, call its ++ // (off-heap) entry point directly without going through the (on-heap) ++ // trampoline. 
Otherwise, just call the Code object as always. ++ Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset)); ++ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask)); ++ BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg)); ++ // Not an off-heap trampoline object, the entry point is at ++ // Code::raw_instruction_start(). ++ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag); ++ Branch(&out); ++ ++ // An off-heap trampoline, the entry point is loaded from the builtin entry ++ // table. ++ bind(&if_code_is_off_heap); ++ Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset)); ++ slli_d(destination, scratch, kSystemPointerSizeLog2); ++ Add_d(destination, destination, kRootRegister); ++ Ld_d(destination, ++ MemOperand(destination, IsolateData::builtin_entry_table_offset())); ++ ++ bind(&out); ++ } else { ++ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag); ++ } ++} ++ ++void TurboAssembler::CallCodeObject(Register code_object) { ++ LoadCodeObjectEntry(code_object, code_object); ++ Call(code_object); ++} ++ ++void TurboAssembler::JumpCodeObject(Register code_object) { ++ LoadCodeObjectEntry(code_object, code_object); ++ Jump(code_object); ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LOONG64 +diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h +new file mode 100644 +index 00000000..497d61fb +--- /dev/null ++++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h +@@ -0,0 +1,1077 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H ++#error This header must be included via macro-assembler.h ++#endif ++ ++#ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_ ++#define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_ ++ ++#include "src/codegen/assembler.h" ++#include "src/codegen/loong64/assembler-loong64.h" ++#include "src/common/globals.h" ++ ++namespace v8 { ++namespace internal { ++ ++// Forward declarations. ++enum class AbortReason : uint8_t; ++ ++// Reserved Register Usage Summary. ++// ++// Registers t8 and t7 are reserved for use by the MacroAssembler. ++// ++// The programmer should know that the MacroAssembler may clobber these two, ++// but won't touch other registers except in special cases. ++// ++// Per the MIPS ABI, register t0 -- t8 must be used for indirect function call ++// via 'jirl t[0-8]' instructions. gcc? ++ ++// Flags used for LeaveExitFrame function. ++enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false }; ++ ++// Flags used for the li macro-assembler function. ++enum LiFlags { ++ // If the constant value can be represented in just 12 bits, then ++ // optimize the li to use a single instruction, rather than lu12i_w/lu32i_d/ ++ // lu52i_d/ori sequence. A number of other optimizations that emits less than ++ // maximum number of instructions exists. ++ OPTIMIZE_SIZE = 0, ++ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence), ++ // even if the constant could be loaded with just one, so that this value is ++ // patchable later. ++ CONSTANT_SIZE = 1, ++ // For address loads only 3 instruction are required. Used to mark ++ // constant load that will be used as address without relocation ++ // information. It ensures predictable code size, so specific sites ++ // in code are patchable. 
++ ADDRESS_LOAD = 2 ++}; ++ ++enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; ++enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; ++enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; ++ ++Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, ++ Register reg3 = no_reg, ++ Register reg4 = no_reg, ++ Register reg5 = no_reg, ++ Register reg6 = no_reg); ++ ++// ----------------------------------------------------------------------------- ++// Static helper functions. ++ ++#define SmiWordOffset(offset) (offset + kPointerSize / 2) ++ ++// Generate a MemOperand for loading a field from an object. ++inline MemOperand FieldMemOperand(Register object, int offset) { ++ return MemOperand(object, offset - kHeapObjectTag); ++} ++ ++class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ++ public: ++ using TurboAssemblerBase::TurboAssemblerBase; ++ ++ // Activation support. ++ void EnterFrame(StackFrame::Type type); ++ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ++ // Out-of-line constant pool not implemented on loong64. ++ UNREACHABLE(); ++ } ++ void LeaveFrame(StackFrame::Type type); ++ ++ // Generates function and stub prologue code. ++ void StubPrologue(StackFrame::Type type); ++ void Prologue(); ++ ++ void InitializeRootRegister() { ++ ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); ++ li(kRootRegister, Operand(isolate_root)); ++ } ++ ++ // Jump unconditionally to given label. ++ // Use rather b(Label) for code generation. ++ void jmp(Label* L) { Branch(L); } ++ ++ // ------------------------------------------------------------------------- ++ // Debugging. ++ ++ void Trap() override; ++ void DebugBreak() override; ++ ++ // Calls Abort(msg) if the condition cc is not satisfied. ++ // Use --debug_code to enable. ++ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk); ++ ++ // Like Assert(), but always enabled. ++ void Check(Condition cc, AbortReason reason, Register rj, Operand rk); ++ ++ // Print a message to stdout and abort execution. 
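++ // (See the definition earlier in this patch: depending on configuration,
++ // Abort may emit a bare trap, a C call to abort_with_reason, or a call to
++ // the Abort builtin.)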
++ void Abort(AbortReason msg);
++
++ void Branch(Label* label, bool need_link = false);
++ void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
++ bool need_link = false);
++ void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
++ bool need_link = false);
++ void Branch(Label* L, Condition cond, Register rj, RootIndex index);
++
++ // Floating point branches
++ void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
++ CFRegister cd = FCC0) {
++ CompareF(cmp1, cmp2, cc, cd, true);
++ }
++
++ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
++ CFRegister cd = FCC0) {
++ CompareIsNanF(cmp1, cmp2, cd, true);
++ }
++
++ void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
++ CFRegister cd = FCC0) {
++ CompareF(cmp1, cmp2, cc, cd, false);
++ }
++
++ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
++ CFRegister cd = FCC0) {
++ CompareIsNanF(cmp1, cmp2, cd, false);
++ }
++
++ void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
++ void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
++
++ void BranchTrueF(Label* target, CFRegister cc = FCC0);
++ void BranchFalseF(Label* target, CFRegister cc = FCC0);
++
++ static int InstrCountForLi64Bit(int64_t value);
++ inline void LiLower32BitHelper(Register rd, Operand j);
++ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
++ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
++ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
++ li(rd, Operand(j), mode);
++ }
++ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
++ li(rd, Operand(static_cast<int64_t>(j)), mode);
++ }
++ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
++ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
++ void li(Register dst, const StringConstantBase* string,
++ LiFlags mode = OPTIMIZE_SIZE);
++
++ void LoadFromConstantsTable(Register destination,
++ int constant_index) override;
++ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
++ void LoadRootRelative(Register destination, int32_t offset) override;
++
++// Jump, Call, and Ret pseudo instructions implementing inter-working.
++#define COND_ARGS \
++ Condition cond = al, Register rj = zero_reg, \
++ const Operand &rk = Operand(zero_reg)
++
++ void Jump(Register target, COND_ARGS);
++ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
++ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
++ // Unlike li, this method saves the target address to memory and then loads
++ // it back into a register with ld_d, so it can be used in wasm jump tables
++ // for concurrent patching.
++ void PatchAndJump(Address target);
++ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
++ void Jump(const ExternalReference& reference) override;
++ void Call(Register target, COND_ARGS);
++ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
++ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
++ COND_ARGS);
++ void Call(Label* target);
++ void LoadAddress(Register dst, Label* target);
++
++ // Load the builtin given by the Smi in |builtin_index| into the same
++ // register.
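++ // (Presumably the Smi index is untagged and scaled by
++ // kSystemPointerSizeLog2 into the isolate's builtin entry table, mirroring
++ // LoadCodeObjectEntry's off-heap path earlier in this patch.)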
++ void LoadEntryFromBuiltinIndex(Register builtin_index);
++ void CallBuiltinByIndex(Register builtin_index) override;
++ void CallBuiltin(int builtin_index);
++
++ void LoadCodeObjectEntry(Register destination, Register code_object) override;
++
++ void CallCodeObject(Register code_object) override;
++
++ void JumpCodeObject(Register code_object) override;
++
++ // Generates an instruction sequence s.t. the return address points to the
++ // instruction following the call.
++ // The return address on the stack is used by frame iteration.
++ void StoreReturnAddressAndCall(Register target);
++
++ void CallForDeoptimization(Address target, int deopt_id, Label* exit,
++ DeoptimizeKind kind);
++
++ void Ret(COND_ARGS);
++
++ // Emit code to discard a non-negative number of pointer-sized elements
++ // from the stack, clobbering only the sp register.
++ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
++ const Operand& op = Operand(no_reg));
++
++ // Trivial case of DropAndRet that only emits 2 instructions.
++ void DropAndRet(int drop);
++
++ void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
++
++ void Ld_d(Register rd, const MemOperand& rj);
++ void St_d(Register rd, const MemOperand& rj);
++
++ void push(Register src) {
++ Add_d(sp, sp, Operand(-kPointerSize));
++ St_d(src, MemOperand(sp, 0));
++ }
++ void Push(Register src) { push(src); }
++ void Push(Handle<HeapObject> handle);
++ void Push(Smi smi);
++
++ // Push two registers. Pushes leftmost register first (to highest address).
++ void Push(Register src1, Register src2) {
++ Sub_d(sp, sp, Operand(2 * kPointerSize));
++ St_d(src1, MemOperand(sp, 1 * kPointerSize));
++ St_d(src2, MemOperand(sp, 0 * kPointerSize));
++ }
++
++ // Push three registers. Pushes leftmost register first (to highest address).
++ void Push(Register src1, Register src2, Register src3) {
++ Sub_d(sp, sp, Operand(3 * kPointerSize));
++ St_d(src1, MemOperand(sp, 2 * kPointerSize));
++ St_d(src2, MemOperand(sp, 1 * kPointerSize));
++ St_d(src3, MemOperand(sp, 0 * kPointerSize));
++ }
++
++ // Push four registers. Pushes leftmost register first (to highest address).
++ void Push(Register src1, Register src2, Register src3, Register src4) {
++ Sub_d(sp, sp, Operand(4 * kPointerSize));
++ St_d(src1, MemOperand(sp, 3 * kPointerSize));
++ St_d(src2, MemOperand(sp, 2 * kPointerSize));
++ St_d(src3, MemOperand(sp, 1 * kPointerSize));
++ St_d(src4, MemOperand(sp, 0 * kPointerSize));
++ }
++
++ // Push five registers. Pushes leftmost register first (to highest address).
++ void Push(Register src1, Register src2, Register src3, Register src4,
++ Register src5) {
++ Sub_d(sp, sp, Operand(5 * kPointerSize));
++ St_d(src1, MemOperand(sp, 4 * kPointerSize));
++ St_d(src2, MemOperand(sp, 3 * kPointerSize));
++ St_d(src3, MemOperand(sp, 2 * kPointerSize));
++ St_d(src4, MemOperand(sp, 1 * kPointerSize));
++ St_d(src5, MemOperand(sp, 0 * kPointerSize));
++ }
++
++ void Push(Register src, Condition cond, Register tst1, Register tst2) {
++ // Since we don't have conditional execution we use a Branch.
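++ // (LoongArch has no predicated moves or stores here, so the conditional
++ // push is expressed as a branch around an unconditional store.)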
++ Label skip; ++ Branch(&skip, cond, tst1, Operand(tst2)); ++ addi_d(sp, sp, -kPointerSize); ++ st_d(src, sp, 0); ++ bind(&skip); ++ } ++ ++ void SaveRegisters(RegList registers); ++ void RestoreRegisters(RegList registers); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode); ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Address wasm_target); ++ void CallEphemeronKeyBarrier(Register object, Register address, ++ SaveFPRegsMode fp_mode); ++ ++ // Push multiple registers on the stack. ++ // Registers are saved in numerical order, with higher numbered registers ++ // saved in higher memory addresses. ++ void MultiPush(RegList regs); ++ void MultiPush(RegList regs1, RegList regs2); ++ void MultiPush(RegList regs1, RegList regs2, RegList regs3); ++ void MultiPushFPU(RegList regs); ++ ++ // Calculate how much stack space (in bytes) are required to store caller ++ // registers excluding those specified in the arguments. ++ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ++ Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg) const; ++ ++ // Push caller saved registers on the stack, and return the number of bytes ++ // stack pointer is adjusted. ++ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ // Restore caller saved registers from the stack, and return the number of ++ // bytes stack pointer is adjusted. ++ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg, ++ Register exclusion2 = no_reg, ++ Register exclusion3 = no_reg); ++ ++ void pop(Register dst) { ++ Ld_d(dst, MemOperand(sp, 0)); ++ Add_d(sp, sp, Operand(kPointerSize)); ++ } ++ void Pop(Register dst) { pop(dst); } ++ ++ // Pop two registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2) { ++ DCHECK(src1 != src2); ++ Ld_d(src2, MemOperand(sp, 0 * kPointerSize)); ++ Ld_d(src1, MemOperand(sp, 1 * kPointerSize)); ++ Add_d(sp, sp, 2 * kPointerSize); ++ } ++ ++ // Pop three registers. Pops rightmost register first (from lower address). ++ void Pop(Register src1, Register src2, Register src3) { ++ Ld_d(src3, MemOperand(sp, 0 * kPointerSize)); ++ Ld_d(src2, MemOperand(sp, 1 * kPointerSize)); ++ Ld_d(src1, MemOperand(sp, 2 * kPointerSize)); ++ Add_d(sp, sp, 3 * kPointerSize); ++ } ++ ++ void Pop(uint32_t count = 1) { Add_d(sp, sp, Operand(count * kPointerSize)); } ++ ++ // Pops multiple values from the stack and load them in the ++ // registers specified in regs. Pop order is the opposite as in MultiPush. 
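++ // For example, MultiPush(a0.bit() | a1.bit()) paired later with
++ // MultiPop(a0.bit() | a1.bit()) restores both registers, since the bit set
++ // fixes the push and pop order.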
++ void MultiPop(RegList regs); ++ void MultiPop(RegList regs1, RegList regs2); ++ void MultiPop(RegList regs1, RegList regs2, RegList regs3); ++ ++ void MultiPopFPU(RegList regs); ++ ++#define DEFINE_INSTRUCTION(instr) \ ++ void instr(Register rd, Register rj, const Operand& rk); \ ++ void instr(Register rd, Register rj, Register rk) { \ ++ instr(rd, rj, Operand(rk)); \ ++ } \ ++ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); } ++ ++#define DEFINE_INSTRUCTION2(instr) \ ++ void instr(Register rj, const Operand& rk); \ ++ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \ ++ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); } ++ ++ DEFINE_INSTRUCTION(Add_w) ++ DEFINE_INSTRUCTION(Add_d) ++ DEFINE_INSTRUCTION(Div_w) ++ DEFINE_INSTRUCTION(Div_wu) ++ DEFINE_INSTRUCTION(Div_du) ++ DEFINE_INSTRUCTION(Mod_w) ++ DEFINE_INSTRUCTION(Mod_wu) ++ DEFINE_INSTRUCTION(Div_d) ++ DEFINE_INSTRUCTION(Sub_w) ++ DEFINE_INSTRUCTION(Sub_d) ++ DEFINE_INSTRUCTION(Mod_d) ++ DEFINE_INSTRUCTION(Mod_du) ++ DEFINE_INSTRUCTION(Mul_w) ++ DEFINE_INSTRUCTION(Mulh_w) ++ DEFINE_INSTRUCTION(Mulh_wu) ++ DEFINE_INSTRUCTION(Mul_d) ++ DEFINE_INSTRUCTION(Mulh_d) ++ DEFINE_INSTRUCTION2(Div_w) ++ DEFINE_INSTRUCTION2(Div_d) ++ DEFINE_INSTRUCTION2(Div_wu) ++ DEFINE_INSTRUCTION2(Div_du) ++ ++ DEFINE_INSTRUCTION(And) ++ DEFINE_INSTRUCTION(Or) ++ DEFINE_INSTRUCTION(Xor) ++ DEFINE_INSTRUCTION(Nor) ++ DEFINE_INSTRUCTION2(Neg) ++ DEFINE_INSTRUCTION(Andn) ++ DEFINE_INSTRUCTION(Orn) ++ ++ DEFINE_INSTRUCTION(Slt) ++ DEFINE_INSTRUCTION(Sltu) ++ DEFINE_INSTRUCTION(Slti) ++ DEFINE_INSTRUCTION(Sltiu) ++ DEFINE_INSTRUCTION(Sle) ++ DEFINE_INSTRUCTION(Sleu) ++ DEFINE_INSTRUCTION(Sgt) ++ DEFINE_INSTRUCTION(Sgtu) ++ DEFINE_INSTRUCTION(Sge) ++ DEFINE_INSTRUCTION(Sgeu) ++ ++ DEFINE_INSTRUCTION(Rotr_w) ++ DEFINE_INSTRUCTION(Rotr_d) ++ ++#undef DEFINE_INSTRUCTION ++#undef DEFINE_INSTRUCTION2 ++#undef DEFINE_INSTRUCTION3 ++ ++ void SmiUntag(Register dst, const MemOperand& src); ++ void SmiUntag(Register dst, Register src) { ++ if (SmiValuesAre32Bits()) { ++ srai_d(dst, src, kSmiShift); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ srai_w(dst, src, kSmiShift); ++ } ++ } ++ ++ void SmiUntag(Register reg) { SmiUntag(reg, reg); } ++ ++ // Removes current frame and its arguments from the stack preserving ++ // the arguments and a return address pushed to the stack for the next call. ++ // Both |callee_args_count| and |caller_args_count| do not include ++ // receiver. |callee_args_count| is not modified. |caller_args_count| ++ // is trashed. ++ void PrepareForTailCall(Register callee_args_count, ++ Register caller_args_count, Register scratch0, ++ Register scratch1); ++ ++ int CalculateStackPassedWords(int num_reg_arguments, ++ int num_double_arguments); ++ ++ // Before calling a C-function from generated code, align arguments on stack ++ // and add space for the four mips argument slots. ++ // After aligning the frame, non-register arguments must be stored on the ++ // stack, after the argument-slots using helper: CFunctionArgumentOperand(). ++ // The argument count assumes all arguments are word sized. ++ // Some compilers/platforms require the stack to be aligned when calling ++ // C++ code. ++ // Needs a scratch register to do some arithmetic. This register will be ++ // trashed. 
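++ // Typical sequence (a sketch, reusing the abort_with_reason reference that
++ // Abort() earlier in this patch also calls):
++ // PrepareCallCFunction(1, 0, scratch);
++ // // ...move the argument into a0...
++ // CallCFunction(ExternalReference::abort_with_reason(), 1);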
++ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers, ++ Register scratch); ++ void PrepareCallCFunction(int num_reg_arguments, Register scratch); ++ ++ // Calls a C function and cleans up the space for arguments allocated ++ // by PrepareCallCFunction. The called function is not allowed to trigger a ++ // garbage collection, since that might move the code and invalidate the ++ // return address (unless this is somehow accounted for by the called ++ // function). ++ void CallCFunction(ExternalReference function, int num_arguments); ++ void CallCFunction(Register function, int num_arguments); ++ void CallCFunction(ExternalReference function, int num_reg_arguments, ++ int num_double_arguments); ++ void CallCFunction(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ void MovFromFloatResult(DoubleRegister dst); ++ void MovFromFloatParameter(DoubleRegister dst); ++ ++ // There are two ways of passing double arguments on MIPS, depending on ++ // whether soft or hard floating point ABI is used. These functions ++ // abstract parameter passing for the three different ways we call ++ // C functions from generated code. ++ void MovToFloatParameter(DoubleRegister src); ++ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); ++ void MovToFloatResult(DoubleRegister src); ++ ++ // See comments at the beginning of Builtins::Generate_CEntry. ++ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); } ++ inline void PrepareCEntryFunction(const ExternalReference& ref) { ++ li(a1, ref); ++ } ++ ++ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, ++ Label* condition_met); ++#undef COND_ARGS ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. ++ // Exits with 'result' holding the answer. ++ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, ++ DoubleRegister double_input, StubCallMode stub_mode); ++ ++ // Conditional move. ++ void Movz(Register rd, Register rj, Register rk); ++ void Movn(Register rd, Register rj, Register rk); ++ ++ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0); ++ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0); ++ ++ void LoadZeroIfConditionNotZero(Register dest, Register condition); ++ void LoadZeroIfConditionZero(Register dest, Register condition); ++ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk, ++ Condition cond); ++ ++ void Clz_w(Register rd, Register rj); ++ void Clz_d(Register rd, Register rj); ++ void Ctz_w(Register rd, Register rj); ++ void Ctz_d(Register rd, Register rj); ++ void Popcnt_w(Register rd, Register rj); ++ void Popcnt_d(Register rd, Register rj); ++ ++ void ExtractBits(Register dest, Register source, Register pos, int size, ++ bool sign_extend = false); ++ void InsertBits(Register dest, Register source, Register pos, int size); ++ ++ void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lswb); ++ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw); ++ void Neg_s(FPURegister fd, FPURegister fj); ++ void Neg_d(FPURegister fd, FPURegister fk); ++ ++ // Convert single to unsigned word. 
++ void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch); ++ void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch); ++ ++ // Change endianness ++ void ByteSwapSigned(Register dest, Register src, int operand_size); ++ void ByteSwapUnsigned(Register dest, Register src, int operand_size); ++ ++ void Ld_b(Register rd, const MemOperand& rj); ++ void Ld_bu(Register rd, const MemOperand& rj); ++ void St_b(Register rd, const MemOperand& rj); ++ ++ void Ld_h(Register rd, const MemOperand& rj); ++ void Ld_hu(Register rd, const MemOperand& rj); ++ void St_h(Register rd, const MemOperand& rj); ++ ++ void Ld_w(Register rd, const MemOperand& rj); ++ void Ld_wu(Register rd, const MemOperand& rj); ++ void St_w(Register rd, const MemOperand& rj); ++ ++ void Fld_s(FPURegister fd, const MemOperand& src); ++ void Fst_s(FPURegister fj, const MemOperand& dst); ++ ++ void Fld_d(FPURegister fd, const MemOperand& src); ++ void Fst_d(FPURegister fj, const MemOperand& dst); ++ ++ void Ll_w(Register rd, const MemOperand& rj); ++ void Sc_w(Register rd, const MemOperand& rj); ++ ++ void Ll_d(Register rd, const MemOperand& rj); ++ void Sc_d(Register rd, const MemOperand& rj); ++ ++ // These functions assume (and assert) that src1!=src2. It is permitted ++ // for the result to alias either input register. ++ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, ++ Label* out_of_line); ++ ++ // Generate out-of-line cases for the macros above. ++ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2); ++ ++ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; } ++ ++ void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); } ++ ++ inline void Move(Register dst, Handle handle) { li(dst, handle); } ++ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); } ++ ++ inline void Move(Register dst, Register src) { ++ if (dst != src) { ++ mov(dst, src); ++ } ++ } ++ ++ inline void FmoveLow(Register dst_low, FPURegister src) { ++ movfr2gr_s(dst_low, src); ++ } ++ ++ void FmoveLow(FPURegister dst, Register src_low); ++ ++ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); } ++ ++ inline void Move_d(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmov_d(dst, src); ++ } ++ } ++ ++ inline void Move_s(FPURegister dst, FPURegister src) { ++ if (dst != src) { ++ fmov_s(dst, src); ++ } ++ } ++ ++ void Move(FPURegister dst, float imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, double imm) { Move(dst, bit_cast(imm)); } ++ void Move(FPURegister dst, uint32_t src); ++ void Move(FPURegister dst, uint64_t src); ++ ++ // AdddOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void AdddOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ // SubdOverflow sets overflow register to a negative value if ++ // overflow occured, otherwise it is zero or positive ++ void SubdOverflow(Register dst, Register 
left, const Operand& right, ++ Register overflow); ++ // MulOverflow sets overflow register to zero if no overflow occured ++ void MulOverflow(Register dst, Register left, const Operand& right, ++ Register overflow); ++ ++ // Number of instructions needed for calculation of switch table entry address ++ static const int kSwitchTablePrologueSize = 5; ++ ++ // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a ++ // functor/function with 'Label *func(size_t index)' declaration. ++ template ++ void GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction); ++ ++ // Load an object from the root table. ++ void LoadRoot(Register destination, RootIndex index) override; ++ void LoadRoot(Register destination, RootIndex index, Condition cond, ++ Register src1, const Operand& src2); ++ ++ // If the value is a NaN, canonicalize the value, src must be nan. ++ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); ++ ++ // --------------------------------------------------------------------------- ++ // FPU macros. These do not handle special cases like NaN or +- inf. ++ ++ // Convert unsigned word to double. ++ void Ffint_d_uw(FPURegister fd, FPURegister fj); ++ void Ffint_d_uw(FPURegister fd, Register rj); ++ ++ // Convert unsigned long to double. ++ void Ffint_d_ul(FPURegister fd, FPURegister fj); ++ void Ffint_d_ul(FPURegister fd, Register rj); ++ ++ // Convert unsigned word to float. ++ void Ffint_s_uw(FPURegister fd, FPURegister fj); ++ void Ffint_s_uw(FPURegister fd, Register rj); ++ ++ // Convert unsigned long to float. ++ void Ffint_s_ul(FPURegister fd, FPURegister fj); ++ void Ffint_s_ul(FPURegister fd, Register rj); ++ ++ // Convert double to unsigned word. ++ void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch); ++ void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch); ++ ++ // Convert single to unsigned word. ++ void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch); ++ void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch); ++ ++ // Convert double to unsigned long. ++ void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Convert single to unsigned long. ++ void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch, ++ Register result = no_reg); ++ ++ // Round double functions ++ void Trunc_d(FPURegister fd, FPURegister fj); ++ void Round_d(FPURegister fd, FPURegister fj); ++ void Floor_d(FPURegister fd, FPURegister fj); ++ void Ceil_d(FPURegister fd, FPURegister fj); ++ ++ // Round float functions ++ void Trunc_s(FPURegister fd, FPURegister fj); ++ void Round_s(FPURegister fd, FPURegister fj); ++ void Floor_s(FPURegister fd, FPURegister fj); ++ void Ceil_s(FPURegister fd, FPURegister fj); ++ ++ // Jump the register contains a smi. ++ void JumpIfSmi(Register value, Label* smi_label, Register scratch = t7); ++ ++ void JumpIfEqual(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, eq, a, Operand(kScratchReg)); ++ } ++ ++ void JumpIfLessThan(Register a, int32_t b, Label* dest) { ++ li(kScratchReg, Operand(b)); ++ Branch(dest, lt, a, Operand(kScratchReg)); ++ } ++ ++ // Push a standard frame, consisting of ra, fp, context and JS function. 
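// Editorial sketch, not part of the patch: a typical use of the
// GenerateSwitchTable declaration above. `index_reg`, `case_count` and the
// `cases` vector are hypothetical names; the lambda satisfies the documented
// 'Label* (size_t)' contract, and each emitted table slot is an 8-byte label
// address (hence the kPointerSizeLog2 scaling in the implementation further
// down in this header).

std::vector<Label> cases(case_count);
__ GenerateSwitchTable(index_reg, cases.size(),
                       [&cases](size_t i) -> Label* { return &cases[i]; });
for (size_t i = 0; i < cases.size(); ++i) {
  __ bind(&cases[i]);
  // ... code for case i ...
}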
++ void PushStandardFrame(Register function_reg); ++ ++ // Get the actual activation frame alignment for target environment. ++ static int ActivationFrameAlignment(); ++ ++ // Load Scaled Address instructions. Parameter sa (shift argument) must be ++ // between [1, 31] (inclusive). The scratch register may be clobbered. ++ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch = t7); ++ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, ++ Register scratch = t7); ++ ++ // Compute the start of the generated instruction stream from the current PC. ++ // This is an alternative to embedding the {CodeObject} handle as a reference. ++ void ComputeCodeStartAddress(Register dst); ++ ++ void ResetSpeculationPoisonRegister(); ++ ++ // Control-flow integrity: ++ ++ // Define a function entrypoint. This doesn't emit any code for this ++ // architecture, as control-flow integrity is not supported for it. ++ void CodeEntry() {} ++ // Define an exception handler. ++ void ExceptionHandler() {} ++ // Define an exception handler and bind a label. ++ void BindExceptionHandler(Label* label) { bind(label); } ++ ++ protected: ++ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); ++ inline int32_t GetOffset(Label* L, OffsetSize bits); ++ ++ private: ++ bool has_double_zero_reg_set_ = false; ++ ++ // Performs a truncating conversion of a floating point number as used by ++ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it ++ // succeeds, otherwise falls through if result is saturated. On return ++ // 'result' either holds answer, or is clobbered on fall through. ++ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, ++ Label* done); ++ ++ bool BranchShortOrFallback(Label* L, Condition cond, Register rj, ++ const Operand& rk, bool need_link); ++ ++ // f32 or f64 ++ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, ++ CFRegister cd, bool f32 = true); ++ ++ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, ++ bool f32 = true); ++ ++ void CallCFunctionHelper(Register function, int num_reg_arguments, ++ int num_double_arguments); ++ ++ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); ++ ++ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); ++ ++ // Push a fixed frame, consisting of ra, fp. ++ void PushCommonFrame(Register marker_reg = no_reg); ++ ++ void CallRecordWriteStub(Register object, Register address, ++ RememberedSetAction remembered_set_action, ++ SaveFPRegsMode fp_mode, Handle code_target, ++ Address wasm_target); ++}; ++ ++// MacroAssembler implements a collection of frequently used macros. ++class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { ++ public: ++ using TurboAssembler::TurboAssembler; ++ ++ bool IsNear(Label* L, Condition cond, int rs_reg); ++ ++ // Swap two registers. If the scratch register is omitted then a slightly ++ // less efficient form using xor instead of mov is emitted. ++ void Swap(Register reg1, Register reg2, Register scratch = no_reg); ++ ++ void PushRoot(RootIndex index) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Push(scratch); ++ } ++ ++ // Compare the object in a register to a value and jump if they are equal. 
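// Editorial sketch, not part of the patch: the scratch-free form of Swap
// mentioned above is the classic three-XOR exchange. It needs no temporary
// but forms a serial dependency chain, which is why the comment calls it
// "slightly less efficient" than three moves through a scratch register.

void XorSwap(uint64_t& a, uint64_t& b) {
  // Assumes a and b are distinct objects; x ^= x would zero x.
  a ^= b;  // a now holds a0 ^ b0
  b ^= a;  // b now holds the original a
  a ^= b;  // a now holds the original b
}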
++ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_equal, eq, with, Operand(scratch)); ++ } ++ ++ // Compare the object in a register to a value and jump if they are not equal. ++ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) { ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ LoadRoot(scratch, index); ++ Branch(if_not_equal, ne, with, Operand(scratch)); ++ } ++ ++ // Checks if value is in range [lower_limit, higher_limit] using a single ++ // comparison. ++ void JumpIfIsInRange(Register value, unsigned lower_limit, ++ unsigned higher_limit, Label* on_in_range); ++ ++ // --------------------------------------------------------------------------- ++ // GC Support ++ ++ // Notify the garbage collector that we wrote a pointer into an object. ++ // |object| is the object being stored into, |value| is the object being ++ // stored. value and scratch registers are clobbered by the operation. ++ // The offset is the offset from the start of the object, not the offset from ++ // the tagged HeapObject pointer. For use with FieldOperand(reg, off). ++ void RecordWriteField( ++ Register object, int offset, Register value, Register scratch, ++ RAStatus ra_status, SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ // For a given |object| notify the garbage collector that the slot |address| ++ // has been written. |value| is the object being stored. The value and ++ // address registers are clobbered by the operation. ++ void RecordWrite( ++ Register object, Register address, Register value, RAStatus ra_status, ++ SaveFPRegsMode save_fp, ++ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, ++ SmiCheck smi_check = INLINE_SMI_CHECK); ++ ++ void Pref(int32_t hint, const MemOperand& rs); ++ ++ // --------------------------------------------------------------------------- ++ // Pseudo-instructions. ++ ++ void LoadWordPair(Register rd, const MemOperand& rj, Register scratch); ++ void StoreWordPair(Register rd, const MemOperand& rj, Register scratch); ++ ++ // Convert double to unsigned long. ++ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch); ++ ++ void Ftintrz_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrne_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrm_l_d(FPURegister fd, FPURegister fj); ++ void Ftintrp_l_d(FPURegister fd, FPURegister fj); ++ ++ void Ftintrz_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrne_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrm_w_d(FPURegister fd, FPURegister fj); ++ void Ftintrp_w_d(FPURegister fd, FPURegister fj); ++ ++ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk); ++ ++ // Truncates a double using a specific rounding mode, and writes the value ++ // to the result register. ++ // The except_flag will contain any exceptions caused by the instruction. ++ // If check_inexact is kDontCheckForInexactConversion, then the inexact ++ // exception is masked. 
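// Editorial sketch, not part of the patch: why unsigned-truncation helpers
// such as Ftintrz_uw_d / Ftintrz_l_ud above exist at all. As on MIPS, the
// scalar ftint conversions apparently target signed integers, so inputs at
// or above 2^31 need a bias step. A C++ model of the intended result
// (truncation toward zero; saturation of out-of-range inputs is left to the
// callers):

uint32_t TruncDoubleToUint32(double x) {
  if (x >= 2147483648.0) {  // >= 2^31: beyond the signed int32 range
    return static_cast<uint32_t>(x - 2147483648.0) | 0x80000000u;
  }
  return static_cast<uint32_t>(static_cast<int32_t>(x));
}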
++ void EmitFPUTruncate( ++ FPURoundingMode rounding_mode, Register result, ++ DoubleRegister double_input, Register scratch, ++ DoubleRegister double_scratch, Register except_flag, ++ CheckForInexactConversion check_inexact = kDontCheckForInexactConversion); ++ ++ // Enter exit frame. ++ // argc - argument count to be dropped by LeaveExitFrame. ++ // save_doubles - saves FPU registers on stack, currently disabled. ++ // stack_space - extra stack space. ++ void EnterExitFrame(bool save_doubles, int stack_space = 0, ++ StackFrame::Type frame_type = StackFrame::EXIT); ++ ++ // Leave the current exit frame. ++ void LeaveExitFrame(bool save_doubles, Register arg_count, ++ bool do_return = NO_EMIT_RETURN, ++ bool argument_count_is_length = false); ++ ++ void LoadMap(Register destination, Register object); ++ ++ // Make sure the stack is aligned. Only emits code in debug mode. ++ void AssertStackIsAligned(); ++ ++ // Load the global proxy from the current context. ++ void LoadGlobalProxy(Register dst) { ++ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); ++ } ++ ++ void LoadNativeContextSlot(int index, Register dst); ++ ++ // Load the initial map from the global function. The registers ++ // function and map can be the same, function is then overwritten. ++ void LoadGlobalFunctionInitialMap(Register function, Register map, ++ Register scratch); ++ ++ // ------------------------------------------------------------------------- ++ // JavaScript invokes. ++ ++ // Invoke the JavaScript function code by either calling or jumping. ++ void InvokeFunctionCode(Register function, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // On function call, call into the debugger if necessary. ++ void CheckDebugHook(Register fun, Register new_target, ++ Register expected_parameter_count, ++ Register actual_parameter_count); ++ ++ // Invoke the JavaScript function in the given register. Changes the ++ // current context to the context in the function before invoking. ++ void InvokeFunctionWithNewTarget(Register function, Register new_target, ++ Register actual_parameter_count, ++ InvokeFlag flag); ++ void InvokeFunction(Register function, Register expected_parameter_count, ++ Register actual_parameter_count, InvokeFlag flag); ++ ++ // Frame restart support. ++ void MaybeDropFrames(); ++ ++ // Exception handling. ++ ++ // Push a new stack handler and link into stack handler chain. ++ void PushStackHandler(); ++ ++ // Unlink the stack handler on top of the stack from the stack handler chain. ++ // Must preserve the result register. ++ void PopStackHandler(); ++ ++ // ------------------------------------------------------------------------- ++ // Support functions. ++ ++ void GetObjectType(Register function, Register map, Register type_reg); ++ ++ // ------------------------------------------------------------------------- ++ // Runtime calls. ++ ++ // Call a runtime routine. ++ void CallRuntime(const Runtime::Function* f, int num_arguments, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs); ++ ++ // Convenience function: Same as above, but takes the fid instead. ++ void CallRuntime(Runtime::FunctionId fid, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { ++ const Runtime::Function* function = Runtime::FunctionForId(fid); ++ CallRuntime(function, function->nargs, save_doubles); ++ } ++ ++ // Convenience function: Same as above, but takes the fid instead. 
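// Editorial sketch, not part of the patch: the fid-based CallRuntime
// overloads above let call sites omit the argument count entirely, e.g.:

__ CallRuntime(Runtime::kStackGuard);  // nargs resolved via FunctionForId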
++ void CallRuntime(Runtime::FunctionId fid, int num_arguments, ++ SaveFPRegsMode save_doubles = kDontSaveFPRegs) { ++ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); ++ } ++ ++ // Convenience function: tail call a runtime routine (jump). ++ void TailCallRuntime(Runtime::FunctionId fid); ++ ++ // Jump to the builtin routine. ++ void JumpToExternalReference(const ExternalReference& builtin, ++ bool builtin_exit_frame = false); ++ ++ // Generates a trampoline to jump to the off-heap instruction stream. ++ void JumpToInstructionStream(Address entry); ++ ++ // --------------------------------------------------------------------------- ++ // In-place weak references. ++ void LoadWeakValue(Register out, Register in, Label* target_if_cleared); ++ ++ // ------------------------------------------------------------------------- ++ // StatsCounter support. ++ ++ void IncrementCounter(StatsCounter* counter, int value, Register scratch1, ++ Register scratch2); ++ void DecrementCounter(StatsCounter* counter, int value, Register scratch1, ++ Register scratch2); ++ ++ // ------------------------------------------------------------------------- ++ // Smi utilities. ++ ++ void SmiTag(Register dst, Register src) { ++ STATIC_ASSERT(kSmiTag == 0); ++ if (SmiValuesAre32Bits()) { ++ slli_d(dst, src, 32); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ add_w(dst, src, src); ++ } ++ } ++ ++ void SmiTag(Register reg) { SmiTag(reg, reg); } ++ ++ // Left-shifted from int32 equivalent of Smi. ++ void SmiScale(Register dst, Register src, int scale) { ++ if (SmiValuesAre32Bits()) { ++ // The int portion is upper 32-bits of 64-bit word. ++ srai_d(dst, src, kSmiShift - scale); ++ } else { ++ DCHECK(SmiValuesAre31Bits()); ++ DCHECK_GE(scale, kSmiTagSize); ++ slli_w(dst, src, scale - kSmiTagSize); ++ } ++ } ++ ++ // Test if the register contains a smi. ++ inline void SmiTst(Register value, Register scratch) { ++ And(scratch, value, Operand(kSmiTagMask)); ++ } ++ ++ // Jump if the register contains a non-smi. ++ void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch); ++ ++ // Abort execution if argument is a smi, enabled via --debug-code. ++ void AssertNotSmi(Register object); ++ void AssertSmi(Register object); ++ ++ // Abort execution if argument is not a Constructor, enabled via --debug-code. ++ void AssertConstructor(Register object); ++ ++ // Abort execution if argument is not a JSFunction, enabled via --debug-code. ++ void AssertFunction(Register object); ++ ++ // Abort execution if argument is not a JSBoundFunction, ++ // enabled via --debug-code. ++ void AssertBoundFunction(Register object); ++ ++ // Abort execution if argument is not a JSGeneratorObject (or subclass), ++ // enabled via --debug-code. ++ void AssertGeneratorObject(Register object); ++ ++ // Abort execution if argument is not undefined or an AllocationSite, enabled ++ // via --debug-code. ++ void AssertUndefinedOrAllocationSite(Register object, Register scratch); ++ ++ template ++ void DecodeField(Register dst, Register src) { ++ Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift); ++ } ++ ++ template ++ void DecodeField(Register reg) { ++ DecodeField(reg, reg); ++ } ++ ++ private: ++ // Helper functions for generating invokes. ++ void InvokePrologue(Register expected_parameter_count, ++ Register actual_parameter_count, Label* done, ++ InvokeFlag flag); ++ ++ // Compute memory operands for safepoint stack slots. 
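// Editorial sketch, not part of the patch: the two smi layouts SmiTag above
// handles, as plain C++. With 32-bit smis the payload occupies the upper
// word (slli_d by 32); with 31-bit smis it is shifted left one bit, which is
// exactly what add_w(dst, src, src) computes.

int64_t SmiTag64(int32_t value) {  // SmiValuesAre32Bits()
  return static_cast<int64_t>(value) << 32;
}
int32_t SmiTag31(int32_t value) {  // SmiValuesAre31Bits(), kSmiTagSize == 1
  return value << 1;  // same as value + value
}
// Either way the tag bits are zero, so SmiTst's
// And(scratch, value, kSmiTagMask) yields zero exactly for smis.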
++ static int SafepointRegisterStackIndex(int reg_code); ++ ++ // Needs access to SafepointRegisterStackIndex for compiled frame ++ // traversal. ++ friend class StandardFrame; ++ ++ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); ++}; ++ ++template ++void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, ++ Func GetLabelFunction) { ++ // Ensure that dd-ed labels following this instruction use 8 bytes aligned ++ // addresses. ++ BlockTrampolinePoolFor(static_cast(case_count) * 2 + ++ kSwitchTablePrologueSize); ++ UseScratchRegisterScope temps(this); ++ Register scratch = temps.Acquire(); ++ Align(8); // next is 4 instrs. ++ pcaddi(scratch, 4); ++ // alsl_d will do sa ++ alsl_d(scratch, index, scratch, kPointerSizeLog2); ++ Ld_d(scratch, MemOperand(scratch, 0)); ++ jirl(zero_reg, scratch, 0); ++ for (size_t index = 0; index < case_count; ++index) { ++ dd(GetLabelFunction(index)); ++ } ++} ++ ++#define ACCESS_MASM(masm) masm-> ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_ +diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h +new file mode 100644 +index 00000000..1f57f788 +--- /dev/null ++++ b/deps/v8/src/codegen/loong64/register-loong64.h +@@ -0,0 +1,330 @@ ++// Copyright 2018 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_ ++#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_ ++ ++#include "src/codegen/loong64/constants-loong64.h" ++#include "src/codegen/register.h" ++#include "src/codegen/reglist.h" ++ ++namespace v8 { ++namespace internal { ++ ++// clang-format off ++#define GENERAL_REGISTERS(V) \ ++ V(zero_reg) V(ra) V(gp) V(sp) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \ ++ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \ ++ V(tp) V(fp) \ ++ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \ ++ ++#define ALLOCATABLE_GENERAL_REGISTERS(V) \ ++ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \ ++ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(s7) ++ ++#define DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \ ++ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \ ++ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31) ++ ++#define FLOAT_REGISTERS DOUBLE_REGISTERS ++#define SIMD128_REGISTERS(V) \ ++ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \ ++ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \ ++ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \ ++ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31) ++ ++#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ ++ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \ ++ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \ ++ V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) ++// clang-format on ++ ++// Note that the bit values must match those used in actual instruction ++// encoding. ++const int kNumRegs = 32; ++ ++const RegList kJSCallerSaved = 1 << 4 | // a0 ++ 1 << 5 | // a1 ++ 1 << 6 | // a2 ++ 1 << 7 | // a3 ++ 1 << 8 | // a4 ++ 1 << 9 | // a5 ++ 1 << 10 | // a6 ++ 1 << 11 | // a7 ++ 1 << 12 | // t0 ++ 1 << 13 | // t1 ++ 1 << 14 | // t2 ++ 1 << 15 | // t3 ++ 1 << 16 | // t4 ++ 1 << 17 | // t5 ++ 1 << 20; // t8 ++ ++const int kNumJSCallerSaved = 15; ++ ++// Callee-saved registers preserved when switching from C to JavaScript. 
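// Editorial sketch, not part of the patch: GENERAL_REGISTERS above is an
// X-macro; applying it to a one-line V(...) definition stamps out one item
// per register, which is how the DECLARE_REGISTER / DECLARE_DOUBLE_REGISTER
// blocks later in this header produce all the 'constexpr Register ...'
// constants from a single list. The same trick can count the list:

#define COUNT_REGISTER(R) +1
constexpr int kGeneralRegisterCount = 0 GENERAL_REGISTERS(COUNT_REGISTER);
#undef COUNT_REGISTER
static_assert(kGeneralRegisterCount == kNumRegs, "register list size");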
++const RegList kCalleeSaved = 1 << 22 |  // fp
++                             1 << 23 |  // s0
++                             1 << 24 |  // s1
++                             1 << 25 |  // s2
++                             1 << 26 |  // s3
++                             1 << 27 |  // s4
++                             1 << 28 |  // s5
++                             1 << 29 |  // s6 (roots in JavaScript code)
++                             1 << 30 |  // s7 (cp in JavaScript code)
++                             1 << 31;   // s8
++
++const int kNumCalleeSaved = 10;
++
++const RegList kCalleeSavedFPU = 1 << 24 |  // f24
++                                1 << 25 |  // f25
++                                1 << 26 |  // f26
++                                1 << 27 |  // f27
++                                1 << 28 |  // f28
++                                1 << 29 |  // f29
++                                1 << 30 |  // f30
++                                1 << 31;   // f31
++
++const int kNumCalleeSavedFPU = 8;
++
++const RegList kCallerSavedFPU = 1 << 0 |   // f0
++                                1 << 1 |   // f1
++                                1 << 2 |   // f2
++                                1 << 3 |   // f3
++                                1 << 4 |   // f4
++                                1 << 5 |   // f5
++                                1 << 6 |   // f6
++                                1 << 7 |   // f7
++                                1 << 8 |   // f8
++                                1 << 9 |   // f9
++                                1 << 10 |  // f10
++                                1 << 11 |  // f11
++                                1 << 12 |  // f12
++                                1 << 13 |  // f13
++                                1 << 14 |  // f14
++                                1 << 15 |  // f15
++                                1 << 16 |  // f16
++                                1 << 17 |  // f17
++                                1 << 18 |  // f18
++                                1 << 19 |  // f19
++                                1 << 20 |  // f20
++                                1 << 21 |  // f21
++                                1 << 22 |  // f22
++                                1 << 23;   // f23
++
++// Number of registers for which space is reserved in safepoints. Must be a
++// multiple of 8.
++const int kNumSafepointRegisters = 32;
++
++// Define the list of registers actually saved at safepoints.
++// Note that the number of saved registers may be smaller than the reserved
++// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
++const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
++const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
++
++const int kUndefIndex = -1;
++// Map from a register's code to the index of its safepoint stack slot.
++const int kSafepointRegisterStackIndexMap[kNumRegs] = {kUndefIndex,  // zero_reg
++                                                       kUndefIndex,  // ra
++                                                       kUndefIndex,  // gp
++                                                       kUndefIndex,  // sp
++                                                       0,            // a0
++                                                       1,            // a1
++                                                       2,            // a2
++                                                       3,            // a3
++                                                       4,            // a4
++                                                       5,            // a5
++                                                       6,            // a6
++                                                       7,            // a7
++                                                       8,            // t0
++                                                       9,            // t1
++                                                       10,           // t2
++                                                       11,           // t3
++                                                       12,           // t4
++                                                       13,           // t5
++                                                       kUndefIndex,  // t6
++                                                       kUndefIndex,  // t7
++                                                       14,           // t8
++                                                       kUndefIndex,  // tp
++                                                       15,           // fp
++                                                       16,           // s0
++                                                       17,           // s1
++                                                       18,           // s2
++                                                       19,           // s3
++                                                       20,           // s4
++                                                       21,           // s5
++                                                       22,           // s6
++                                                       23,           // s7
++                                                       24};          // s8
++
++// CPU Registers.
++//
++// 1) We would prefer to use an enum, but enum values are assignment-
++// compatible with int, which has caused code-generation bugs.
++//
++// 2) We would prefer to use a class instead of a struct but we don't like
++// the register initialization to depend on the particular initialization
++// order (which appears to be different on OS X, Linux, and Windows for the
++// installed versions of C++ we tried). Using a struct permits C-style
++// "initialization". Also, the Register objects cannot be const as this
++// forces initialization stubs in MSVC, making us dependent on initialization
++// order.
++//
++// 3) By not using an enum, we are possibly preventing the compiler from
++// doing certain constant folds, which may significantly reduce the
++// code generated for some assembly instructions (because they boil down
++// to a few constants). If this is a problem, we could change the code
++// such that we use an enum in optimized mode, and the struct in debug
++// mode. This way we get the compile-time error checking in debug mode
++// and best performance in optimized code.
++
++// -----------------------------------------------------------------------------
++// Implementation of Register and FPURegister.
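// Editorial sketch, not part of the patch: a compile-time cross-check of the
// register lists above. PopCount is a hypothetical helper (V8 itself has
// base::bits::CountPopulation); each register named in the lists must map to
// exactly one safepoint slot index in [0, kNumSafepointSavedRegisters).

constexpr int PopCount(RegList bits) {
  int n = 0;
  while (bits != 0) {
    bits &= bits - 1;  // clear the lowest set bit
    ++n;
  }
  return n;
}
static_assert(PopCount(kJSCallerSaved) == kNumJSCallerSaved, "caller-saved");
static_assert(PopCount(kCalleeSaved) == kNumCalleeSaved, "callee-saved");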
++ ++enum RegisterCode { ++#define REGISTER_CODE(R) kRegCode_##R, ++ GENERAL_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kRegAfterLast ++}; ++ ++class Register : public RegisterBase { ++ public: ++ static constexpr int kMantissaOffset = 0; ++ static constexpr int kExponentOffset = 4; ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr Register(int code) : RegisterBase(code) {} ++}; ++ ++// s7: context register ++// s3: scratch register ++// s4: scratch register 2 ++#define DECLARE_REGISTER(R) \ ++ constexpr Register R = Register::from_code(kRegCode_##R); ++GENERAL_REGISTERS(DECLARE_REGISTER) ++#undef DECLARE_REGISTER ++ ++constexpr Register no_reg = Register::no_reg(); ++ ++int ToNumber(Register reg); ++ ++Register ToRegister(int num); ++ ++constexpr bool kPadArguments = false; ++constexpr bool kSimpleFPAliasing = true; ++constexpr bool kSimdMaskRegisters = false; ++ ++enum DoubleRegisterCode { ++#define REGISTER_CODE(R) kDoubleCode_##R, ++ DOUBLE_REGISTERS(REGISTER_CODE) ++#undef REGISTER_CODE ++ kDoubleAfterLast ++}; ++ ++// Coprocessor register. ++class FPURegister : public RegisterBase { ++ public: ++ FPURegister low() const { ++ // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1. ++ // Find low reg of a Double-reg pair, which is the reg itself. ++ DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even. ++ return FPURegister::from_code(code()); ++ } ++ ++ private: ++ friend class RegisterBase; ++ explicit constexpr FPURegister(int code) : RegisterBase(code) {} ++}; ++ ++enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 }; ++ ++using FloatRegister = FPURegister; ++ ++using DoubleRegister = FPURegister; ++ ++// TODO here only for build success ++using Simd128Register = FPURegister; ++ ++#define DECLARE_DOUBLE_REGISTER(R) \ ++ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); ++DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER) ++#undef DECLARE_DOUBLE_REGISTER ++ ++constexpr DoubleRegister no_dreg = DoubleRegister::no_reg(); ++ ++// Register aliases. ++// cp is assumed to be a callee saved register. ++constexpr Register kRootRegister = s6; ++constexpr Register cp = s7; ++constexpr Register kScratchReg = s3; ++constexpr Register kScratchReg2 = s4; ++constexpr DoubleRegister kScratchDoubleReg = f30; ++// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0. ++constexpr DoubleRegister kDoubleRegZero = f28; ++ ++// FPU (coprocessor 1) control registers. ++// Currently only FCSR0 is implemented. ++// TODO fscr0 fcsr1 fcsr2 fscsr3 ++struct FPUControlRegister { ++ bool is_valid() const { return reg_code == kFCSRRegister; } ++ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; } ++ int code() const { ++ DCHECK(is_valid()); ++ return reg_code; ++ } ++ int bit() const { ++ DCHECK(is_valid()); ++ return 1 << reg_code; ++ } ++ void setcode(int f) { ++ reg_code = f; ++ DCHECK(is_valid()); ++ } ++ // Unfortunately we can't make this private in a struct. ++ int reg_code; ++}; ++ ++constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister}; ++constexpr FPUControlRegister FCSR = {kFCSRRegister}; ++ ++// Define {RegisterName} methods for the register types. ++DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS) ++DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS) ++ ++// Give alias names to registers for calling conventions. 
++constexpr Register kReturnRegister0 = a0; ++constexpr Register kReturnRegister1 = a1; ++constexpr Register kReturnRegister2 = a2; ++constexpr Register kJSFunctionRegister = a1; ++constexpr Register kContextRegister = s7; ++constexpr Register kAllocateSizeRegister = a0; ++constexpr Register kSpeculationPoisonRegister = t3; ++constexpr Register kInterpreterAccumulatorRegister = a0; ++constexpr Register kInterpreterBytecodeOffsetRegister = t0; ++constexpr Register kInterpreterBytecodeArrayRegister = t1; ++constexpr Register kInterpreterDispatchTableRegister = t2; ++ ++constexpr Register kJavaScriptCallArgCountRegister = a0; ++constexpr Register kJavaScriptCallCodeStartRegister = a2; ++constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister; ++constexpr Register kJavaScriptCallNewTargetRegister = a3; ++constexpr Register kJavaScriptCallExtraArg1Register = a2; ++ ++constexpr Register kOffHeapTrampolineRegister = t7; ++constexpr Register kRuntimeCallFunctionRegister = a1; ++constexpr Register kRuntimeCallArgCountRegister = a0; ++constexpr Register kRuntimeCallArgvRegister = a2; ++constexpr Register kWasmInstanceRegister = a0; ++constexpr Register kWasmCompileLazyFuncIndexRegister = t0; ++ ++constexpr DoubleRegister kFPReturnRegister0 = f0; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_ +diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h +index 01175e58..f2a487ac 100644 +--- a/deps/v8/src/codegen/macro-assembler.h ++++ b/deps/v8/src/codegen/macro-assembler.h +@@ -49,6 +49,9 @@ enum AllocationFlags { + #elif V8_TARGET_ARCH_MIPS64 + #include "src/codegen/mips64/constants-mips64.h" + #include "src/codegen/mips64/macro-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/codegen/loong64/constants-loong64.h" ++#include "src/codegen/loong64/macro-assembler-loong64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/constants-s390.h" + #include "src/codegen/s390/macro-assembler-s390.h" +diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc +index 751d0f87..63a9bf87 100644 +--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc ++++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc +@@ -997,7 +997,7 @@ void Assembler::next(Label* L, bool is_internal) { + } + + bool Assembler::is_near(Label* L) { +- DCHECK(L->is_bound()); ++ if (L == nullptr || !L->is_bound()) return true; + return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; + } + +diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h +index f70e46f8..c585840a 100644 +--- a/deps/v8/src/codegen/mips64/assembler-mips64.h ++++ b/deps/v8/src/codegen/mips64/assembler-mips64.h +@@ -1864,6 +1864,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { + // instruction. We use this information to trigger different mode of + // branch instruction generation, where we use jump instructions rather + // than regular branch instructions. ++ // TODO can this be optimied?????? 
+ bool trampoline_emitted_; + static constexpr int kInvalidSlotPos = -1; + +diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h +index 21a72330..6096413e 100644 +--- a/deps/v8/src/codegen/register-arch.h ++++ b/deps/v8/src/codegen/register-arch.h +@@ -22,6 +22,8 @@ + #include "src/codegen/mips/register-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/codegen/mips64/register-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/codegen/loong64/register-loong64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/codegen/s390/register-s390.h" + #else +diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc +index 5752b463..50cad4e2 100644 +--- a/deps/v8/src/codegen/register-configuration.cc ++++ b/deps/v8/src/codegen/register-configuration.cc +@@ -58,6 +58,8 @@ static int get_num_allocatable_double_registers() { + kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_MIPS64 + kMaxAllocatableDoubleRegisterCount; ++#elif V8_TARGET_ARCH_LOONG64 ++ kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_PPC + kMaxAllocatableDoubleRegisterCount; + #elif V8_TARGET_ARCH_PPC64 +diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc +index 2e62c6f1..d60281eb 100644 +--- a/deps/v8/src/codegen/reloc-info.cc ++++ b/deps/v8/src/codegen/reloc-info.cc +@@ -330,7 +330,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { + return false; + #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ + defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ +- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ++ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \ ++ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) + return true; + #endif + } +diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h +index 70b17ab6..3347ef4c 100644 +--- a/deps/v8/src/common/globals.h ++++ b/deps/v8/src/common/globals.h +@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024; + #if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390) + #define USE_SIMULATOR 1 + #endif ++#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64) ++#define USE_SIMULATOR 1 ++#endif + #endif + + // Determine whether the architecture uses an embedded constant pool +diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h +index 84d5d249..c71cb229 100644 +--- a/deps/v8/src/compiler/backend/instruction-codes.h ++++ b/deps/v8/src/compiler/backend/instruction-codes.h +@@ -17,6 +17,8 @@ + #include "src/compiler/backend/mips/instruction-codes-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/compiler/backend/mips64/instruction-codes-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/compiler/backend/loong64/instruction-codes-loong64.h" + #elif V8_TARGET_ARCH_X64 + #include "src/compiler/backend/x64/instruction-codes-x64.h" + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 +diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc +index c2022b57..c8e1baf3 100644 +--- a/deps/v8/src/compiler/backend/instruction-selector.cc ++++ b/deps/v8/src/compiler/backend/instruction-selector.cc +@@ -2588,7 +2588,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { + #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ +- 
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 ++ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_LOONG64 + void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } + + void InstructionSelector::VisitWord64AtomicStore(Node* node) { +@@ -2613,7 +2613,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { + UNIMPLEMENTED(); + } + #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64 +- // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 ++ // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 && ++ // !V8_TARGET_ARCH_LOONG64 + + #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM + // This is only needed on 32-bit to split the 64-bit value into two operands. +@@ -2627,7 +2628,7 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) { + + #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X + #if !V8_TARGET_ARCH_ARM64 +-#if !V8_TARGET_ARCH_MIPS64 ++#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 + void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } + void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } +diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc +new file mode 100644 +index 00000000..af7c9155 +--- /dev/null ++++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc +@@ -0,0 +1,2844 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/callable.h" ++#include "src/codegen/loong64/constants-loong64.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/optimized-compilation-info.h" ++#include "src/compiler/backend/code-generator-impl.h" ++#include "src/compiler/backend/code-generator.h" ++#include "src/compiler/backend/gap-resolver.h" ++#include "src/compiler/node-matchers.h" ++#include "src/compiler/osr.h" ++#include "src/heap/memory-chunk.h" ++#include "src/wasm/wasm-code-manager.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++#define __ tasm()-> ++ ++// TODO(plind): consider renaming these macros. ++#define TRACE_MSG(msg) \ ++ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ ++ __LINE__) ++ ++#define TRACE_UNIMPL() \ ++ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", __FUNCTION__, \ ++ __LINE__) ++ ++// Adds Loong64-specific methods to convert InstructionOperands. ++class Loong64OperandConverter final : public InstructionOperandConverter { ++ public: ++ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr) ++ : InstructionOperandConverter(gen, instr) {} ++ ++ FloatRegister OutputSingleRegister(size_t index = 0) { ++ return ToSingleRegister(instr_->OutputAt(index)); ++ } ++ ++ FloatRegister InputSingleRegister(size_t index) { ++ return ToSingleRegister(instr_->InputAt(index)); ++ } ++ ++ FloatRegister ToSingleRegister(InstructionOperand* op) { ++ // Single (Float) and Double register namespace is same on LOONG64, ++ // both are typedefs of FPURegister. 
++ return ToDoubleRegister(op); ++ } ++ ++ Register InputOrZeroRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) { ++ DCHECK_EQ(0, InputInt32(index)); ++ return zero_reg; ++ } ++ return InputRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroDoubleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputDoubleRegister(index); ++ } ++ ++ DoubleRegister InputOrZeroSingleRegister(size_t index) { ++ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; ++ ++ return InputSingleRegister(index); ++ } ++ ++ Operand InputImmediate(size_t index) { ++ Constant constant = ToConstant(instr_->InputAt(index)); ++ switch (constant.type()) { ++ case Constant::kInt32: ++ return Operand(constant.ToInt32()); ++ case Constant::kInt64: ++ return Operand(constant.ToInt64()); ++ case Constant::kFloat32: ++ return Operand::EmbeddedNumber(constant.ToFloat32()); ++ case Constant::kFloat64: ++ return Operand::EmbeddedNumber(constant.ToFloat64().value()); ++ case Constant::kExternalReference: ++ case Constant::kCompressedHeapObject: ++ case Constant::kHeapObject: ++ // TODO(plind): Maybe we should handle ExtRef & HeapObj here? ++ // maybe not done on arm due to const pool ?? ++ break; ++ case Constant::kDelayedStringConstant: ++ return Operand::EmbeddedStringConstant( ++ constant.ToDelayedStringConstant()); ++ case Constant::kRpoNumber: ++ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64? ++ break; ++ } ++ UNREACHABLE(); ++ } ++ ++ Operand InputOperand(size_t index) { ++ InstructionOperand* op = instr_->InputAt(index); ++ if (op->IsRegister()) { ++ return Operand(ToRegister(op)); ++ } ++ return InputImmediate(index); ++ } ++ ++ MemOperand MemoryOperand(size_t* first_index) { ++ const size_t index = *first_index; ++ switch (AddressingModeField::decode(instr_->opcode())) { ++ case kMode_None: ++ break; ++ case kMode_MRI: ++ *first_index += 2; ++ return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); ++ case kMode_MRR: ++ *first_index += 2; ++ return MemOperand(InputRegister(index + 0), InputRegister(index + 1)); ++ } ++ UNREACHABLE(); ++ } ++ ++ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } ++ ++ MemOperand ToMemOperand(InstructionOperand* op) const { ++ DCHECK_NOT_NULL(op); ++ DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); ++ return SlotToMemOperand(AllocatedOperand::cast(op)->index()); ++ } ++ ++ MemOperand SlotToMemOperand(int slot) const { ++ FrameOffset offset = frame_access_state()->GetFrameOffset(slot); ++ return MemOperand(offset.from_stack_pointer() ? 
sp : fp, offset.offset()); ++ } ++}; ++ ++static inline bool HasRegisterInput(Instruction* instr, size_t index) { ++ return instr->InputAt(index)->IsRegister(); ++} ++ ++namespace { ++ ++class OutOfLineRecordWrite final : public OutOfLineCode { ++ public: ++ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, ++ Register value, Register scratch0, Register scratch1, ++ RecordWriteMode mode, StubCallMode stub_mode) ++ : OutOfLineCode(gen), ++ object_(object), ++ index_(index), ++ value_(value), ++ scratch0_(scratch0), ++ scratch1_(scratch1), ++ mode_(mode), ++ stub_mode_(stub_mode), ++ must_save_lr_(!gen->frame_access_state()->has_frame()), ++ zone_(gen->zone()) {} ++ ++ void Generate() final { ++ if (mode_ > RecordWriteMode::kValueIsPointer) { ++ __ JumpIfSmi(value_, exit()); ++ } ++ __ CheckPageFlag(value_, scratch0_, ++ MemoryChunk::kPointersToHereAreInterestingMask, eq, ++ exit()); ++ __ Add_d(scratch1_, object_, index_); ++ RememberedSetAction const remembered_set_action = ++ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET ++ : OMIT_REMEMBERED_SET; ++ SaveFPRegsMode const save_fp_mode = ++ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; ++ if (must_save_lr_) { ++ // We need to save and restore ra if the frame was elided. ++ __ Push(ra); ++ } ++ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { ++ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); ++ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched when the code ++ // is added to the native module and copied into wasm code space. ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode, wasm::WasmCode::kRecordWrite); ++ } else { ++ __ CallRecordWriteStub(object_, scratch1_, remembered_set_action, ++ save_fp_mode); ++ } ++ if (must_save_lr_) { ++ __ Pop(ra); ++ } ++ } ++ ++ private: ++ Register const object_; ++ Register const index_; ++ Register const value_; ++ Register const scratch0_; ++ Register const scratch1_; ++ RecordWriteMode const mode_; ++ StubCallMode const stub_mode_; ++ bool must_save_lr_; ++ Zone* zone_; ++}; ++ ++#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ ++ class ool_name final : public OutOfLineCode { \ ++ public: \ ++ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ ++ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ ++ \ ++ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ ++ \ ++ private: \ ++ T const dst_; \ ++ T const src1_; \ ++ T const src2_; \ ++ } ++ ++CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister); ++CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister); ++ ++#undef CREATE_OOL_CLASS ++ ++Condition FlagsConditionToConditionCmp(FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ return eq; ++ case kNotEqual: ++ return ne; ++ case kSignedLessThan: ++ return lt; ++ case kSignedGreaterThanOrEqual: ++ return ge; ++ case kSignedLessThanOrEqual: ++ return le; ++ case kSignedGreaterThan: ++ return gt; ++ case kUnsignedLessThan: ++ return lo; ++ case kUnsignedGreaterThanOrEqual: ++ return hs; ++ case kUnsignedLessThanOrEqual: ++ return ls; ++ case kUnsignedGreaterThan: ++ return hi; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: 
++ break; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionTst(FlagsCondition condition) { ++ switch (condition) { ++ case kNotEqual: ++ return ne; ++ case kEqual: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++Condition FlagsConditionToConditionOvf(FlagsCondition condition) { ++ switch (condition) { ++ case kOverflow: ++ return ne; ++ case kNotOverflow: ++ return eq; ++ default: ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, ++ FlagsCondition condition) { ++ switch (condition) { ++ case kEqual: ++ *predicate = true; ++ return CEQ; ++ case kNotEqual: ++ *predicate = false; ++ return CEQ; ++ case kUnsignedLessThan: ++ *predicate = true; ++ return CLT; ++ case kUnsignedGreaterThanOrEqual: ++ *predicate = false; ++ return CLT; ++ case kUnsignedLessThanOrEqual: ++ *predicate = true; ++ return CLE; ++ case kUnsignedGreaterThan: ++ *predicate = false; ++ return CLE; ++ case kUnorderedEqual: ++ case kUnorderedNotEqual: ++ *predicate = true; ++ break; ++ default: ++ *predicate = true; ++ break; ++ } ++ UNREACHABLE(); ++} ++ ++void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, ++ InstructionCode opcode, Instruction* instr, ++ Loong64OperandConverter const& i) { ++ const MemoryAccessMode access_mode = ++ static_cast(MiscField::decode(opcode)); ++ if (access_mode == kMemoryAccessPoisoned) { ++ Register value = i.OutputRegister(); ++ codegen->tasm()->And(value, value, kSpeculationPoisonRegister); ++ } ++} ++ ++} // namespace ++ ++#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ ++ do { \ ++ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ ++ do { \ ++ __ dbar(0); \ ++ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// only use for sub_w and sub_d ++#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ ++ do { \ ++ Label binop; \ ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ __ dbar(0); \ ++ __ bind(&binop); \ ++ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ ++ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? 
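// Editorial sketch, not part of the patch: the ll/sc retry loop emitted by
// ASSEMBLE_ATOMIC_BINOP above behaves like a sequentially consistent
// read-modify-write (dbar(0) fences both sides), with the old value left in
// OutputRegister(0). A C++ model of the semantics, for an atomic subtract:

#include <atomic>
int32_t AtomicSub(std::atomic<int32_t>& cell, int32_t v) {
  // ll.w loads the old value; sub + sc.w retries until the store succeeds.
  return cell.fetch_sub(v, std::memory_order_seq_cst);  // returns old value
}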
++#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ ++ size, bin_instr, representation) \ ++ do { \ ++ Label binop; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(3))); \ ++ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \ ++ __ dbar(0); \ ++ __ bind(&binop); \ ++ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ ++ size, sign_extend); \ ++ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ ++ Operand(i.InputRegister(2))); \ ++ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ ++ size); \ ++ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label exchange; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ ++ __ dbar(0); \ ++ __ bind(&exchange); \ ++ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? ++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ ++ store_conditional) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ __ dbar(0); \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&exit, ne, i.InputRegister(2), \ ++ Operand(i.OutputRegister(0))); \ ++ __ mov(i.TempRegister(2), i.InputRegister(3)); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ ++ Operand(zero_reg)); \ ++ __ bind(&exit); \ ++ __ dbar(0); \ ++ } while (0) ++ ++// TODO remove second dbar? 
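// Editorial sketch, not part of the patch: the compare-exchange macro above
// implements the usual CAS contract. It stores InputRegister(3) only when
// the loaded value equals InputRegister(2), and always returns the value
// observed in memory. A C++ model of the same semantics:

#include <atomic>
int32_t CompareExchange(std::atomic<int32_t>& cell, int32_t expected,
                        int32_t replacement) {
  cell.compare_exchange_strong(expected, replacement,
                               std::memory_order_seq_cst);
  return expected;  // updated to the observed value on failure
}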
++#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ ++ load_linked, store_conditional, sign_extend, size, representation) \ ++ do { \ ++ Label compareExchange; \ ++ Label exit; \ ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ ++ if (representation == 32) { \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ ++ } else { \ ++ DCHECK_EQ(representation, 64); \ ++ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ ++ } \ ++ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \ ++ Operand(i.TempRegister(1))); \ ++ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \ ++ __ dbar(0); \ ++ __ bind(&compareExchange); \ ++ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \ ++ size, sign_extend); \ ++ __ BranchShort(&exit, ne, i.InputRegister(2), \ ++ Operand(i.OutputRegister(0))); \ ++ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ ++ size); \ ++ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ ++ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ ++ Operand(zero_reg)); \ ++ __ bind(&exit); \ ++ __ dbar(0); \ ++ } while (0) ++ ++#define ASSEMBLE_IEEE754_BINOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 2, kScratchReg); \ ++ __ MovToFloatParameters(i.InputDoubleRegister(0), \ ++ i.InputDoubleRegister(1)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_IEEE754_UNOP(name) \ ++ do { \ ++ FrameScope scope(tasm(), StackFrame::MANUAL); \ ++ __ PrepareCallCFunction(0, 1, kScratchReg); \ ++ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ ++ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ ++ /* Move the result in the double result register. */ \ ++ __ MovFromFloatResult(i.OutputDoubleRegister()); \ ++ } while (0) ++ ++#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ ++ do { \ ++ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ ++ i.InputSimd128Register(1)); \ ++ } while (0) ++ ++void CodeGenerator::AssembleDeconstructFrame() { ++ __ mov(sp, fp); ++ __ Pop(ra, fp); ++} ++ ++void CodeGenerator::AssemblePrepareTailCall() { ++ if (frame_access_state()->has_frame()) { ++ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); ++ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); ++ } ++ frame_access_state()->SetFrameAccessToSP(); ++} ++ ++void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, ++ Register scratch1, ++ Register scratch2, ++ Register scratch3) { ++ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); ++ Label done; ++ ++ // Check if current frame is an arguments adaptor frame. ++ __ Ld_d(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); ++ __ Branch(&done, ne, scratch3, ++ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); ++ ++ // Load arguments count from current arguments adaptor frame (note, it ++ // does not include receiver). 
++ Register caller_args_count_reg = scratch1; ++ __ Ld_d(caller_args_count_reg, ++ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); ++ __ SmiUntag(caller_args_count_reg); ++ ++ __ PrepareForTailCall(args_reg, caller_args_count_reg, scratch2, scratch3); ++ __ bind(&done); ++} ++ ++namespace { ++ ++void AdjustStackPointerForTailCall(TurboAssembler* tasm, ++ FrameAccessState* state, ++ int new_slot_above_sp, ++ bool allow_shrinkage = true) { ++ int current_sp_offset = state->GetSPToFPSlotCount() + ++ StandardFrameConstants::kFixedSlotCountAboveFp; ++ int stack_slot_delta = new_slot_above_sp - current_sp_offset; ++ if (stack_slot_delta > 0) { ++ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } else if (allow_shrinkage && stack_slot_delta < 0) { ++ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); ++ state->IncreaseSPDelta(stack_slot_delta); ++ } ++} ++ ++} // namespace ++ ++void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot, false); ++} ++ ++void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, ++ int first_unused_stack_slot) { ++ AdjustStackPointerForTailCall(tasm(), frame_access_state(), ++ first_unused_stack_slot); ++} ++ ++// Check that {kJavaScriptCallCodeStartRegister} is correct. ++void CodeGenerator::AssembleCodeStartRegisterCheck() { ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ Assert(eq, AbortReason::kWrongFunctionCodeStart, ++ kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); ++} ++ ++// Check if the code object is marked for deoptimization. If it is, then it ++// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need ++// to: ++// 1. read from memory the word that contains that bit, which can be found in ++// the flags in the referenced {CodeDataContainer} object; ++// 2. test kMarkedForDeoptimizationBit in those flags; and ++// 3. if it is not zero then it jumps to the builtin. ++void CodeGenerator::BailoutIfDeoptimized() { ++ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; ++ __ Ld_d(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); ++ __ Ld_w(kScratchReg, ++ FieldMemOperand(kScratchReg, ++ CodeDataContainer::kKindSpecificFlagsOffset)); ++ __ And(kScratchReg, kScratchReg, ++ Operand(1 << Code::kMarkedForDeoptimizationBit)); ++ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), ++ RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); ++} ++ ++void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { ++ // Calculate a mask which has all bits set in the normal case, but has all ++ // bits cleared if we are speculatively executing the wrong PC. ++ __ li(kSpeculationPoisonRegister, -1); ++ __ ComputeCodeStartAddress(kScratchReg); ++ __ sub_d(kScratchReg, kScratchReg, kJavaScriptCallCodeStartRegister); ++ __ maskeqz(kSpeculationPoisonRegister, kSpeculationPoisonRegister, ++ kScratchReg); ++} ++ ++void CodeGenerator::AssembleRegisterArgumentPoisoning() { ++ __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); ++ __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); ++ __ And(sp, sp, kSpeculationPoisonRegister); ++} ++ ++// Assembles an instruction after register allocation, producing machine code. 
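// Editorial sketch, not part of the patch: the value that
// GenerateSpeculationPoisonFromCodeStartRegister above is specified (by its
// own comment) to produce: all bits set when the computed and expected code
// start addresses agree, all bits clear otherwise, so that ANDing with the
// poison zeroes speculatively loaded data on a mispredicted path. In plain
// C++:

uint64_t SpeculationPoison(uint64_t computed_start, uint64_t expected_start) {
  return computed_start == expected_start ? ~uint64_t{0} : uint64_t{0};
}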
++CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ++ Instruction* instr) { ++ Loong64OperandConverter i(this, instr); ++ InstructionCode opcode = instr->opcode(); ++ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); ++ switch (arch_opcode) { ++ case kArchCallCodeObject: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ CallCodeObject(reg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallBuiltinPointer: { ++ DCHECK(!instr->InputAt(0)->IsImmediate()); ++ Register builtin_index = i.InputRegister(0); ++ __ CallBuiltinByIndex(builtin_index); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchCallWasmFunction: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
<Address>
(constant.ToInt64()); ++ __ Call(wasm_code, constant.rmode()); ++ } else { ++ __ addi_d(kScratchReg, i.InputRegister(0), 0); ++ __ Call(kScratchReg); ++ } ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchTailCallCodeObjectFromJSFunction: ++ case kArchTailCallCodeObject: { ++ if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { ++ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, ++ i.TempRegister(0), i.TempRegister(1), ++ i.TempRegister(2)); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); ++ } else { ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ JumpCodeObject(reg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallWasm: { ++ if (instr->InputAt(0)->IsImmediate()) { ++ Constant constant = i.ToConstant(instr->InputAt(0)); ++ Address wasm_code = static_cast
<Address>
(constant.ToInt64()); ++ __ Jump(wasm_code, constant.rmode()); ++ } else { ++ __ addi_d(kScratchReg, i.InputRegister(0), 0); ++ __ Jump(kScratchReg); ++ } ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchTailCallAddress: { ++ CHECK(!instr->InputAt(0)->IsImmediate()); ++ Register reg = i.InputRegister(0); ++ DCHECK_IMPLIES( ++ HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister), ++ reg == kJavaScriptCallCodeStartRegister); ++ __ Jump(reg); ++ frame_access_state()->ClearSPDelta(); ++ frame_access_state()->SetFrameAccessToDefault(); ++ break; ++ } ++ case kArchCallJSFunction: { ++ Register func = i.InputRegister(0); ++ if (FLAG_debug_code) { ++ // Check the function's context matches the context argument. ++ __ Ld_d(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); ++ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, ++ Operand(kScratchReg)); ++ } ++ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); ++ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); ++ __ CallCodeObject(a2); ++ RecordCallPosition(instr); ++ frame_access_state()->ClearSPDelta(); ++ break; ++ } ++ case kArchPrepareCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ __ PrepareCallCFunction(num_parameters, kScratchReg); ++ // Frame alignment requires using FP-relative frame addressing. ++ frame_access_state()->SetFrameAccessToFP(); ++ break; ++ } ++ case kArchSaveCallerRegisters: { ++ fp_mode_ = ++ static_cast(MiscField::decode(instr->opcode())); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // kReturnRegister0 should have been saved before entering the stub. ++ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0); ++ DCHECK(IsAligned(bytes, kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ DCHECK(!caller_registers_saved_); ++ caller_registers_saved_ = true; ++ break; ++ } ++ case kArchRestoreCallerRegisters: { ++ DCHECK(fp_mode_ == ++ static_cast(MiscField::decode(instr->opcode()))); ++ DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs); ++ // Don't overwrite the returned value. ++ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize)); ++ DCHECK_EQ(0, frame_access_state()->sp_delta()); ++ DCHECK(caller_registers_saved_); ++ caller_registers_saved_ = false; ++ break; ++ } ++ case kArchPrepareTailCall: ++ AssemblePrepareTailCall(); ++ break; ++ case kArchCallCFunction: { ++ int const num_parameters = MiscField::decode(instr->opcode()); ++ Label start_call; ++ bool isWasmCapiFunction = ++ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); ++ // from start_call to return address. ++ int offset = __ root_array_available() ? 44 : 80; // 11 or 20 instrs ++#if V8_HOST_ARCH_LOONG64 ++ if (__ emit_debug_code()) { ++ offset += 12; // see CallCFunction ++ } ++#endif ++ if (isWasmCapiFunction) { ++ // Put the return address in a stack slot. 
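++ // pcaddi rd, si20 yields pc + (si20 << 2), so the pcaddi below
++ // materializes a PC-relative code address into t7; it is then spilled
++ // to the exit-frame slot where the stack walker expects the calling
++ // PC. The fixed instruction count this relies on is re-checked via
++ // SizeOfCodeGeneratedSince after the call.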
++ // __ mov(kScratchReg, ra); ++ __ bind(&start_call); ++ __ pcaddi(t7, -4); // __ nal(); ++ //__ nop(); ++ //__ Daddu(ra, ra, offset - 8); // 8 = nop + nal ++ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); ++ // __ mov(ra, kScratchReg); ++ } ++ if (instr->InputAt(0)->IsImmediate()) { ++ ExternalReference ref = i.InputExternalReference(0); ++ __ CallCFunction(ref, num_parameters); ++ } else { ++ Register func = i.InputRegister(0); ++ __ CallCFunction(func, num_parameters); ++ } ++ if (isWasmCapiFunction) { ++ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); ++ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); ++ } ++ ++ frame_access_state()->SetFrameAccessToDefault(); ++ // Ideally, we should decrement SP delta to match the change of stack ++ // pointer in CallCFunction. However, for certain architectures (e.g. ++ // ARM), there may be more strict alignment requirement, causing old SP ++ // to be saved on the stack. In those cases, we can not calculate the SP ++ // delta statically. ++ frame_access_state()->ClearSPDelta(); ++ if (caller_registers_saved_) { ++ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters. ++ // Here, we assume the sequence to be: ++ // kArchSaveCallerRegisters; ++ // kArchCallCFunction; ++ // kArchRestoreCallerRegisters; ++ int bytes = ++ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0); ++ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize); ++ } ++ break; ++ } ++ case kArchJmp: ++ AssembleArchJump(i.InputRpo(0)); ++ break; ++ case kArchBinarySearchSwitch: ++ AssembleArchBinarySearchSwitch(instr); ++ break; ++ break; ++ case kArchTableSwitch: ++ AssembleArchTableSwitch(instr); ++ break; ++ case kArchAbortCSAAssert: ++ DCHECK(i.InputRegister(0) == a0); ++ { ++ // We don't actually want to generate a pile of code for this, so just ++ // claim there is a stack frame, without generating one. ++ FrameScope scope(tasm(), StackFrame::NONE); ++ __ Call( ++ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), ++ RelocInfo::CODE_TARGET); ++ } ++ __ stop(); ++ break; ++ case kArchDebugBreak: ++ __ DebugBreak(); ++ break; ++ case kArchComment: ++ __ RecordComment(reinterpret_cast(i.InputInt64(0))); ++ break; ++ case kArchNop: ++ case kArchThrowTerminator: ++ // don't emit code for nops. ++ break; ++ case kArchDeoptimize: { ++ DeoptimizationExit* exit = ++ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); ++ CodeGenResult result = AssembleDeoptimizerCall(exit); ++ if (result != kSuccess) return result; ++ break; ++ } ++ case kArchRet: ++ AssembleReturn(instr->InputAt(0)); ++ break; ++ case kArchStackPointerGreaterThan: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. 
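++ // The sp comparison itself is emitted by the flags consumer; see the
++ // kArchStackPointerGreaterThan handling in AssembleBranchToLabels
++ // below.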
++ break; ++ case kArchStackCheckOffset: ++ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); ++ break; ++ case kArchFramePointer: ++ __ mov(i.OutputRegister(), fp); ++ break; ++ case kArchParentFramePointer: ++ if (frame_access_state()->has_frame()) { ++ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0)); ++ } else { ++ __ mov(i.OutputRegister(), fp); ++ } ++ break; ++ case kArchTruncateDoubleToI: ++ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), ++ i.InputDoubleRegister(0), DetermineStubCallMode()); ++ break; ++ case kArchStoreWithWriteBarrier: { ++ RecordWriteMode mode = ++ static_cast(MiscField::decode(instr->opcode())); ++ Register object = i.InputRegister(0); ++ Register index = i.InputRegister(1); ++ Register value = i.InputRegister(2); ++ Register scratch0 = i.TempRegister(0); ++ Register scratch1 = i.TempRegister(1); ++ auto ool = new (zone()) ++ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1, ++ mode, DetermineStubCallMode()); ++ __ Add_d(kScratchReg, object, index); ++ __ St_d(value, MemOperand(kScratchReg, 0)); ++ __ CheckPageFlag(object, scratch0, ++ MemoryChunk::kPointersFromHereAreInterestingMask, ne, ++ ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kArchStackSlot: { ++ FrameOffset offset = ++ frame_access_state()->GetFrameOffset(i.InputInt32(0)); ++ Register base_reg = offset.from_stack_pointer() ? sp : fp; ++ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset())); ++ int alignment = i.InputInt32(1); ++ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || ++ alignment == 16); ++ if (FLAG_debug_code && alignment > 0) { ++ // Verify that the output_register is properly aligned ++ __ And(kScratchReg, i.OutputRegister(), ++ Operand(kSystemPointerSize - 1)); ++ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, ++ Operand(zero_reg)); ++ } ++ if (alignment == 2 * kSystemPointerSize) { ++ Label done; ++ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ Add_d(i.OutputRegister(), i.OutputRegister(), kSystemPointerSize); ++ __ bind(&done); ++ } else if (alignment > 2 * kSystemPointerSize) { ++ Label done; ++ __ Add_d(kScratchReg, base_reg, Operand(offset.offset())); ++ __ And(kScratchReg, kScratchReg, Operand(alignment - 1)); ++ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg)); ++ __ li(kScratchReg2, alignment); ++ __ Sub_d(kScratchReg2, kScratchReg2, Operand(kScratchReg)); ++ __ Add_d(i.OutputRegister(), i.OutputRegister(), kScratchReg2); ++ __ bind(&done); ++ } ++ ++ break; ++ } ++ case kArchWordPoisonOnSpeculation: ++ __ And(i.OutputRegister(), i.InputRegister(0), ++ kSpeculationPoisonRegister); ++ break; ++ case kIeee754Float64Acos: ++ ASSEMBLE_IEEE754_UNOP(acos); ++ break; ++ case kIeee754Float64Acosh: ++ ASSEMBLE_IEEE754_UNOP(acosh); ++ break; ++ case kIeee754Float64Asin: ++ ASSEMBLE_IEEE754_UNOP(asin); ++ break; ++ case kIeee754Float64Asinh: ++ ASSEMBLE_IEEE754_UNOP(asinh); ++ break; ++ case kIeee754Float64Atan: ++ ASSEMBLE_IEEE754_UNOP(atan); ++ break; ++ case kIeee754Float64Atanh: ++ ASSEMBLE_IEEE754_UNOP(atanh); ++ break; ++ case kIeee754Float64Atan2: ++ ASSEMBLE_IEEE754_BINOP(atan2); ++ break; ++ case kIeee754Float64Cos: ++ ASSEMBLE_IEEE754_UNOP(cos); ++ break; ++ case kIeee754Float64Cosh: ++ ASSEMBLE_IEEE754_UNOP(cosh); ++ break; ++ case kIeee754Float64Cbrt: ++ ASSEMBLE_IEEE754_UNOP(cbrt); ++ break; ++ case kIeee754Float64Exp: 
++ ASSEMBLE_IEEE754_UNOP(exp); ++ break; ++ case kIeee754Float64Expm1: ++ ASSEMBLE_IEEE754_UNOP(expm1); ++ break; ++ case kIeee754Float64Log: ++ ASSEMBLE_IEEE754_UNOP(log); ++ break; ++ case kIeee754Float64Log1p: ++ ASSEMBLE_IEEE754_UNOP(log1p); ++ break; ++ case kIeee754Float64Log2: ++ ASSEMBLE_IEEE754_UNOP(log2); ++ break; ++ case kIeee754Float64Log10: ++ ASSEMBLE_IEEE754_UNOP(log10); ++ break; ++ case kIeee754Float64Pow: ++ ASSEMBLE_IEEE754_BINOP(pow); ++ break; ++ case kIeee754Float64Sin: ++ ASSEMBLE_IEEE754_UNOP(sin); ++ break; ++ case kIeee754Float64Sinh: ++ ASSEMBLE_IEEE754_UNOP(sinh); ++ break; ++ case kIeee754Float64Tan: ++ ASSEMBLE_IEEE754_UNOP(tan); ++ break; ++ case kIeee754Float64Tanh: ++ ASSEMBLE_IEEE754_UNOP(tanh); ++ break; ++ case kLoong64Add: ++ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Dadd: ++ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64DaddOvf: ++ __ AdddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLoong64Sub: ++ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Dsub: ++ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64DsubOvf: ++ __ SubdOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLoong64Mul: ++ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64MulOvf: ++ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), ++ kScratchReg); ++ break; ++ case kLoong64MulHigh: ++ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64MulHighU: ++ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64DMulHigh: ++ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Div: ++ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLoong64DivU: ++ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLoong64Mod: ++ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64ModU: ++ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Dmul: ++ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Ddiv: ++ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLoong64DdivU: ++ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ break; ++ case kLoong64Dmod: ++ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64DmodU: ++ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Dlsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2), t7); ++ break; ++ case kLoong64Lsa: ++ DCHECK(instr->InputAt(2)->IsImmediate()); ++ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), ++ i.InputInt8(2), 
t7); ++ break; ++ case kLoong64And: ++ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64And32: ++ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLoong64Or: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Or32: ++ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLoong64Nor: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); ++ } ++ break; ++ case kLoong64Nor32: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ } else { ++ DCHECK_EQ(0, i.InputOperand(1).immediate()); ++ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ } ++ break; ++ case kLoong64Xor: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Xor32: ++ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ __ slli_w(i.OutputRegister(), i.OutputRegister(), 0x0); ++ break; ++ case kLoong64Clz: ++ __ Clz_w(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLoong64Dclz: ++ __ clz_d(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLoong64Ctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Ctz_w(dst, src); ++ } break; ++ case kLoong64Dctz: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Ctz_d(dst, src); ++ } break; ++ case kLoong64Popcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Popcnt_w(dst, src); ++ } break; ++ case kLoong64Dpopcnt: { ++ Register src = i.InputRegister(0); ++ Register dst = i.OutputRegister(); ++ __ Popcnt_d(dst, src); ++ } break; ++ case kLoong64Shl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLoong64Shr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); ++ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); ++ __ srli_w(i.OutputRegister(), i.OutputRegister(), ++ static_cast(imm)); ++ } ++ break; ++ case kLoong64Sar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ slli_w(i.InputRegister(0), i.InputRegister(0), 0x0); ++ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_w(i.OutputRegister(), i.InputRegister(0), 0x0); ++ __ srai_w(i.OutputRegister(), i.OutputRegister(), ++ static_cast(imm)); ++ } ++ break; ++ case kLoong64Ext: ++ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ break; ++ case kLoong64Ins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++ __ bstrins_w(i.OutputRegister(), zero_reg, ++ 
i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } else { ++ __ bstrins_w(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } ++ break; ++ case kLoong64Dext: { ++ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ break; ++ } ++ case kLoong64Dins: ++ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { ++ __ bstrins_d(i.OutputRegister(), zero_reg, ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } else { ++ __ bstrins_d(i.OutputRegister(), i.InputRegister(0), ++ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1)); ++ } ++ break; ++ case kLoong64Dshl: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ slli_d(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLoong64Dshr: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ srli_d(i.OutputRegister(), i.InputRegister(0), ++ static_cast(imm)); ++ } ++ break; ++ case kLoong64Dsar: ++ if (instr->InputAt(1)->IsRegister()) { ++ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); ++ } else { ++ int64_t imm = i.InputOperand(1).immediate(); ++ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm); ++ } ++ break; ++ case kLoong64Ror: ++ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Dror: ++ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); ++ break; ++ case kLoong64Tst: ++ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kLoong64Cmp: ++ // Pseudo-instruction used for cmp/branch. No opcode emitted here. ++ break; ++ case kLoong64Mov: ++ // TODO(plind): Should we combine mov/li like this, or use separate instr? ++ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType ++ if (HasRegisterInput(instr, 0)) { ++ __ mov(i.OutputRegister(), i.InputRegister(0)); ++ } else { ++ __ li(i.OutputRegister(), i.InputOperand(0)); ++ } ++ break; ++ ++ case kLoong64CmpS: { ++ FPURegister left = i.InputOrZeroSingleRegister(0); ++ FPURegister right = i.InputOrZeroSingleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ CompareF32(left, right, cc); ++ } break; ++ case kLoong64AddS: ++ // TODO(plind): add special case: combine mult & add. ++ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64SubS: ++ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64MulS: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64DivS: ++ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64ModS: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. 
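++ // LOONG64 has no FPU remainder instruction, so fmod goes through the
++ // C runtime: build a manual frame, move both operands into the FP
++ // parameter registers, call the mod_two_doubles helper and read the
++ // result back from the FP return register.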
++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. ++ __ MovFromFloatResult(i.OutputSingleRegister()); ++ break; ++ } ++ case kLoong64AbsS: ++ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLoong64NegS: ++ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLoong64SqrtS: { ++ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64MaxS: ++ __ fmax_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64MinS: ++ __ fmin_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64CmpD: { ++ FPURegister left = i.InputOrZeroDoubleRegister(0); ++ FPURegister right = i.InputOrZeroDoubleRegister(1); ++ bool predicate; ++ FPUCondition cc = ++ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ CompareF64(left, right, cc); ++ } break; ++ case kLoong64AddD: ++ // TODO(plind): add special case: combine mult & add. ++ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64SubD: ++ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64MulD: ++ // TODO(plind): add special case: right op is -1.0, see arm port. ++ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64DivD: ++ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64ModD: { ++ // TODO(bmeurer): We should really get rid of this special instruction, ++ // and generate a CallAddress instruction instead. ++ FrameScope scope(tasm(), StackFrame::MANUAL); ++ __ PrepareCallCFunction(0, 2, kScratchReg); ++ __ MovToFloatParameters(i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); ++ // Move the result in the double result register. 
++ __ MovFromFloatResult(i.OutputDoubleRegister()); ++ break; ++ } ++ case kLoong64AbsD: ++ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64NegD: ++ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64SqrtD: { ++ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64MaxD: ++ __ fmax_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64MinD: ++ __ fmin_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), ++ i.InputDoubleRegister(1)); ++ break; ++ case kLoong64Float64RoundDown: { ++ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64Float32RoundDown: { ++ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLoong64Float64RoundTruncate: { ++ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64Float32RoundTruncate: { ++ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLoong64Float64RoundUp: { ++ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64Float32RoundUp: { ++ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLoong64Float64RoundTiesEven: { ++ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ } ++ case kLoong64Float32RoundTiesEven: { ++ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); ++ break; ++ } ++ case kLoong64Float32Max: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2); ++ __ Float32Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLoong64Float64Max: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2); ++ __ Float64Max(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLoong64Float32Min: { ++ FPURegister dst = i.OutputSingleRegister(); ++ FPURegister src1 = i.InputSingleRegister(0); ++ FPURegister src2 = i.InputSingleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2); ++ __ Float32Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLoong64Float64Min: { ++ FPURegister dst = i.OutputDoubleRegister(); ++ FPURegister src1 = i.InputDoubleRegister(0); ++ FPURegister src2 = i.InputDoubleRegister(1); ++ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2); ++ __ Float64Min(dst, src1, src2, ool->entry()); ++ __ bind(ool->exit()); ++ break; ++ } ++ case kLoong64Float64SilenceNaN: ++ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64CvtSD: ++ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64CvtDS: ++ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); ++ break; ++ case kLoong64CvtDW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_w(scratch, i.InputRegister(0)); ++ __ ffint_d_w(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLoong64CvtSW: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_w(scratch, i.InputRegister(0)); ++ __ 
ffint_s_w(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLoong64CvtSUw: { ++ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLoong64CvtSL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_d(scratch, i.InputRegister(0)); ++ __ ffint_s_l(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLoong64CvtDL: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ movgr2fr_d(scratch, i.InputRegister(0)); ++ __ ffint_d_l(i.OutputDoubleRegister(), scratch); ++ break; ++ } ++ case kLoong64CvtDUw: { ++ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLoong64CvtDUl: { ++ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLoong64CvtSUl: { ++ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ } ++ case kLoong64FloorWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrm_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64CeilWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrp_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64RoundWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrne_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64TruncWD: { ++ FPURegister scratch = kScratchDoubleReg; ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64FloorWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrm_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64CeilWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrp_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64RoundWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrne_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ break; ++ } ++ case kLoong64TruncWS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ ftintrz_w_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_s(i.OutputRegister(), scratch); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ __ addi_w(kScratchReg, i.OutputRegister(), 1); ++ __ slt(kScratchReg2, kScratchReg, i.OutputRegister()); ++ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); ++ break; ++ } ++ case kLoong64TruncLS: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register tmp_fcsr = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++ // Save FCSR. ++ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); ++ // Clear FPU flags. ++ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); ++ } ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_l_s(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_d(i.OutputRegister(), scratch); ++ if (load_status) { ++ __ movfcsr2gr(result); // __ cfc1(result, FCSR); ++ // Check for overflow and NaNs. 
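++ // After the masking below, result is non-zero iff the conversion
++ // overflowed or saw a NaN; Slt/xori then fold that into a 0/1
++ // success flag for the second output register.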
++ __ And(result, result, ++ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); ++ __ Slt(result, zero_reg, result); ++ __ xori(result, result, 1); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); ++ } ++ break; ++ } ++ case kLoong64TruncLD: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register tmp_fcsr = kScratchReg; ++ Register result = kScratchReg2; ++ ++ bool load_status = instr->OutputCount() > 1; ++ if (load_status) { ++ // Save FCSR. ++ __ movfcsr2gr(tmp_fcsr); // __ cfc1(tmp_fcsr, FCSR); ++ // Clear FPU flags. ++ __ movgr2fcsr(zero_reg); // __ ctc1(zero_reg, FCSR); ++ } ++ // Other arches use round to zero here, so we follow. ++ __ ftintrz_l_d(scratch, i.InputDoubleRegister(0)); ++ __ movfr2gr_d(i.OutputRegister(0), scratch); ++ if (load_status) { ++ __ movfcsr2gr(result); // __ cfc1(result, FCSR); ++ // Check for overflow and NaNs. ++ __ And(result, result, ++ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); ++ __ Slt(result, zero_reg, result); ++ __ xori(result, result, 1); ++ __ mov(i.OutputRegister(1), result); ++ // Restore FCSR ++ __ movgr2fcsr(tmp_fcsr); // __ ctc1(tmp_fcsr, FCSR); ++ } ++ break; ++ } ++ case kLoong64TruncUwD: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ break; ++ } ++ case kLoong64TruncUwS: { ++ FPURegister scratch = kScratchDoubleReg; ++ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ __ addi_w(kScratchReg, i.OutputRegister(), 1); ++ __ Movz(i.OutputRegister(), zero_reg, kScratchReg); ++ break; ++ } ++ case kLoong64TruncUlS: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; ++ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kLoong64TruncUlD: { ++ FPURegister scratch = kScratchDoubleReg; ++ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; ++ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch, ++ result); ++ break; ++ } ++ case kLoong64BitcastDL: ++ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64BitcastLD: ++ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0)); ++ break; ++ case kLoong64Float64ExtractLowWord32: ++ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64Float64ExtractHighWord32: ++ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0)); ++ break; ++ case kLoong64Float64InsertLowWord32: ++ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ case kLoong64Float64InsertHighWord32: ++ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1)); ++ break; ++ // ... more basic instructions ... 
++ ++ case kLoong64Seb: ++ __ ext_w_b(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLoong64Seh: ++ __ ext_w_h(i.OutputRegister(), i.InputRegister(0)); ++ break; ++ case kLoong64Lbu: ++ __ Ld_bu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Lb: ++ __ Ld_b(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Sb: ++ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Lhu: ++ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Ulhu: ++ __ Ld_hu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Lh: ++ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Ulh: ++ __ Ld_h(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Sh: ++ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Ush: ++ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Lw: ++ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Ulw: ++ __ Ld_w(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Lwu: ++ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Ulwu: ++ __ Ld_wu(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Ld: ++ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Uld: ++ __ Ld_d(i.OutputRegister(), i.MemoryOperand()); ++ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); ++ break; ++ case kLoong64Sw: ++ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Usw: ++ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Sd: ++ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Usd: ++ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand()); ++ break; ++ case kLoong64Lwc1: { ++ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); ++ break; ++ } ++ case kLoong64Ulwc1: { ++ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand()); ++ break; ++ } ++ case kLoong64Swc1: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_s(ft, operand); ++ break; ++ } ++ case kLoong64Uswc1: { ++ size_t index = 0; ++ MemOperand operand = i.MemoryOperand(&index); ++ FPURegister ft = i.InputOrZeroSingleRegister(index); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_s(ft, operand); ++ break; ++ } ++ case kLoong64Ldc1: ++ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); ++ break; ++ case kLoong64Uldc1: ++ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand()); ++ break; ++ case kLoong64Sdc1: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ if (ft == kDoubleRegZero && !__ 
IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_d(ft, i.MemoryOperand()); ++ break; ++ } ++ case kLoong64Usdc1: { ++ FPURegister ft = i.InputOrZeroDoubleRegister(2); ++ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ ++ __ Fst_d(ft, i.MemoryOperand()); ++ break; ++ } ++ case kLoong64Sync: { ++ __ dbar(0); ++ break; ++ } ++ case kLoong64Push: ++ if (instr->InputAt(0)->IsFPRegister()) { ++ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); ++ __ Sub_d(sp, sp, Operand(kDoubleSize)); ++ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize); ++ } else { ++ __ Push(i.InputRegister(0)); ++ frame_access_state()->IncreaseSPDelta(1); ++ } ++ break; ++ case kLoong64Peek: { ++ // The incoming value is 0-based, but we need a 1-based value. ++ int reverse_slot = i.InputInt32(0) + 1; ++ int offset = ++ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); ++ if (op->representation() == MachineRepresentation::kFloat64) { ++ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset)); ++ } else { ++ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32); ++ __ Fld_s( ++ i.OutputSingleRegister(0), ++ MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset)); ++ } ++ } else { ++ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset)); ++ } ++ break; ++ } ++ case kLoong64StackClaim: { ++ __ Sub_d(sp, sp, Operand(i.InputInt32(0))); ++ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / ++ kSystemPointerSize); ++ break; ++ } ++ case kLoong64StoreToStackSlot: { ++ if (instr->InputAt(0)->IsFPRegister()) { ++ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } else { ++ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); ++ } ++ break; ++ } ++ case kLoong64ByteSwap64: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8); ++ break; ++ } ++ case kLoong64ByteSwap32: { ++ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); ++ break; ++ } ++ case kWord32AtomicLoadInt8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b); ++ break; ++ case kWord32AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); ++ break; ++ case kWord32AtomicLoadInt16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h); ++ break; ++ case kWord32AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); ++ break; ++ case kWord32AtomicLoadWord32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w); ++ break; ++ case kLoong64Word64AtomicLoadUint8: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu); ++ break; ++ case kLoong64Word64AtomicLoadUint16: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu); ++ break; ++ case kLoong64Word64AtomicLoadUint32: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu); ++ break; ++ case kLoong64Word64AtomicLoadUint64: ++ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d); ++ break; ++ case kWord32AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); ++ break; ++ case kWord32AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); ++ break; ++ case kWord32AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); ++ break; ++ case kLoong64Word64AtomicStoreWord8: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b); ++ break; ++ case kLoong64Word64AtomicStoreWord16: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h); ++ break; ++ case kLoong64Word64AtomicStoreWord32: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w); ++ break; ++ case kLoong64Word64AtomicStoreWord64: ++ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d); ++ break; ++ case 
kWord32AtomicExchangeInt8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); ++ break; ++ case kWord32AtomicExchangeUint8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); ++ break; ++ case kWord32AtomicExchangeInt16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); ++ break; ++ case kWord32AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); ++ break; ++ case kWord32AtomicExchangeWord32: ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2), ++ i.TempRegister(0)); ++ break; ++ case kLoong64Word64AtomicExchangeUint8: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); ++ break; ++ case kLoong64Word64AtomicExchangeUint16: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); ++ break; ++ case kLoong64Word64AtomicExchangeUint32: ++ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); ++ break; ++ case kLoong64Word64AtomicExchangeUint64: ++ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2), ++ i.TempRegister(0)); ++ break; ++ case kWord32AtomicCompareExchangeInt8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32); ++ break; ++ case kWord32AtomicCompareExchangeInt16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32); ++ break; ++ case kWord32AtomicCompareExchangeWord32: ++ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0); ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w); ++ break; ++ case kLoong64Word64AtomicCompareExchangeUint8: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64); ++ break; ++ case kLoong64Word64AtomicCompareExchangeUint16: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64); ++ break; ++ case kLoong64Word64AtomicCompareExchangeUint32: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64); ++ break; ++ case kLoong64Word64AtomicCompareExchangeUint64: ++ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d); ++ break; ++ case kWord32AtomicAddWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicSubWord32: ++ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w); ++ break; ++ case kWord32AtomicAndWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicOrWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kWord32AtomicXorWord32: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kWord32Atomic##op##Int8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst, 32); \ ++ break; \ ++ case 
kWord32Atomic##op##Int16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst, 32); \ ++ break; \ ++ case kWord32Atomic##op##Uint16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst, 32); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Add_w) ++ ATOMIC_BINOP_CASE(Sub, Sub_w) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++ ++ case kLoong64Word64AtomicAddUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLoong64Word64AtomicSubUint64: ++ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d); ++ break; ++ case kLoong64Word64AtomicAndUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLoong64Word64AtomicOrUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++ case kLoong64Word64AtomicXorUint64: ++ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); ++ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0)); ++ break; ++#define ATOMIC_BINOP_CASE(op, inst) \ ++ case kLoong64Word64Atomic##op##Uint8: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst, 64); \ ++ break; \ ++ case kLoong64Word64Atomic##op##Uint16: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst, 64); \ ++ break; \ ++ case kLoong64Word64Atomic##op##Uint32: \ ++ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, inst, 64); \ ++ break; ++ ATOMIC_BINOP_CASE(Add, Add_d) ++ ATOMIC_BINOP_CASE(Sub, Sub_d) ++ ATOMIC_BINOP_CASE(And, And) ++ ATOMIC_BINOP_CASE(Or, Or) ++ ATOMIC_BINOP_CASE(Xor, Xor) ++#undef ATOMIC_BINOP_CASE ++ case kLoong64AssertEqual: ++ __ Assert(eq, static_cast(i.InputOperand(2).immediate()), ++ i.InputRegister(0), Operand(i.InputRegister(1))); ++ break; ++ case kLoong64S128Zero: ++ case kLoong64I32x4Splat: ++ case kLoong64I32x4ExtractLane: ++ case kLoong64I32x4AddHoriz: ++ case kLoong64I32x4Add: ++ case kLoong64I32x4ReplaceLane: ++ case kLoong64I32x4Sub: ++ case kLoong64F64x2Abs: ++ default: ++ break; ++ } ++ return kSuccess; ++} // NOLINT(readability/fn_size) ++ ++#define UNSUPPORTED_COND(opcode, condition) \ ++ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ ++ << "\""; \ ++ UNIMPLEMENTED(); ++ ++void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, ++ Instruction* instr, FlagsCondition condition, ++ Label* tlabel, Label* flabel, bool fallthru) { ++#undef __ ++#define __ tasm-> ++ Loong64OperandConverter i(gen, instr); ++ ++ Condition cc = kNoCondition; ++ // LOONG64 does not have condition code flags, so compare and branch are ++ // implemented differently than on the other arch's. The compare operations ++ // emit loong64 pseudo-instructions, which are handled here by branch ++ // instructions that do the actual comparison. Essential that the input ++ // registers to compare pseudo-op are not modified before this branch op, as ++ // they are tested here. 
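++ // For example, kLoong64Cmp emits no code on its own; the comparison
++ // happens in the branch itself:
++ //   __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));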
++ ++ if (instr->arch_opcode() == kLoong64Tst) { ++ cc = FlagsConditionToConditionTst(condition); ++ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); ++ } else if (instr->arch_opcode() == kLoong64Dadd || ++ instr->arch_opcode() == kLoong64Dsub) { ++ cc = FlagsConditionToConditionOvf(condition); ++ __ srai_d(kScratchReg, i.OutputRegister(), 32); ++ __ srai_w(kScratchReg2, i.OutputRegister(), 31); ++ __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg)); ++ } else if (instr->arch_opcode() == kLoong64DaddOvf || ++ instr->arch_opcode() == kLoong64DsubOvf) { ++ switch (condition) { ++ // Overflow occurs if overflow register is negative ++ case kOverflow: ++ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kLoong64MulOvf) { ++ // Overflow occurs if overflow register is not zero ++ switch (condition) { ++ case kOverflow: ++ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); ++ break; ++ case kNotOverflow: ++ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); ++ break; ++ default: ++ UNSUPPORTED_COND(kLoong64MulOvf, condition); ++ break; ++ } ++ } else if (instr->arch_opcode() == kLoong64Cmp) { ++ cc = FlagsConditionToConditionCmp(condition); ++ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); ++ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { ++ cc = FlagsConditionToConditionCmp(condition); ++ Register lhs_register = sp; ++ uint32_t offset; ++ if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) { ++ lhs_register = i.TempRegister(0); ++ __ Sub_d(lhs_register, sp, offset); ++ } ++ __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0))); ++ } else if (instr->arch_opcode() == kLoong64CmpS || ++ instr->arch_opcode() == kLoong64CmpD) { ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ if (predicate) { ++ __ BranchTrueF(tlabel); ++ } else { ++ __ BranchFalseF(tlabel); ++ } ++ } else { ++ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", ++ instr->arch_opcode()); ++ UNIMPLEMENTED(); ++ } ++ if (!fallthru) __ Branch(flabel); // no fallthru to flabel. ++#undef __ ++#define __ tasm()-> ++} ++ ++// Assembles branches after an instruction. ++void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { ++ Label* tlabel = branch->true_label; ++ Label* flabel = branch->false_label; ++ ++ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, ++ branch->fallthru); ++} ++ ++void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, ++ Instruction* instr) { ++ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
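++ // The mask is cleared on the path that the (negated) condition says
++ // should not have been reached, so speculatively executed loads that
++ // were ANDed with kSpeculationPoisonRegister observe zeroed values.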
++ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { ++ return; ++ } ++ ++ Loong64OperandConverter i(this, instr); ++ condition = NegateFlagsCondition(condition); ++ ++ switch (instr->arch_opcode()) { ++ case kLoong64Cmp: { ++ __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0), ++ i.InputOperand(1), ++ FlagsConditionToConditionCmp(condition)); ++ } ++ return; ++ case kLoong64Tst: { ++ switch (condition) { ++ case kEqual: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); ++ break; ++ case kNotEqual: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++ return; ++ case kLoong64Dadd: ++ case kLoong64Dsub: { ++ // Check for overflow creates 1 or 0 for result. ++ __ srli_d(kScratchReg, i.OutputRegister(), 63); ++ __ srli_w(kScratchReg2, i.OutputRegister(), 31); ++ __ xor_(kScratchReg2, kScratchReg, kScratchReg2); ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg2); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLoong64DaddOvf: ++ case kLoong64DsubOvf: { ++ // Overflow occurs if overflow register is negative ++ __ Slt(kScratchReg2, kScratchReg, zero_reg); ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg2); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLoong64MulOvf: { ++ // Overflow occurs if overflow register is not zero ++ switch (condition) { ++ case kOverflow: ++ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, ++ kScratchReg); ++ break; ++ case kNotOverflow: ++ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); ++ break; ++ default: ++ UNSUPPORTED_COND(instr->arch_opcode(), condition); ++ } ++ } ++ return; ++ case kLoong64CmpS: ++ case kLoong64CmpD: { ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ if (predicate) { ++ __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); ++ } else { ++ __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister); ++ } ++ } ++ return; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++#undef UNSUPPORTED_COND ++ ++void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, ++ BranchInfo* branch) { ++ AssembleArchBranch(instr, branch); ++} ++ ++void CodeGenerator::AssembleArchJump(RpoNumber target) { ++ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target)); ++} ++ ++void CodeGenerator::AssembleArchTrap(Instruction* instr, ++ FlagsCondition condition) { ++ class OutOfLineTrap final : public OutOfLineCode { ++ public: ++ OutOfLineTrap(CodeGenerator* gen, Instruction* instr) ++ : OutOfLineCode(gen), instr_(instr), gen_(gen) {} ++ void Generate() final { ++ Loong64OperandConverter i(gen_, instr_); ++ TrapId trap_id = ++ static_cast(i.InputInt32(instr_->InputCount() - 1)); ++ GenerateCallToTrap(trap_id); ++ } ++ ++ private: ++ void GenerateCallToTrap(TrapId trap_id) { ++ if (trap_id == TrapId::kInvalid) { ++ // We cannot test calls to the runtime in cctest/test-run-wasm. ++ // Therefore we emit a call to C here instead of a call to the runtime. 
++ // We use the context register as the scratch register, because we do ++ // not have a context here. ++ __ PrepareCallCFunction(0, 0, cp); ++ __ CallCFunction( ++ ExternalReference::wasm_call_trap_callback_for_testing(), 0); ++ __ LeaveFrame(StackFrame::WASM); ++ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor(); ++ int pop_count = ++ static_cast(call_descriptor->StackParameterCount()); ++ pop_count += (pop_count & 1); // align ++ __ Drop(pop_count); ++ __ Ret(); ++ } else { ++ gen_->AssembleSourcePosition(instr_); ++ // A direct call to a wasm runtime stub defined in this module. ++ // Just encode the stub index. This will be patched when the code ++ // is added to the native module and copied into wasm code space. ++ __ Call(static_cast
<Address>
(trap_id), RelocInfo::WASM_STUB_CALL); ++ ReferenceMap* reference_map = ++ new (gen_->zone()) ReferenceMap(gen_->zone()); ++ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); ++ if (FLAG_debug_code) { ++ __ stop(); ++ } ++ } ++ } ++ Instruction* instr_; ++ CodeGenerator* gen_; ++ }; ++ auto ool = new (zone()) OutOfLineTrap(this, instr); ++ Label* tlabel = ool->entry(); ++ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); ++} ++ ++// Assembles boolean materializations after an instruction. ++void CodeGenerator::AssembleArchBoolean(Instruction* instr, ++ FlagsCondition condition) { ++ Loong64OperandConverter i(this, instr); ++ ++ // Materialize a full 32-bit 1 or 0 value. The result register is always the ++ // last output of the instruction. ++ DCHECK_NE(0u, instr->OutputCount()); ++ Register result = i.OutputRegister(instr->OutputCount() - 1); ++ Condition cc = kNoCondition; ++ // Loong64 does not have condition code flags, so compare and branch are ++ // implemented differently than on the other arch's. The compare operations ++ // emit loong64 pseudo-instructions, which are checked and handled here. ++ ++ if (instr->arch_opcode() == kLoong64Tst) { ++ cc = FlagsConditionToConditionTst(condition); ++ if (cc == eq) { ++ __ Sltu(result, kScratchReg, 1); ++ } else { ++ __ Sltu(result, zero_reg, kScratchReg); ++ } ++ return; ++ } else if (instr->arch_opcode() == kLoong64Dadd || ++ instr->arch_opcode() == kLoong64Dsub) { ++ cc = FlagsConditionToConditionOvf(condition); ++ // Check for overflow creates 1 or 0 for result. ++ __ srli_d(kScratchReg, i.OutputRegister(), 63); ++ __ srli_w(kScratchReg2, i.OutputRegister(), 31); ++ __ xor_(result, kScratchReg, kScratchReg2); ++ if (cc == eq) // Toggle result for not overflow. 
++ __ xori(result, result, 1); ++ return; ++ } else if (instr->arch_opcode() == kLoong64DaddOvf || ++ instr->arch_opcode() == kLoong64DsubOvf) { ++ // Overflow occurs if overflow register is negative ++ __ slt(result, kScratchReg, zero_reg); ++ } else if (instr->arch_opcode() == kLoong64MulOvf) { ++ // Overflow occurs if overflow register is not zero ++ __ Sgtu(result, kScratchReg, zero_reg); ++ } else if (instr->arch_opcode() == kLoong64Cmp) { ++ cc = FlagsConditionToConditionCmp(condition); ++ switch (cc) { ++ case eq: ++ case ne: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ if (instr->InputAt(1)->IsImmediate()) { ++ if (is_int12(-right.immediate())) { ++ if (right.immediate() == 0) { ++ if (cc == eq) { ++ __ Sltu(result, left, 1); ++ } else { ++ __ Sltu(result, zero_reg, left); ++ } ++ } else { ++ __ Add_d(result, left, Operand(-right.immediate())); ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } else { ++ if (is_uint12(right.immediate())) { ++ __ Xor(result, left, right); ++ } else { ++ __ li(kScratchReg, right); ++ __ Xor(result, left, kScratchReg); ++ } ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } else { ++ __ Xor(result, left, right); ++ if (cc == eq) { ++ __ Sltu(result, result, 1); ++ } else { ++ __ Sltu(result, zero_reg, result); ++ } ++ } ++ } break; ++ case lt: ++ case ge: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ __ Slt(result, left, right); ++ if (cc == ge) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case gt: ++ case le: { ++ Register left = i.InputRegister(1); ++ Operand right = i.InputOperand(0); ++ __ Slt(result, left, right); ++ if (cc == le) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case lo: ++ case hs: { ++ Register left = i.InputRegister(0); ++ Operand right = i.InputOperand(1); ++ __ Sltu(result, left, right); ++ if (cc == hs) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ case hi: ++ case ls: { ++ Register left = i.InputRegister(1); ++ Operand right = i.InputOperand(0); ++ __ Sltu(result, left, right); ++ if (cc == ls) { ++ __ xori(result, result, 1); ++ } ++ } break; ++ default: ++ UNREACHABLE(); ++ } ++ return; ++ } else if (instr->arch_opcode() == kLoong64CmpD || ++ instr->arch_opcode() == kLoong64CmpS) { ++ FPURegister left = i.InputOrZeroDoubleRegister(0); ++ FPURegister right = i.InputOrZeroDoubleRegister(1); ++ if ((left == kDoubleRegZero || right == kDoubleRegZero) && ++ !__ IsDoubleZeroRegSet()) { ++ __ Move(kDoubleRegZero, 0.0); ++ } ++ bool predicate; ++ FlagsConditionToConditionCmpFPU(&predicate, condition); ++ { ++ __ movcf2gr(result, FCC0); ++ if (!predicate) { ++ __ xori(result, result, 1); ++ } ++ } ++ return; ++ } else { ++ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", ++ instr->arch_opcode()); ++ TRACE_UNIMPL(); ++ UNIMPLEMENTED(); ++ } ++} ++ ++void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { ++ Loong64OperandConverter i(this, instr); ++ Register input = i.InputRegister(0); ++ std::vector> cases; ++ for (size_t index = 2; index < instr->InputCount(); index += 2) { ++ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))}); ++ } ++ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(), ++ cases.data() + cases.size()); ++} ++ ++void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { ++ Loong64OperandConverter i(this, instr); 
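++ // Indices >= case_count take the default block (unsigned hs compare);
++ // in-range indices index into the emitted switch table.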
++ Register input = i.InputRegister(0); ++ size_t const case_count = instr->InputCount() - 2; ++ ++ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count)); ++ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { ++ return GetLabel(i.InputRpo(index + 2)); ++ }); ++} ++ ++void CodeGenerator::FinishFrame(Frame* frame) { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters(); ++ if (saves_fpu != 0) { ++ int count = base::bits::CountPopulation(saves_fpu); ++ DCHECK_EQ(kNumCalleeSavedFPU, count); ++ frame->AllocateSavedCalleeRegisterSlots(count * ++ (kDoubleSize / kSystemPointerSize)); ++ } ++ ++ const RegList saves = call_descriptor->CalleeSavedRegisters(); ++ if (saves != 0) { ++ int count = base::bits::CountPopulation(saves); ++ DCHECK_EQ(kNumCalleeSaved, count + 1); ++ frame->AllocateSavedCalleeRegisterSlots(count); ++ } ++} ++ ++void CodeGenerator::AssembleConstructFrame() { ++ auto call_descriptor = linkage()->GetIncomingDescriptor(); ++ ++ if (frame_access_state()->has_frame()) { ++ if (call_descriptor->IsCFunctionCall()) { ++ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { ++ __ StubPrologue(StackFrame::C_WASM_ENTRY); ++ // Reserve stack space for saving the c_entry_fp later. ++ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); ++ } else { ++ __ Push(ra, fp); ++ __ mov(fp, sp); ++ } ++ } else if (call_descriptor->IsJSFunctionCall()) { ++ __ Prologue(); ++ if (call_descriptor->PushArgumentCount()) { ++ __ Push(kJavaScriptCallArgCountRegister); ++ } ++ } else { ++ __ StubPrologue(info()->GetOutputStackFrameType()); ++ if (call_descriptor->IsWasmFunctionCall()) { ++ __ Push(kWasmInstanceRegister); ++ } else if (call_descriptor->IsWasmImportWrapper() || ++ call_descriptor->IsWasmCapiFunction()) { ++ // Wasm import wrappers are passed a tuple in the place of the instance. ++ // Unpack the tuple into the instance and the target callable. ++ // This must be done here in the codegen because it cannot be expressed ++ // properly in the graph. ++ __ Ld_d(kJSFunctionRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset)); ++ __ Ld_d(kWasmInstanceRegister, ++ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); ++ __ Push(kWasmInstanceRegister); ++ if (call_descriptor->IsWasmCapiFunction()) { ++ // Reserve space for saving the PC later. ++ __ Sub_d(sp, sp, Operand(kSystemPointerSize)); ++ } ++ } ++ } ++ } ++ ++ int required_slots = ++ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); ++ ++ if (info()->is_osr()) { ++ // TurboFan OSR-compiled functions cannot be entered directly. ++ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); ++ ++ // Unoptimized code jumps directly to this entrypoint while the unoptimized ++ // frame is still on the stack. Optimized code uses OSR values directly from ++ // the unoptimized frame. Thus, all that needs to be done is to allocate the ++ // remaining stack slots. 
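++    // The slots of the unoptimized frame are still live on the stack here,
++    // so only the difference is allocated below (see the
++    // UnoptimizedFrameSlots() adjustment).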
++    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
++    osr_pc_offset_ = __ pc_offset();
++    required_slots -= osr_helper()->UnoptimizedFrameSlots();
++    ResetSpeculationPoison();
++  }
++
++  const RegList saves = call_descriptor->CalleeSavedRegisters();
++  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
++
++  if (required_slots > 0) {
++    DCHECK(frame_access_state()->has_frame());
++    if (info()->IsWasm() && required_slots > 128) {
++      // For WebAssembly functions with big frames we have to do the stack
++      // overflow check before we construct the frame. Otherwise we may not
++      // have enough space on the stack to call the runtime for the stack
++      // overflow.
++      Label done;
++
++      // If the frame is bigger than the stack, we throw the stack overflow
++      // exception unconditionally. Thereby we can avoid the integer overflow
++      // check in the condition code.
++      if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
++        __ Ld_d(
++            kScratchReg,
++            FieldMemOperand(kWasmInstanceRegister,
++                            WasmInstanceObject::kRealStackLimitAddressOffset));
++        __ Ld_d(kScratchReg, MemOperand(kScratchReg, 0));
++        __ Add_d(kScratchReg, kScratchReg,
++                 Operand(required_slots * kSystemPointerSize));
++        __ Branch(&done, uge, sp, Operand(kScratchReg));
++      }
++
++      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
++      // We come from WebAssembly, there are no references for the GC.
++      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
++      RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
++      if (FLAG_debug_code) {
++        __ stop();
++      }
++
++      __ bind(&done);
++    }
++  }
++
++  const int returns = frame()->GetReturnSlotCount();
++
++  // Skip callee-saved and return slots, which are pushed below.
++  required_slots -= base::bits::CountPopulation(saves);
++  required_slots -= base::bits::CountPopulation(saves_fpu);
++  required_slots -= returns;
++  if (required_slots > 0) {
++    __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
++  }
++
++  if (saves_fpu != 0) {
++    // Save callee-saved FPU registers.
++    __ MultiPushFPU(saves_fpu);
++    DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
++  }
++
++  if (saves != 0) {
++    // Save callee-saved registers.
++    __ MultiPush(saves);
++    DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
++  }
++
++  if (returns != 0) {
++    // Create space for returns.
++    __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize));
++  }
++}
++
++void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
++  auto call_descriptor = linkage()->GetIncomingDescriptor();
++
++  const int returns = frame()->GetReturnSlotCount();
++  if (returns != 0) {
++    __ Add_d(sp, sp, Operand(returns * kSystemPointerSize));
++  }
++
++  // Restore GP registers.
++  const RegList saves = call_descriptor->CalleeSavedRegisters();
++  if (saves != 0) {
++    __ MultiPop(saves);
++  }
++
++  // Restore FPU registers.
++  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
++  if (saves_fpu != 0) {
++    __ MultiPopFPU(saves_fpu);
++  }
++
++  Loong64OperandConverter g(this, nullptr);
++  if (call_descriptor->IsCFunctionCall()) {
++    AssembleDeconstructFrame();
++  } else if (frame_access_state()->has_frame()) {
++    // Canonicalize JSFunction return sites for now unless they have a
++    // variable number of stack slot pops.
++    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
++      if (return_label_.is_bound()) {
++        __ Branch(&return_label_);
++        return;
++      } else {
++        __ bind(&return_label_);
++        AssembleDeconstructFrame();
++      }
++    } else {
++      AssembleDeconstructFrame();
++    }
++  }
++  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
++  if (pop->IsImmediate()) {
++    pop_count += g.ToConstant(pop).ToInt32();
++  } else {
++    Register pop_reg = g.ToRegister(pop);
++    __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
++    __ Add_d(sp, sp, pop_reg);
++  }
++  if (pop_count != 0) {
++    __ DropAndRet(pop_count);
++  } else {
++    __ Ret();
++  }
++}
++
++void CodeGenerator::FinishCode() {}
++
++void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
++
++void CodeGenerator::AssembleMove(InstructionOperand* source,
++                                 InstructionOperand* destination) {
++  Loong64OperandConverter g(this, nullptr);
++  // Dispatch on the source and destination operand kinds. Not all
++  // combinations are possible.
++  if (source->IsRegister()) {
++    DCHECK(destination->IsRegister() || destination->IsStackSlot());
++    Register src = g.ToRegister(source);
++    if (destination->IsRegister()) {
++      __ mov(g.ToRegister(destination), src);
++    } else {
++      __ St_d(src, g.ToMemOperand(destination));
++    }
++  } else if (source->IsStackSlot()) {
++    DCHECK(destination->IsRegister() || destination->IsStackSlot());
++    MemOperand src = g.ToMemOperand(source);
++    if (destination->IsRegister()) {
++      __ Ld_d(g.ToRegister(destination), src);
++    } else {
++      Register temp = kScratchReg;
++      __ Ld_d(temp, src);
++      __ St_d(temp, g.ToMemOperand(destination));
++    }
++  } else if (source->IsConstant()) {
++    Constant src = g.ToConstant(source);
++    if (destination->IsRegister() || destination->IsStackSlot()) {
++      Register dst =
++          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
++      switch (src.type()) {
++        case Constant::kInt32:
++          __ li(dst, Operand(src.ToInt32()));
++          break;
++        case Constant::kFloat32:
++          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
++          break;
++        case Constant::kInt64:
++          if (RelocInfo::IsWasmReference(src.rmode())) {
++            __ li(dst, Operand(src.ToInt64(), src.rmode()));
++          } else {
++            __ li(dst, Operand(src.ToInt64()));
++          }
++          break;
++        case Constant::kFloat64:
++          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
++          break;
++        case Constant::kExternalReference:
++          __ li(dst, src.ToExternalReference());
++          break;
++        case Constant::kDelayedStringConstant:
++          __ li(dst, src.ToDelayedStringConstant());
++          break;
++        case Constant::kHeapObject: {
++          Handle<HeapObject> src_object = src.ToHeapObject();
++          RootIndex index;
++          if (IsMaterializableFromRoot(src_object, &index)) {
++            __ LoadRoot(dst, index);
++          } else {
++            __ li(dst, src_object);
++          }
++          break;
++        }
++        case Constant::kCompressedHeapObject:
++          UNREACHABLE();
++        case Constant::kRpoNumber:
++          UNREACHABLE();  // TODO(titzer): loading RPO numbers on LOONG64.
++          break;
++      }
++      if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
++    } else if (src.type() == Constant::kFloat32) {
++      if (destination->IsFPStackSlot()) {
++        MemOperand dst = g.ToMemOperand(destination);
++        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
++          __ St_d(zero_reg, dst);
++        } else {
++          __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
++          __ St_d(kScratchReg, dst);
++        }
++      } else {
++        DCHECK(destination->IsFPRegister());
++        FloatRegister dst = g.ToSingleRegister(destination);
++        __ Move(dst, src.ToFloat32());
++      }
++    } else {
++      DCHECK_EQ(Constant::kFloat64, src.type());
++      DoubleRegister dst = destination->IsFPRegister()
++                               ? g.ToDoubleRegister(destination)
++                               : kScratchDoubleReg;
++      __ Move(dst, src.ToFloat64().value());
++      if (destination->IsFPStackSlot()) {
++        __ Fst_d(dst, g.ToMemOperand(destination));
++      }
++    }
++  } else if (source->IsFPRegister()) {
++    FPURegister src = g.ToDoubleRegister(source);
++    if (destination->IsFPRegister()) {
++      FPURegister dst = g.ToDoubleRegister(destination);
++      __ Move(dst, src);
++    } else {
++      DCHECK(destination->IsFPStackSlot());
++      __ Fst_d(src, g.ToMemOperand(destination));
++    }
++  } else if (source->IsFPStackSlot()) {
++    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
++    MemOperand src = g.ToMemOperand(source);
++    if (destination->IsFPRegister()) {
++      __ Fld_d(g.ToDoubleRegister(destination), src);
++    } else {
++      DCHECK(destination->IsFPStackSlot());
++      FPURegister temp = kScratchDoubleReg;
++      __ Fld_d(temp, src);
++      __ Fst_d(temp, g.ToMemOperand(destination));
++    }
++  } else {
++    UNREACHABLE();
++  }
++}
++
++void CodeGenerator::AssembleSwap(InstructionOperand* source,
++                                 InstructionOperand* destination) {
++  Loong64OperandConverter g(this, nullptr);
++  // Dispatch on the source and destination operand kinds. Not all
++  // combinations are possible.
++  if (source->IsRegister()) {
++    // Register-register.
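++    // Swaps have no free register of their own, so the two values are
++    // cycled through the reserved scratch registers (kScratchReg,
++    // kScratchDoubleReg) in three moves.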
++ Register temp = kScratchReg; ++ Register src = g.ToRegister(source); ++ if (destination->IsRegister()) { ++ Register dst = g.ToRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ mov(temp, src); ++ __ Ld_d(src, dst); ++ __ St_d(temp, dst); ++ } ++ } else if (source->IsStackSlot()) { ++ DCHECK(destination->IsStackSlot()); ++ Register temp_0 = kScratchReg; ++ Register temp_1 = kScratchReg2; ++ MemOperand src = g.ToMemOperand(source); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Ld_d(temp_0, src); ++ __ Ld_d(temp_1, dst); ++ __ St_d(temp_0, dst); ++ __ St_d(temp_1, src); ++ } else if (source->IsFPRegister()) { ++ FPURegister temp = kScratchDoubleReg; ++ FPURegister src = g.ToDoubleRegister(source); ++ if (destination->IsFPRegister()) { ++ FPURegister dst = g.ToDoubleRegister(destination); ++ __ Move(temp, src); ++ __ Move(src, dst); ++ __ Move(dst, temp); ++ } else { ++ DCHECK(destination->IsFPStackSlot()); ++ MemOperand dst = g.ToMemOperand(destination); ++ __ Move(temp, src); ++ __ Fld_d(src, dst); ++ __ Fst_d(temp, dst); ++ } ++ } else if (source->IsFPStackSlot()) { ++ DCHECK(destination->IsFPStackSlot()); ++ Register temp_0 = kScratchReg; ++ MemOperand src0 = g.ToMemOperand(source); ++ MemOperand src1(src0.base(), src0.offset() + kIntSize); ++ MemOperand dst0 = g.ToMemOperand(destination); ++ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize); ++ FPURegister temp_1 = kScratchDoubleReg; ++ __ Fld_d(temp_1, dst0); // Save destination in temp_1. ++ __ Ld_w(temp_0, src0); // Then use temp_0 to copy source to destination. ++ __ St_w(temp_0, dst0); ++ __ Ld_w(temp_0, src1); ++ __ St_w(temp_0, dst1); ++ __ Fst_d(temp_1, src0); ++ } else { ++ // No other combinations are possible. ++ UNREACHABLE(); ++ } ++} ++ ++void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { ++ // On 64-bit LOONG64 we emit the jump tables inline. ++ UNREACHABLE(); ++} ++ ++#undef ASSEMBLE_ATOMIC_LOAD_INTEGER ++#undef ASSEMBLE_ATOMIC_STORE_INTEGER ++#undef ASSEMBLE_ATOMIC_BINOP ++#undef ASSEMBLE_ATOMIC_BINOP_EXT ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER ++#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT ++#undef ASSEMBLE_IEEE754_BINOP ++#undef ASSEMBLE_IEEE754_UNOP ++ ++#undef TRACE_MSG ++#undef TRACE_UNIMPL ++#undef __ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h +new file mode 100644 +index 00000000..99328e1e +--- /dev/null ++++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h +@@ -0,0 +1,415 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ ++#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++// LOONG64-specific opcodes that specify which assembly sequence to emit. ++// Most opcodes specify a single instruction. 
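++// The naming mirrors the MIPS64 backend this port appears to be derived
++// from: a "D" prefix (Dadd, Dsub, Dmul, ...) marks the 64-bit variant of an
++// operation, the unprefixed name the 32-bit one.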
++#define TARGET_ARCH_OPCODE_LIST(V) \ ++ V(Loong64Add) \ ++ V(Loong64Dadd) \ ++ V(Loong64DaddOvf) \ ++ V(Loong64Sub) \ ++ V(Loong64Dsub) \ ++ V(Loong64DsubOvf) \ ++ V(Loong64Mul) \ ++ V(Loong64MulOvf) \ ++ V(Loong64MulHigh) \ ++ V(Loong64DMulHigh) \ ++ V(Loong64MulHighU) \ ++ V(Loong64Dmul) \ ++ V(Loong64Div) \ ++ V(Loong64Ddiv) \ ++ V(Loong64DivU) \ ++ V(Loong64DdivU) \ ++ V(Loong64Mod) \ ++ V(Loong64Dmod) \ ++ V(Loong64ModU) \ ++ V(Loong64DmodU) \ ++ V(Loong64And) \ ++ V(Loong64And32) \ ++ V(Loong64Or) \ ++ V(Loong64Or32) \ ++ V(Loong64Nor) \ ++ V(Loong64Nor32) \ ++ V(Loong64Xor) \ ++ V(Loong64Xor32) \ ++ V(Loong64Clz) \ ++ V(Loong64Lsa) \ ++ V(Loong64Dlsa) \ ++ V(Loong64Shl) \ ++ V(Loong64Shr) \ ++ V(Loong64Sar) \ ++ V(Loong64Ext) \ ++ V(Loong64Ins) \ ++ V(Loong64Dext) \ ++ V(Loong64Dins) \ ++ V(Loong64Dclz) \ ++ V(Loong64Ctz) \ ++ V(Loong64Dctz) \ ++ V(Loong64Popcnt) \ ++ V(Loong64Dpopcnt) \ ++ V(Loong64Dshl) \ ++ V(Loong64Dshr) \ ++ V(Loong64Dsar) \ ++ V(Loong64Ror) \ ++ V(Loong64Dror) \ ++ V(Loong64Mov) \ ++ V(Loong64Tst) \ ++ V(Loong64Cmp) \ ++ V(Loong64CmpS) \ ++ V(Loong64AddS) \ ++ V(Loong64SubS) \ ++ V(Loong64MulS) \ ++ V(Loong64DivS) \ ++ V(Loong64ModS) \ ++ V(Loong64AbsS) \ ++ V(Loong64NegS) \ ++ V(Loong64SqrtS) \ ++ V(Loong64MaxS) \ ++ V(Loong64MinS) \ ++ V(Loong64CmpD) \ ++ V(Loong64AddD) \ ++ V(Loong64SubD) \ ++ V(Loong64MulD) \ ++ V(Loong64DivD) \ ++ V(Loong64ModD) \ ++ V(Loong64AbsD) \ ++ V(Loong64NegD) \ ++ V(Loong64SqrtD) \ ++ V(Loong64MaxD) \ ++ V(Loong64MinD) \ ++ V(Loong64Float64RoundDown) \ ++ V(Loong64Float64RoundTruncate) \ ++ V(Loong64Float64RoundUp) \ ++ V(Loong64Float64RoundTiesEven) \ ++ V(Loong64Float32RoundDown) \ ++ V(Loong64Float32RoundTruncate) \ ++ V(Loong64Float32RoundUp) \ ++ V(Loong64Float32RoundTiesEven) \ ++ V(Loong64CvtSD) \ ++ V(Loong64CvtDS) \ ++ V(Loong64TruncWD) \ ++ V(Loong64RoundWD) \ ++ V(Loong64FloorWD) \ ++ V(Loong64CeilWD) \ ++ V(Loong64TruncWS) \ ++ V(Loong64RoundWS) \ ++ V(Loong64FloorWS) \ ++ V(Loong64CeilWS) \ ++ V(Loong64TruncLS) \ ++ V(Loong64TruncLD) \ ++ V(Loong64TruncUwD) \ ++ V(Loong64TruncUwS) \ ++ V(Loong64TruncUlS) \ ++ V(Loong64TruncUlD) \ ++ V(Loong64CvtDW) \ ++ V(Loong64CvtSL) \ ++ V(Loong64CvtSW) \ ++ V(Loong64CvtSUw) \ ++ V(Loong64CvtSUl) \ ++ V(Loong64CvtDL) \ ++ V(Loong64CvtDUw) \ ++ V(Loong64CvtDUl) \ ++ V(Loong64Lb) \ ++ V(Loong64Lbu) \ ++ V(Loong64Sb) \ ++ V(Loong64Lh) \ ++ V(Loong64Ulh) \ ++ V(Loong64Lhu) \ ++ V(Loong64Ulhu) \ ++ V(Loong64Sh) \ ++ V(Loong64Ush) \ ++ V(Loong64Ld) \ ++ V(Loong64Uld) \ ++ V(Loong64Lw) \ ++ V(Loong64Ulw) \ ++ V(Loong64Lwu) \ ++ V(Loong64Ulwu) \ ++ V(Loong64Sw) \ ++ V(Loong64Usw) \ ++ V(Loong64Sd) \ ++ V(Loong64Usd) \ ++ V(Loong64Lwc1) \ ++ V(Loong64Ulwc1) \ ++ V(Loong64Swc1) \ ++ V(Loong64Uswc1) \ ++ V(Loong64Ldc1) \ ++ V(Loong64Uldc1) \ ++ V(Loong64Sdc1) \ ++ V(Loong64Usdc1) \ ++ V(Loong64BitcastDL) \ ++ V(Loong64BitcastLD) \ ++ V(Loong64Float64ExtractLowWord32) \ ++ V(Loong64Float64ExtractHighWord32) \ ++ V(Loong64Float64InsertLowWord32) \ ++ V(Loong64Float64InsertHighWord32) \ ++ V(Loong64Float32Max) \ ++ V(Loong64Float64Max) \ ++ V(Loong64Float32Min) \ ++ V(Loong64Float64Min) \ ++ V(Loong64Float64SilenceNaN) \ ++ V(Loong64Push) \ ++ V(Loong64Peek) \ ++ V(Loong64StoreToStackSlot) \ ++ V(Loong64ByteSwap64) \ ++ V(Loong64ByteSwap32) \ ++ V(Loong64StackClaim) \ ++ V(Loong64Seb) \ ++ V(Loong64Seh) \ ++ V(Loong64Sync) \ ++ V(Loong64AssertEqual) \ ++ V(Loong64S128Zero) \ ++ V(Loong64I32x4Splat) \ ++ V(Loong64I32x4ExtractLane) \ ++ V(Loong64I32x4ReplaceLane) \ ++ V(Loong64I32x4Add) \ ++ 
V(Loong64I32x4AddHoriz) \ ++ V(Loong64I32x4Sub) \ ++ V(Loong64F64x2Abs) \ ++ V(Loong64F64x2Neg) \ ++ V(Loong64F32x4Splat) \ ++ V(Loong64F32x4ExtractLane) \ ++ V(Loong64F32x4ReplaceLane) \ ++ V(Loong64F32x4SConvertI32x4) \ ++ V(Loong64F32x4UConvertI32x4) \ ++ V(Loong64I32x4Mul) \ ++ V(Loong64I32x4MaxS) \ ++ V(Loong64I32x4MinS) \ ++ V(Loong64I32x4Eq) \ ++ V(Loong64I32x4Ne) \ ++ V(Loong64I32x4Shl) \ ++ V(Loong64I32x4ShrS) \ ++ V(Loong64I32x4ShrU) \ ++ V(Loong64I32x4MaxU) \ ++ V(Loong64I32x4MinU) \ ++ V(Loong64F64x2Sqrt) \ ++ V(Loong64F64x2Add) \ ++ V(Loong64F64x2Sub) \ ++ V(Loong64F64x2Mul) \ ++ V(Loong64F64x2Div) \ ++ V(Loong64F64x2Min) \ ++ V(Loong64F64x2Max) \ ++ V(Loong64F64x2Eq) \ ++ V(Loong64F64x2Ne) \ ++ V(Loong64F64x2Lt) \ ++ V(Loong64F64x2Le) \ ++ V(Loong64F64x2Splat) \ ++ V(Loong64F64x2ExtractLane) \ ++ V(Loong64F64x2ReplaceLane) \ ++ V(Loong64I64x2Splat) \ ++ V(Loong64I64x2ExtractLane) \ ++ V(Loong64I64x2ReplaceLane) \ ++ V(Loong64I64x2Add) \ ++ V(Loong64I64x2Sub) \ ++ V(Loong64I64x2Mul) \ ++ V(Loong64I64x2Neg) \ ++ V(Loong64I64x2Shl) \ ++ V(Loong64I64x2ShrS) \ ++ V(Loong64I64x2ShrU) \ ++ V(Loong64F32x4Abs) \ ++ V(Loong64F32x4Neg) \ ++ V(Loong64F32x4Sqrt) \ ++ V(Loong64F32x4RecipApprox) \ ++ V(Loong64F32x4RecipSqrtApprox) \ ++ V(Loong64F32x4Add) \ ++ V(Loong64F32x4AddHoriz) \ ++ V(Loong64F32x4Sub) \ ++ V(Loong64F32x4Mul) \ ++ V(Loong64F32x4Div) \ ++ V(Loong64F32x4Max) \ ++ V(Loong64F32x4Min) \ ++ V(Loong64F32x4Eq) \ ++ V(Loong64F32x4Ne) \ ++ V(Loong64F32x4Lt) \ ++ V(Loong64F32x4Le) \ ++ V(Loong64I32x4SConvertF32x4) \ ++ V(Loong64I32x4UConvertF32x4) \ ++ V(Loong64I32x4Neg) \ ++ V(Loong64I32x4GtS) \ ++ V(Loong64I32x4GeS) \ ++ V(Loong64I32x4GtU) \ ++ V(Loong64I32x4GeU) \ ++ V(Loong64I32x4Abs) \ ++ V(Loong64I16x8Splat) \ ++ V(Loong64I16x8ExtractLaneU) \ ++ V(Loong64I16x8ExtractLaneS) \ ++ V(Loong64I16x8ReplaceLane) \ ++ V(Loong64I16x8Neg) \ ++ V(Loong64I16x8Shl) \ ++ V(Loong64I16x8ShrS) \ ++ V(Loong64I16x8ShrU) \ ++ V(Loong64I16x8Add) \ ++ V(Loong64I16x8AddSaturateS) \ ++ V(Loong64I16x8AddHoriz) \ ++ V(Loong64I16x8Sub) \ ++ V(Loong64I16x8SubSaturateS) \ ++ V(Loong64I16x8Mul) \ ++ V(Loong64I16x8MaxS) \ ++ V(Loong64I16x8MinS) \ ++ V(Loong64I16x8Eq) \ ++ V(Loong64I16x8Ne) \ ++ V(Loong64I16x8GtS) \ ++ V(Loong64I16x8GeS) \ ++ V(Loong64I16x8AddSaturateU) \ ++ V(Loong64I16x8SubSaturateU) \ ++ V(Loong64I16x8MaxU) \ ++ V(Loong64I16x8MinU) \ ++ V(Loong64I16x8GtU) \ ++ V(Loong64I16x8GeU) \ ++ V(Loong64I16x8RoundingAverageU) \ ++ V(Loong64I16x8Abs) \ ++ V(Loong64I8x16Splat) \ ++ V(Loong64I8x16ExtractLaneU) \ ++ V(Loong64I8x16ExtractLaneS) \ ++ V(Loong64I8x16ReplaceLane) \ ++ V(Loong64I8x16Neg) \ ++ V(Loong64I8x16Shl) \ ++ V(Loong64I8x16ShrS) \ ++ V(Loong64I8x16Add) \ ++ V(Loong64I8x16AddSaturateS) \ ++ V(Loong64I8x16Sub) \ ++ V(Loong64I8x16SubSaturateS) \ ++ V(Loong64I8x16Mul) \ ++ V(Loong64I8x16MaxS) \ ++ V(Loong64I8x16MinS) \ ++ V(Loong64I8x16Eq) \ ++ V(Loong64I8x16Ne) \ ++ V(Loong64I8x16GtS) \ ++ V(Loong64I8x16GeS) \ ++ V(Loong64I8x16ShrU) \ ++ V(Loong64I8x16AddSaturateU) \ ++ V(Loong64I8x16SubSaturateU) \ ++ V(Loong64I8x16MaxU) \ ++ V(Loong64I8x16MinU) \ ++ V(Loong64I8x16GtU) \ ++ V(Loong64I8x16GeU) \ ++ V(Loong64I8x16RoundingAverageU) \ ++ V(Loong64I8x16Abs) \ ++ V(Loong64S128And) \ ++ V(Loong64S128Or) \ ++ V(Loong64S128Xor) \ ++ V(Loong64S128Not) \ ++ V(Loong64S128Select) \ ++ V(Loong64S128AndNot) \ ++ V(Loong64S1x4AnyTrue) \ ++ V(Loong64S1x4AllTrue) \ ++ V(Loong64S1x8AnyTrue) \ ++ V(Loong64S1x8AllTrue) \ ++ V(Loong64S1x16AnyTrue) \ ++ V(Loong64S1x16AllTrue) \ ++ 
V(Loong64S32x4InterleaveRight) \ ++ V(Loong64S32x4InterleaveLeft) \ ++ V(Loong64S32x4PackEven) \ ++ V(Loong64S32x4PackOdd) \ ++ V(Loong64S32x4InterleaveEven) \ ++ V(Loong64S32x4InterleaveOdd) \ ++ V(Loong64S32x4Shuffle) \ ++ V(Loong64S16x8InterleaveRight) \ ++ V(Loong64S16x8InterleaveLeft) \ ++ V(Loong64S16x8PackEven) \ ++ V(Loong64S16x8PackOdd) \ ++ V(Loong64S16x8InterleaveEven) \ ++ V(Loong64S16x8InterleaveOdd) \ ++ V(Loong64S16x4Reverse) \ ++ V(Loong64S16x2Reverse) \ ++ V(Loong64S8x16InterleaveRight) \ ++ V(Loong64S8x16InterleaveLeft) \ ++ V(Loong64S8x16PackEven) \ ++ V(Loong64S8x16PackOdd) \ ++ V(Loong64S8x16InterleaveEven) \ ++ V(Loong64S8x16InterleaveOdd) \ ++ V(Loong64S8x16Shuffle) \ ++ V(Loong64S8x16Swizzle) \ ++ V(Loong64S8x16Concat) \ ++ V(Loong64S8x8Reverse) \ ++ V(Loong64S8x4Reverse) \ ++ V(Loong64S8x2Reverse) \ ++ V(Loong64S8x16LoadSplat) \ ++ V(Loong64S16x8LoadSplat) \ ++ V(Loong64S32x4LoadSplat) \ ++ V(Loong64S64x2LoadSplat) \ ++ V(Loong64I16x8Load8x8S) \ ++ V(Loong64I16x8Load8x8U) \ ++ V(Loong64I32x4Load16x4S) \ ++ V(Loong64I32x4Load16x4U) \ ++ V(Loong64I64x2Load32x2S) \ ++ V(Loong64I64x2Load32x2U) \ ++ V(Loong64I32x4SConvertI16x8Low) \ ++ V(Loong64I32x4SConvertI16x8High) \ ++ V(Loong64I32x4UConvertI16x8Low) \ ++ V(Loong64I32x4UConvertI16x8High) \ ++ V(Loong64I16x8SConvertI8x16Low) \ ++ V(Loong64I16x8SConvertI8x16High) \ ++ V(Loong64I16x8SConvertI32x4) \ ++ V(Loong64I16x8UConvertI32x4) \ ++ V(Loong64I16x8UConvertI8x16Low) \ ++ V(Loong64I16x8UConvertI8x16High) \ ++ V(Loong64I8x16SConvertI16x8) \ ++ V(Loong64I8x16UConvertI16x8) \ ++ V(Loong64Word64AtomicLoadUint8) \ ++ V(Loong64Word64AtomicLoadUint16) \ ++ V(Loong64Word64AtomicLoadUint32) \ ++ V(Loong64Word64AtomicLoadUint64) \ ++ V(Loong64Word64AtomicStoreWord8) \ ++ V(Loong64Word64AtomicStoreWord16) \ ++ V(Loong64Word64AtomicStoreWord32) \ ++ V(Loong64Word64AtomicStoreWord64) \ ++ V(Loong64Word64AtomicAddUint8) \ ++ V(Loong64Word64AtomicAddUint16) \ ++ V(Loong64Word64AtomicAddUint32) \ ++ V(Loong64Word64AtomicAddUint64) \ ++ V(Loong64Word64AtomicSubUint8) \ ++ V(Loong64Word64AtomicSubUint16) \ ++ V(Loong64Word64AtomicSubUint32) \ ++ V(Loong64Word64AtomicSubUint64) \ ++ V(Loong64Word64AtomicAndUint8) \ ++ V(Loong64Word64AtomicAndUint16) \ ++ V(Loong64Word64AtomicAndUint32) \ ++ V(Loong64Word64AtomicAndUint64) \ ++ V(Loong64Word64AtomicOrUint8) \ ++ V(Loong64Word64AtomicOrUint16) \ ++ V(Loong64Word64AtomicOrUint32) \ ++ V(Loong64Word64AtomicOrUint64) \ ++ V(Loong64Word64AtomicXorUint8) \ ++ V(Loong64Word64AtomicXorUint16) \ ++ V(Loong64Word64AtomicXorUint32) \ ++ V(Loong64Word64AtomicXorUint64) \ ++ V(Loong64Word64AtomicExchangeUint8) \ ++ V(Loong64Word64AtomicExchangeUint16) \ ++ V(Loong64Word64AtomicExchangeUint32) \ ++ V(Loong64Word64AtomicExchangeUint64) \ ++ V(Loong64Word64AtomicCompareExchangeUint8) \ ++ V(Loong64Word64AtomicCompareExchangeUint16) \ ++ V(Loong64Word64AtomicCompareExchangeUint32) \ ++ V(Loong64Word64AtomicCompareExchangeUint64) ++ ++// Addressing modes represent the "shape" of inputs to an instruction. ++// Many instructions support multiple addressing modes. Addressing modes ++// are encoded into the InstructionCode of the instruction and tell the ++// code generator after register allocation which assembler method to call. 
++// ++// We use the following local notation for addressing modes: ++// ++// R = register ++// O = register or stack slot ++// D = double register ++// I = immediate (handle, external, int32) ++// MRI = [register + immediate] ++// MRR = [register + register] ++// TODO(plind): Add the new r6 address modes. ++#define TARGET_ADDRESSING_MODE_LIST(V) \ ++ V(MRI) /* [%r0 + K] */ \ ++ V(MRR) /* [%r0 + %r1] */ ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_ +diff --git a/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc +new file mode 100644 +index 00000000..8437aa25 +--- /dev/null ++++ b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc +@@ -0,0 +1,1537 @@ ++// Copyright 2015 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/compiler/backend/instruction-scheduler.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++bool InstructionScheduler::SchedulerSupported() { return true; } ++ ++int InstructionScheduler::GetTargetInstructionFlags( ++ const Instruction* instr) const { ++ switch (instr->arch_opcode()) { ++ case kLoong64AbsD: ++ case kLoong64AbsS: ++ case kLoong64Add: ++ case kLoong64AddD: ++ case kLoong64AddS: ++ case kLoong64And: ++ case kLoong64And32: ++ case kLoong64AssertEqual: ++ case kLoong64BitcastDL: ++ case kLoong64BitcastLD: ++ case kLoong64ByteSwap32: ++ case kLoong64ByteSwap64: ++ case kLoong64CeilWD: ++ case kLoong64CeilWS: ++ case kLoong64Clz: ++ case kLoong64Cmp: ++ case kLoong64CmpD: ++ case kLoong64CmpS: ++ case kLoong64Ctz: ++ case kLoong64CvtDL: ++ case kLoong64CvtDS: ++ case kLoong64CvtDUl: ++ case kLoong64CvtDUw: ++ case kLoong64CvtDW: ++ case kLoong64CvtSD: ++ case kLoong64CvtSL: ++ case kLoong64CvtSUl: ++ case kLoong64CvtSUw: ++ case kLoong64CvtSW: ++ case kLoong64DMulHigh: ++ case kLoong64MulHighU: ++ case kLoong64Dadd: ++ case kLoong64DaddOvf: ++ case kLoong64Dclz: ++ case kLoong64Dctz: ++ case kLoong64Ddiv: ++ case kLoong64DdivU: ++ case kLoong64Dext: ++ case kLoong64Dins: ++ case kLoong64Div: ++ case kLoong64DivD: ++ case kLoong64DivS: ++ case kLoong64DivU: ++ case kLoong64Dlsa: ++ case kLoong64Dmod: ++ case kLoong64DmodU: ++ case kLoong64Dmul: ++ case kLoong64Dpopcnt: ++ case kLoong64Dror: ++ case kLoong64Dsar: ++ case kLoong64Dshl: ++ case kLoong64Dshr: ++ case kLoong64Dsub: ++ case kLoong64DsubOvf: ++ case kLoong64Ext: ++ case kLoong64F64x2Abs: ++ case kLoong64F64x2Neg: ++ case kLoong64F64x2Sqrt: ++ case kLoong64F64x2Add: ++ case kLoong64F64x2Sub: ++ case kLoong64F64x2Mul: ++ case kLoong64F64x2Div: ++ case kLoong64F64x2Min: ++ case kLoong64F64x2Max: ++ case kLoong64F64x2Eq: ++ case kLoong64F64x2Ne: ++ case kLoong64F64x2Lt: ++ case kLoong64F64x2Le: ++ case kLoong64I64x2Splat: ++ case kLoong64I64x2ExtractLane: ++ case kLoong64I64x2ReplaceLane: ++ case kLoong64I64x2Add: ++ case kLoong64I64x2Sub: ++ case kLoong64I64x2Mul: ++ case kLoong64I64x2Neg: ++ case kLoong64I64x2Shl: ++ case kLoong64I64x2ShrS: ++ case kLoong64I64x2ShrU: ++ case kLoong64F32x4Abs: ++ case kLoong64F32x4Add: ++ case kLoong64F32x4AddHoriz: ++ case kLoong64F32x4Eq: ++ case kLoong64F32x4ExtractLane: ++ case kLoong64F32x4Lt: ++ case kLoong64F32x4Le: ++ case kLoong64F32x4Max: ++ case kLoong64F32x4Min: ++ case 
kLoong64F32x4Mul: ++ case kLoong64F32x4Div: ++ case kLoong64F32x4Ne: ++ case kLoong64F32x4Neg: ++ case kLoong64F32x4Sqrt: ++ case kLoong64F32x4RecipApprox: ++ case kLoong64F32x4RecipSqrtApprox: ++ case kLoong64F32x4ReplaceLane: ++ case kLoong64F32x4SConvertI32x4: ++ case kLoong64F32x4Splat: ++ case kLoong64F32x4Sub: ++ case kLoong64F32x4UConvertI32x4: ++ case kLoong64F64x2Splat: ++ case kLoong64F64x2ExtractLane: ++ case kLoong64F64x2ReplaceLane: ++ case kLoong64Float32Max: ++ case kLoong64Float32Min: ++ case kLoong64Float32RoundDown: ++ case kLoong64Float32RoundTiesEven: ++ case kLoong64Float32RoundTruncate: ++ case kLoong64Float32RoundUp: ++ case kLoong64Float64ExtractLowWord32: ++ case kLoong64Float64ExtractHighWord32: ++ case kLoong64Float64InsertLowWord32: ++ case kLoong64Float64InsertHighWord32: ++ case kLoong64Float64Max: ++ case kLoong64Float64Min: ++ case kLoong64Float64RoundDown: ++ case kLoong64Float64RoundTiesEven: ++ case kLoong64Float64RoundTruncate: ++ case kLoong64Float64RoundUp: ++ case kLoong64Float64SilenceNaN: ++ case kLoong64FloorWD: ++ case kLoong64FloorWS: ++ case kLoong64I16x8Add: ++ case kLoong64I16x8AddHoriz: ++ case kLoong64I16x8AddSaturateS: ++ case kLoong64I16x8AddSaturateU: ++ case kLoong64I16x8Eq: ++ case kLoong64I16x8ExtractLaneU: ++ case kLoong64I16x8ExtractLaneS: ++ case kLoong64I16x8GeS: ++ case kLoong64I16x8GeU: ++ case kLoong64I16x8GtS: ++ case kLoong64I16x8GtU: ++ case kLoong64I16x8MaxS: ++ case kLoong64I16x8MaxU: ++ case kLoong64I16x8MinS: ++ case kLoong64I16x8MinU: ++ case kLoong64I16x8Mul: ++ case kLoong64I16x8Ne: ++ case kLoong64I16x8Neg: ++ case kLoong64I16x8ReplaceLane: ++ case kLoong64I8x16SConvertI16x8: ++ case kLoong64I16x8SConvertI32x4: ++ case kLoong64I16x8SConvertI8x16High: ++ case kLoong64I16x8SConvertI8x16Low: ++ case kLoong64I16x8Shl: ++ case kLoong64I16x8ShrS: ++ case kLoong64I16x8ShrU: ++ case kLoong64I16x8Splat: ++ case kLoong64I16x8Sub: ++ case kLoong64I16x8SubSaturateS: ++ case kLoong64I16x8SubSaturateU: ++ case kLoong64I8x16UConvertI16x8: ++ case kLoong64I16x8UConvertI32x4: ++ case kLoong64I16x8UConvertI8x16High: ++ case kLoong64I16x8UConvertI8x16Low: ++ case kLoong64I16x8RoundingAverageU: ++ case kLoong64I16x8Abs: ++ case kLoong64I32x4Add: ++ case kLoong64I32x4AddHoriz: ++ case kLoong64I32x4Eq: ++ case kLoong64I32x4ExtractLane: ++ case kLoong64I32x4GeS: ++ case kLoong64I32x4GeU: ++ case kLoong64I32x4GtS: ++ case kLoong64I32x4GtU: ++ case kLoong64I32x4MaxS: ++ case kLoong64I32x4MaxU: ++ case kLoong64I32x4MinS: ++ case kLoong64I32x4MinU: ++ case kLoong64I32x4Mul: ++ case kLoong64I32x4Ne: ++ case kLoong64I32x4Neg: ++ case kLoong64I32x4ReplaceLane: ++ case kLoong64I32x4SConvertF32x4: ++ case kLoong64I32x4SConvertI16x8High: ++ case kLoong64I32x4SConvertI16x8Low: ++ case kLoong64I32x4Shl: ++ case kLoong64I32x4ShrS: ++ case kLoong64I32x4ShrU: ++ case kLoong64I32x4Splat: ++ case kLoong64I32x4Sub: ++ case kLoong64I32x4UConvertF32x4: ++ case kLoong64I32x4UConvertI16x8High: ++ case kLoong64I32x4UConvertI16x8Low: ++ case kLoong64I32x4Abs: ++ case kLoong64I8x16Add: ++ case kLoong64I8x16AddSaturateS: ++ case kLoong64I8x16AddSaturateU: ++ case kLoong64I8x16Eq: ++ case kLoong64I8x16ExtractLaneU: ++ case kLoong64I8x16ExtractLaneS: ++ case kLoong64I8x16GeS: ++ case kLoong64I8x16GeU: ++ case kLoong64I8x16GtS: ++ case kLoong64I8x16GtU: ++ case kLoong64I8x16MaxS: ++ case kLoong64I8x16MaxU: ++ case kLoong64I8x16MinS: ++ case kLoong64I8x16MinU: ++ case kLoong64I8x16Mul: ++ case kLoong64I8x16Ne: ++ case kLoong64I8x16Neg: ++ case kLoong64I8x16ReplaceLane: 
++ case kLoong64I8x16Shl: ++ case kLoong64I8x16ShrS: ++ case kLoong64I8x16ShrU: ++ case kLoong64I8x16Splat: ++ case kLoong64I8x16Sub: ++ case kLoong64I8x16SubSaturateS: ++ case kLoong64I8x16SubSaturateU: ++ case kLoong64I8x16RoundingAverageU: ++ case kLoong64I8x16Abs: ++ case kLoong64Ins: ++ case kLoong64Lsa: ++ case kLoong64MaxD: ++ case kLoong64MaxS: ++ case kLoong64MinD: ++ case kLoong64MinS: ++ case kLoong64Mod: ++ case kLoong64ModU: ++ case kLoong64Mov: ++ case kLoong64Mul: ++ case kLoong64MulD: ++ case kLoong64MulHigh: ++ case kLoong64MulOvf: ++ case kLoong64MulS: ++ case kLoong64NegD: ++ case kLoong64NegS: ++ case kLoong64Nor: ++ case kLoong64Nor32: ++ case kLoong64Or: ++ case kLoong64Or32: ++ case kLoong64Popcnt: ++ case kLoong64Ror: ++ case kLoong64RoundWD: ++ case kLoong64RoundWS: ++ case kLoong64S128And: ++ case kLoong64S128Or: ++ case kLoong64S128Not: ++ case kLoong64S128Select: ++ case kLoong64S128AndNot: ++ case kLoong64S128Xor: ++ case kLoong64S128Zero: ++ case kLoong64S16x8InterleaveEven: ++ case kLoong64S16x8InterleaveOdd: ++ case kLoong64S16x8InterleaveLeft: ++ case kLoong64S16x8InterleaveRight: ++ case kLoong64S16x8PackEven: ++ case kLoong64S16x8PackOdd: ++ case kLoong64S16x2Reverse: ++ case kLoong64S16x4Reverse: ++ case kLoong64S1x16AllTrue: ++ case kLoong64S1x16AnyTrue: ++ case kLoong64S1x4AllTrue: ++ case kLoong64S1x4AnyTrue: ++ case kLoong64S1x8AllTrue: ++ case kLoong64S1x8AnyTrue: ++ case kLoong64S32x4InterleaveEven: ++ case kLoong64S32x4InterleaveOdd: ++ case kLoong64S32x4InterleaveLeft: ++ case kLoong64S32x4InterleaveRight: ++ case kLoong64S32x4PackEven: ++ case kLoong64S32x4PackOdd: ++ case kLoong64S32x4Shuffle: ++ case kLoong64S8x16Concat: ++ case kLoong64S8x16InterleaveEven: ++ case kLoong64S8x16InterleaveOdd: ++ case kLoong64S8x16InterleaveLeft: ++ case kLoong64S8x16InterleaveRight: ++ case kLoong64S8x16PackEven: ++ case kLoong64S8x16PackOdd: ++ case kLoong64S8x2Reverse: ++ case kLoong64S8x4Reverse: ++ case kLoong64S8x8Reverse: ++ case kLoong64S8x16Shuffle: ++ case kLoong64S8x16Swizzle: ++ case kLoong64Sar: ++ case kLoong64Seb: ++ case kLoong64Seh: ++ case kLoong64Shl: ++ case kLoong64Shr: ++ case kLoong64SqrtD: ++ case kLoong64SqrtS: ++ case kLoong64Sub: ++ case kLoong64SubD: ++ case kLoong64SubS: ++ case kLoong64TruncLD: ++ case kLoong64TruncLS: ++ case kLoong64TruncUlD: ++ case kLoong64TruncUlS: ++ case kLoong64TruncUwD: ++ case kLoong64TruncUwS: ++ case kLoong64TruncWD: ++ case kLoong64TruncWS: ++ case kLoong64Tst: ++ case kLoong64Xor: ++ case kLoong64Xor32: ++ return kNoOpcodeFlags; ++ ++ case kLoong64Lb: ++ case kLoong64Lbu: ++ case kLoong64Ld: ++ case kLoong64Ldc1: ++ case kLoong64Lh: ++ case kLoong64Lhu: ++ case kLoong64Lw: ++ case kLoong64Lwc1: ++ case kLoong64Lwu: ++ case kLoong64Peek: ++ case kLoong64Uld: ++ case kLoong64Uldc1: ++ case kLoong64Ulh: ++ case kLoong64Ulhu: ++ case kLoong64Ulw: ++ case kLoong64Ulwu: ++ case kLoong64Ulwc1: ++ case kLoong64S8x16LoadSplat: ++ case kLoong64S16x8LoadSplat: ++ case kLoong64S32x4LoadSplat: ++ case kLoong64S64x2LoadSplat: ++ case kLoong64I16x8Load8x8S: ++ case kLoong64I16x8Load8x8U: ++ case kLoong64I32x4Load16x4S: ++ case kLoong64I32x4Load16x4U: ++ case kLoong64I64x2Load32x2S: ++ case kLoong64I64x2Load32x2U: ++ case kLoong64Word64AtomicLoadUint8: ++ case kLoong64Word64AtomicLoadUint16: ++ case kLoong64Word64AtomicLoadUint32: ++ case kLoong64Word64AtomicLoadUint64: ++ ++ return kIsLoadOperation; ++ ++ case kLoong64ModD: ++ case kLoong64ModS: ++ case kLoong64Push: ++ case kLoong64Sb: ++ case kLoong64Sd: ++ case 
kLoong64Sdc1: ++ case kLoong64Sh: ++ case kLoong64StackClaim: ++ case kLoong64StoreToStackSlot: ++ case kLoong64Sw: ++ case kLoong64Swc1: ++ case kLoong64Usd: ++ case kLoong64Usdc1: ++ case kLoong64Ush: ++ case kLoong64Usw: ++ case kLoong64Uswc1: ++ case kLoong64Sync: ++ case kLoong64Word64AtomicStoreWord8: ++ case kLoong64Word64AtomicStoreWord16: ++ case kLoong64Word64AtomicStoreWord32: ++ case kLoong64Word64AtomicStoreWord64: ++ case kLoong64Word64AtomicAddUint8: ++ case kLoong64Word64AtomicAddUint16: ++ case kLoong64Word64AtomicAddUint32: ++ case kLoong64Word64AtomicAddUint64: ++ case kLoong64Word64AtomicSubUint8: ++ case kLoong64Word64AtomicSubUint16: ++ case kLoong64Word64AtomicSubUint32: ++ case kLoong64Word64AtomicSubUint64: ++ case kLoong64Word64AtomicAndUint8: ++ case kLoong64Word64AtomicAndUint16: ++ case kLoong64Word64AtomicAndUint32: ++ case kLoong64Word64AtomicAndUint64: ++ case kLoong64Word64AtomicOrUint8: ++ case kLoong64Word64AtomicOrUint16: ++ case kLoong64Word64AtomicOrUint32: ++ case kLoong64Word64AtomicOrUint64: ++ case kLoong64Word64AtomicXorUint8: ++ case kLoong64Word64AtomicXorUint16: ++ case kLoong64Word64AtomicXorUint32: ++ case kLoong64Word64AtomicXorUint64: ++ case kLoong64Word64AtomicExchangeUint8: ++ case kLoong64Word64AtomicExchangeUint16: ++ case kLoong64Word64AtomicExchangeUint32: ++ case kLoong64Word64AtomicExchangeUint64: ++ case kLoong64Word64AtomicCompareExchangeUint8: ++ case kLoong64Word64AtomicCompareExchangeUint16: ++ case kLoong64Word64AtomicCompareExchangeUint32: ++ case kLoong64Word64AtomicCompareExchangeUint64: ++ return kHasSideEffect; ++ ++#define CASE(Name) case k##Name: ++ COMMON_ARCH_OPCODE_LIST(CASE) ++#undef CASE ++ // Already covered in architecture independent code. ++ UNREACHABLE(); ++ } ++ ++ UNREACHABLE(); ++} ++ ++enum Latency { ++ BRANCH = 4, // Estimated max. ++ RINT_S = 4, // Estimated. ++ RINT_D = 4, // Estimated. ++ ++ MULT = 4, ++ MULTU = 4, ++ DMULT = 4, ++ DMULTU = 4, ++ ++ MUL = 7, ++ DMUL = 7, ++ MUH = 7, ++ MUHU = 7, ++ DMUH = 7, ++ DMUHU = 7, ++ ++ DIV = 50, // Min:11 Max:50 ++ DDIV = 50, ++ DIVU = 50, ++ DDIVU = 50, ++ ++ ABS_S = 4, ++ ABS_D = 4, ++ NEG_S = 4, ++ NEG_D = 4, ++ ADD_S = 4, ++ ADD_D = 4, ++ SUB_S = 4, ++ SUB_D = 4, ++ MAX_S = 4, // Estimated. ++ MIN_S = 4, ++ MAX_D = 4, // Estimated. 
++ MIN_D = 4, ++ C_cond_S = 4, ++ C_cond_D = 4, ++ MUL_S = 4, ++ ++ MADD_S = 4, ++ MSUB_S = 4, ++ NMADD_S = 4, ++ NMSUB_S = 4, ++ ++ CABS_cond_S = 4, ++ CABS_cond_D = 4, ++ ++ CVT_D_S = 4, ++ CVT_PS_PW = 4, ++ ++ CVT_S_W = 4, ++ CVT_S_L = 4, ++ CVT_D_W = 4, ++ CVT_D_L = 4, ++ ++ CVT_S_D = 4, ++ ++ CVT_W_S = 4, ++ CVT_W_D = 4, ++ CVT_L_S = 4, ++ CVT_L_D = 4, ++ ++ CEIL_W_S = 4, ++ CEIL_W_D = 4, ++ CEIL_L_S = 4, ++ CEIL_L_D = 4, ++ ++ FLOOR_W_S = 4, ++ FLOOR_W_D = 4, ++ FLOOR_L_S = 4, ++ FLOOR_L_D = 4, ++ ++ ROUND_W_S = 4, ++ ROUND_W_D = 4, ++ ROUND_L_S = 4, ++ ROUND_L_D = 4, ++ ++ TRUNC_W_S = 4, ++ TRUNC_W_D = 4, ++ TRUNC_L_S = 4, ++ TRUNC_L_D = 4, ++ ++ MOV_S = 4, ++ MOV_D = 4, ++ ++ MOVF_S = 4, ++ MOVF_D = 4, ++ ++ MOVN_S = 4, ++ MOVN_D = 4, ++ ++ MOVT_S = 4, ++ MOVT_D = 4, ++ ++ MOVZ_S = 4, ++ MOVZ_D = 4, ++ ++ MUL_D = 5, ++ MADD_D = 5, ++ MSUB_D = 5, ++ NMADD_D = 5, ++ NMSUB_D = 5, ++ ++ RECIP_S = 13, ++ RECIP_D = 26, ++ ++ RSQRT_S = 17, ++ RSQRT_D = 36, ++ ++ DIV_S = 17, ++ SQRT_S = 17, ++ ++ DIV_D = 32, ++ SQRT_D = 32, ++ ++ MTC1 = 4, ++ MTHC1 = 4, ++ DMTC1 = 4, ++ LWC1 = 4, ++ LDC1 = 4, ++ ++ MFC1 = 1, ++ MFHC1 = 1, ++ DMFC1 = 1, ++ MFHI = 1, ++ MFLO = 1, ++ SWC1 = 1, ++ SDC1 = 1, ++}; ++ ++int DadduLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int DsubuLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int AndLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int OrLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int NorLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. 
++ } ++} ++ ++int XorLatency(bool is_operand_register = true) { ++ return DadduLatency(is_operand_register); ++} ++ ++int MulLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::MUL; ++ } else { ++ return Latency::MUL + 1; ++ } ++} ++ ++int DmulLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DMUL; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MulhuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::MUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DMulhLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DMUH; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DivLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::DIV; ++ } else { ++ return Latency::DIV + 1; ++ } ++} ++ ++int DivuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return Latency::DIVU; ++ } else { ++ return Latency::DIVU + 1; ++ } ++} ++ ++int DdivLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DDIV; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DdivuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = Latency::DDIVU; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int ModLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int ModuLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DmodLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int DmoduLatency(bool is_operand_register = true) { ++ int latency = 0; ++ latency = 1; ++ if (!is_operand_register) { ++ latency += 1; ++ } ++ return latency; ++} ++ ++int MovzLatency() { return Latency::BRANCH + 1; } ++ ++int MovnLatency() { return Latency::BRANCH + 1; } ++ ++int DlsaLatency() { ++ // Estimated max. ++ return DadduLatency() + 1; ++} ++ ++int CallLatency() { ++ // Estimated. ++ return DadduLatency(false) + Latency::BRANCH + 5; ++} ++ ++int JumpLatency() { ++ // Estimated max. ++ return 1 + DadduLatency() + Latency::BRANCH + 2; ++} ++ ++int SmiUntagLatency() { return 1; } ++ ++int PrepareForTailCallLatency() { ++ // Estimated max. ++ return 2 * (DlsaLatency() + DadduLatency(false)) + 2 + Latency::BRANCH + ++ Latency::BRANCH + 2 * DsubuLatency(false) + 2 + Latency::BRANCH + 1; ++} ++ ++int AssemblePopArgumentsAdoptFrameLatency() { ++ return 1 + Latency::BRANCH + 1 + SmiUntagLatency() + ++ PrepareForTailCallLatency(); ++} ++ ++int AssertLatency() { return 1; } ++ ++int PrepareCallCFunctionLatency() { ++ int frame_alignment = TurboAssembler::ActivationFrameAlignment(); ++ if (frame_alignment > kSystemPointerSize) { ++ return 1 + DsubuLatency(false) + AndLatency(false) + 1; ++ } else { ++ return DsubuLatency(false); ++ } ++} ++ ++int AdjustBaseAndOffsetLatency() { ++ return 3; // Estimated max. 
++} ++ ++int AlignedMemoryLatency() { return AdjustBaseAndOffsetLatency() + 1; } ++ ++int UlhuLatency() { return AlignedMemoryLatency(); } ++ ++int UlwLatency() { return AlignedMemoryLatency(); } ++ ++int UlwuLatency() { return AlignedMemoryLatency(); } ++ ++int UldLatency() { return AlignedMemoryLatency(); } ++ ++int Ulwc1Latency() { return AlignedMemoryLatency(); } ++ ++int Uldc1Latency() { return AlignedMemoryLatency(); } ++ ++int UshLatency() { return AlignedMemoryLatency(); } ++ ++int UswLatency() { return AlignedMemoryLatency(); } ++ ++int UsdLatency() { return AlignedMemoryLatency(); } ++ ++int Uswc1Latency() { return AlignedMemoryLatency(); } ++ ++int Usdc1Latency() { return AlignedMemoryLatency(); } ++ ++int Lwc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LWC1; } ++ ++int Swc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SWC1; } ++ ++int Sdc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::SDC1; } ++ ++int Ldc1Latency() { return AdjustBaseAndOffsetLatency() + Latency::LDC1; } ++ ++int MultiPushLatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPushFPULatency() { ++ int latency = DsubuLatency(false); ++ for (int16_t i = kNumRegisters - 1; i >= 0; i--) { ++ latency += Sdc1Latency(); ++ } ++ return latency; ++} ++ ++int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPushLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPushFPULatency(); ++ } ++ return latency; ++} ++ ++int MultiPopLatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency++; ++ } ++ return latency; ++} ++ ++int MultiPopFPULatency() { ++ int latency = DadduLatency(false); ++ for (int16_t i = 0; i < kNumRegisters; i++) { ++ latency += Ldc1Latency(); ++ } ++ return latency; ++} ++ ++int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { ++ int latency = MultiPopLatency(); ++ if (fp_mode == kSaveFPRegs) { ++ latency += MultiPopFPULatency(); ++ } ++ return latency; ++} ++ ++int CallCFunctionHelperLatency() { ++ // Estimated. ++ int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); ++ if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { ++ latency++; ++ } else { ++ latency += DadduLatency(false); ++ } ++ return latency; ++} ++ ++int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } ++ ++int AssembleArchJumpLatency() { ++ // Estimated max. ++ return Latency::BRANCH; ++} ++ ++int GenerateSwitchTableLatency() { ++ int latency = 0; ++ latency = DlsaLatency() + 2; ++ latency += 2; ++ return latency; ++} ++ ++int AssembleArchTableSwitchLatency() { ++ return Latency::BRANCH + GenerateSwitchTableLatency(); ++} ++ ++int DropAndRetLatency() { ++ // Estimated max. ++ return DadduLatency(false) + JumpLatency(); ++} ++ ++int AssemblerReturnLatency() { ++ // Estimated max. ++ return DadduLatency(false) + MultiPopLatency() + MultiPopFPULatency() + ++ Latency::BRANCH + DadduLatency() + 1 + DropAndRetLatency(); ++} ++ ++int TryInlineTruncateDoubleToILatency() { ++ return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int CallStubDelayedLatency() { return 1 + CallLatency(); } ++ ++int TruncateDoubleToIDelayedLatency() { ++ // TODO(loong64): This no longer reflects how TruncateDoubleToI is called. 
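++  // The estimate covers the inline fast-path attempt plus the slow path:
++  // spill the double to a stack slot, call the stub, and pop the slot again.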
++ return TryInlineTruncateDoubleToILatency() + 1 + DsubuLatency(false) + ++ Sdc1Latency() + CallStubDelayedLatency() + DadduLatency(false) + 1; ++} ++ ++int CheckPageFlagLatency() { ++ return AndLatency(false) + AlignedMemoryLatency() + AndLatency(false) + ++ Latency::BRANCH; ++} ++ ++int SltuLatency(bool is_operand_register = true) { ++ if (is_operand_register) { ++ return 1; ++ } else { ++ return 2; // Estimated max. ++ } ++} ++ ++int BranchShortHelperLatency() { ++ return 2; // Estimated max. ++} ++ ++int BranchShortLatency() { return BranchShortHelperLatency(); } ++ ++int MoveLatency() { return 1; } ++ ++int MovToFloatParametersLatency() { return 2 * MoveLatency(); } ++ ++int MovFromFloatResultLatency() { return MoveLatency(); } ++ ++int DaddOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int DsubOverflowLatency() { ++ // Estimated max. ++ return 6; ++} ++ ++int MulOverflowLatency() { ++ // Estimated max. ++ return MulLatency() + MulhLatency() + 2; ++} ++ ++int DclzLatency() { return 1; } ++ ++int CtzLatency() { return 3 + DclzLatency(); } ++ ++int DctzLatency() { return 4; } ++ ++int PopcntLatency() { ++ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + ++ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + ++ 1 + MulLatency() + 1; ++} ++ ++int DpopcntLatency() { ++ return 2 + AndLatency() + DsubuLatency() + 1 + AndLatency() + 1 + ++ AndLatency() + DadduLatency() + 1 + DadduLatency() + 1 + AndLatency() + ++ 1 + DmulLatency() + 1; ++} ++ ++int CompareFLatency() { return Latency::C_cond_S; } ++ ++int CompareF32Latency() { return CompareFLatency(); } ++ ++int CompareF64Latency() { return CompareFLatency(); } ++ ++int CompareIsNanFLatency() { return CompareFLatency(); } ++ ++int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } ++ ++int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } ++ ++int NegsLatency() { return Latency::NEG_S; } ++ ++int NegdLatency() { return Latency::NEG_D; } ++ ++int Float64RoundLatency() { return Latency::RINT_D + 4; } ++ ++int Float32RoundLatency() { return Latency::RINT_S + 4; } ++ ++int Float32MaxLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF32Latency() + Latency::BRANCH; ++ return latency + Latency::MAX_S; ++} ++ ++int Float64MaxLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF64Latency() + Latency::BRANCH; ++ return latency + Latency::MAX_D; ++} ++ ++int Float32MinLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF32Latency() + Latency::BRANCH; ++ return latency + Latency::MIN_S; ++} ++ ++int Float64MinLatency() { ++ // Estimated max. ++ int latency = CompareIsNanF64Latency() + Latency::BRANCH; ++ return latency + Latency::MIN_D; ++} ++ ++int TruncLSLatency(bool load_status) { ++ int latency = Latency::TRUNC_L_S + Latency::DMFC1; ++ if (load_status) { ++ latency += SltuLatency() + 7; ++ } ++ return latency; ++} ++ ++int TruncLDLatency(bool load_status) { ++ int latency = Latency::TRUNC_L_D + Latency::DMFC1; ++ if (load_status) { ++ latency += SltuLatency() + 7; ++ } ++ return latency; ++} ++ ++int TruncUlSLatency() { ++ // Estimated max. ++ return 2 * CompareF32Latency() + CompareIsNanF32Latency() + ++ 4 * Latency::BRANCH + Latency::SUB_S + 2 * Latency::TRUNC_L_S + ++ 3 * Latency::DMFC1 + OrLatency() + Latency::MTC1 + Latency::MOV_S + ++ SltuLatency() + 4; ++} ++ ++int TruncUlDLatency() { ++ // Estimated max. 
++  return 2 * CompareF64Latency() + CompareIsNanF64Latency() +
++         4 * Latency::BRANCH + Latency::SUB_D + 2 * Latency::TRUNC_L_D +
++         3 * Latency::DMFC1 + OrLatency() + Latency::DMTC1 + Latency::MOV_D +
++         SltuLatency() + 4;
++}
++
++int PushLatency() { return DadduLatency() + AlignedMemoryLatency(); }
++
++int ByteSwapSignedLatency() { return 2; }
++
++int LlLatency(int offset) {
++  bool is_one_instruction = is_int14(offset);
++  if (is_one_instruction) {
++    return 1;
++  } else {
++    return 3;
++  }
++}
++
++int ExtractBitsLatency(bool sign_extend, int size) {
++  int latency = 2;
++  if (sign_extend) {
++    switch (size) {
++      case 8:
++      case 16:
++      case 32:
++        latency += 1;
++        break;
++      default:
++        UNREACHABLE();
++    }
++  }
++  return latency;
++}
++
++int InsertBitsLatency() { return 2 + DsubuLatency(false) + 2; }
++
++int ScLatency(int offset) {
++  bool is_one_instruction = is_int14(offset);
++  if (is_one_instruction) {
++    return 1;
++  } else {
++    return 3;
++  }
++}
++
++int Word32AtomicExchangeLatency(bool sign_extend, int size) {
++  return DadduLatency(false) + 1 + DsubuLatency() + 2 + LlLatency(0) +
++         ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
++         ScLatency(0) + BranchShortLatency() + 1;
++}
++
++int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) {
++  return 2 + DsubuLatency() + 2 + LlLatency(0) +
++         ExtractBitsLatency(sign_extend, size) + InsertBitsLatency() +
++         ScLatency(0) + BranchShortLatency() + 1;
++}
++
++int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
++  // Basic latency modeling for LOONG64 instructions. The values have been
++  // determined empirically.
++  switch (instr->arch_opcode()) {
++    case kArchCallCodeObject:
++    case kArchCallWasmFunction:
++      return CallLatency();
++    case kArchTailCallCodeObjectFromJSFunction:
++    case kArchTailCallCodeObject: {
++      int latency = 0;
++      if (instr->arch_opcode() == kArchTailCallCodeObjectFromJSFunction) {
++        latency = AssemblePopArgumentsAdoptFrameLatency();
++      }
++      return latency + JumpLatency();
++    }
++    case kArchTailCallWasm:
++    case kArchTailCallAddress:
++      return JumpLatency();
++    case kArchCallJSFunction: {
++      int latency = 0;
++      if (FLAG_debug_code) {
++        latency = 1 + AssertLatency();
++      }
++      return latency + 1 + DadduLatency(false) + CallLatency();
++    }
++    case kArchPrepareCallCFunction:
++      return PrepareCallCFunctionLatency();
++    case kArchSaveCallerRegisters: {
++      auto fp_mode =
++          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
++      return PushCallerSavedLatency(fp_mode);
++    }
++    case kArchRestoreCallerRegisters: {
++      auto fp_mode =
++          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
++      return PopCallerSavedLatency(fp_mode);
++    }
++    case kArchPrepareTailCall:
++      return 2;
++    case kArchCallCFunction:
++      return CallCFunctionLatency();
++    case kArchJmp:
++      return AssembleArchJumpLatency();
++    case kArchTableSwitch:
++      return AssembleArchTableSwitchLatency();
++    case kArchAbortCSAAssert:
++      return CallLatency() + 1;
++    case kArchDebugBreak:
++      return 1;
++    case kArchComment:
++    case kArchNop:
++    case kArchThrowTerminator:
++    case kArchDeoptimize:
++      return 0;
++    case kArchRet:
++      return AssemblerReturnLatency();
++    case kArchFramePointer:
++      return 1;
++    case kArchParentFramePointer:
++      // Estimated max.
++      return AlignedMemoryLatency();
++    case kArchTruncateDoubleToI:
++      return TruncateDoubleToIDelayedLatency();
++    case kArchStoreWithWriteBarrier:
++      return DadduLatency() + 1 + CheckPageFlagLatency();
++    case kArchStackSlot:
++      // Estimated max.
++ return DadduLatency(false) + AndLatency(false) + AssertLatency() + ++ DadduLatency(false) + AndLatency(false) + BranchShortLatency() + ++ 1 + DsubuLatency() + DadduLatency(); ++ case kArchWordPoisonOnSpeculation: ++ return AndLatency(); ++ case kIeee754Float64Acos: ++ case kIeee754Float64Acosh: ++ case kIeee754Float64Asin: ++ case kIeee754Float64Asinh: ++ case kIeee754Float64Atan: ++ case kIeee754Float64Atanh: ++ case kIeee754Float64Atan2: ++ case kIeee754Float64Cos: ++ case kIeee754Float64Cosh: ++ case kIeee754Float64Cbrt: ++ case kIeee754Float64Exp: ++ case kIeee754Float64Expm1: ++ case kIeee754Float64Log: ++ case kIeee754Float64Log1p: ++ case kIeee754Float64Log10: ++ case kIeee754Float64Log2: ++ case kIeee754Float64Pow: ++ case kIeee754Float64Sin: ++ case kIeee754Float64Sinh: ++ case kIeee754Float64Tan: ++ case kIeee754Float64Tanh: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLoong64Add: ++ case kLoong64Dadd: ++ return DadduLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64DaddOvf: ++ return DaddOverflowLatency(); ++ case kLoong64Sub: ++ case kLoong64Dsub: ++ return DsubuLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64DsubOvf: ++ return DsubOverflowLatency(); ++ case kLoong64Mul: ++ return MulLatency(); ++ case kLoong64MulOvf: ++ return MulOverflowLatency(); ++ case kLoong64MulHigh: ++ return MulhLatency(); ++ case kLoong64MulHighU: ++ return MulhuLatency(); ++ case kLoong64DMulHigh: ++ return DMulhLatency(); ++ case kLoong64Div: { ++ int latency = DivLatency(instr->InputAt(1)->IsRegister()); ++ return latency++; ++ } ++ case kLoong64DivU: { ++ int latency = DivuLatency(instr->InputAt(1)->IsRegister()); ++ return latency++; ++ } ++ case kLoong64Mod: ++ return ModLatency(); ++ case kLoong64ModU: ++ return ModuLatency(); ++ case kLoong64Dmul: ++ return DmulLatency(); ++ case kLoong64Ddiv: { ++ int latency = DdivLatency(); ++ return latency++; ++ } ++ case kLoong64DdivU: { ++ int latency = DdivuLatency(); ++ return latency++; ++ } ++ case kLoong64Dmod: ++ return DmodLatency(); ++ case kLoong64DmodU: ++ return DmoduLatency(); ++ case kLoong64Dlsa: ++ case kLoong64Lsa: ++ return DlsaLatency(); ++ case kLoong64And: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64And32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = AndLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLoong64Or: ++ return OrLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64Or32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = OrLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLoong64Nor: ++ return NorLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64Nor32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = NorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLoong64Xor: ++ return XorLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64Xor32: { ++ bool is_operand_register = instr->InputAt(1)->IsRegister(); ++ int latency = XorLatency(is_operand_register); ++ if (is_operand_register) { ++ return latency + 2; ++ } else { ++ return latency + 1; ++ } ++ } ++ case kLoong64Clz: ++ case kLoong64Dclz: ++ return 
DclzLatency(); ++ case kLoong64Ctz: ++ return CtzLatency(); ++ case kLoong64Dctz: ++ return DctzLatency(); ++ case kLoong64Popcnt: ++ return PopcntLatency(); ++ case kLoong64Dpopcnt: ++ return DpopcntLatency(); ++ case kLoong64Shl: ++ return 1; ++ case kLoong64Shr: ++ case kLoong64Sar: ++ return 2; ++ case kLoong64Ext: ++ case kLoong64Ins: ++ case kLoong64Dext: ++ case kLoong64Dins: ++ case kLoong64Dshl: ++ case kLoong64Dshr: ++ case kLoong64Dsar: ++ case kLoong64Ror: ++ case kLoong64Dror: ++ return 1; ++ case kLoong64Tst: ++ return AndLatency(instr->InputAt(1)->IsRegister()); ++ case kLoong64Mov: ++ return 1; ++ case kLoong64CmpS: ++ return MoveLatency() + CompareF32Latency(); ++ case kLoong64AddS: ++ return Latency::ADD_S; ++ case kLoong64SubS: ++ return Latency::SUB_S; ++ case kLoong64MulS: ++ return Latency::MUL_S; ++ case kLoong64DivS: ++ return Latency::DIV_S; ++ case kLoong64ModS: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLoong64AbsS: ++ return Latency::ABS_S; ++ case kLoong64NegS: ++ return NegdLatency(); ++ case kLoong64SqrtS: ++ return Latency::SQRT_S; ++ case kLoong64MaxS: ++ return Latency::MAX_S; ++ case kLoong64MinS: ++ return Latency::MIN_S; ++ case kLoong64CmpD: ++ return MoveLatency() + CompareF64Latency(); ++ case kLoong64AddD: ++ return Latency::ADD_D; ++ case kLoong64SubD: ++ return Latency::SUB_D; ++ case kLoong64MulD: ++ return Latency::MUL_D; ++ case kLoong64DivD: ++ return Latency::DIV_D; ++ case kLoong64ModD: ++ return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + ++ CallCFunctionLatency() + MovFromFloatResultLatency(); ++ case kLoong64AbsD: ++ return Latency::ABS_D; ++ case kLoong64NegD: ++ return NegdLatency(); ++ case kLoong64SqrtD: ++ return Latency::SQRT_D; ++ case kLoong64MaxD: ++ return Latency::MAX_D; ++ case kLoong64MinD: ++ return Latency::MIN_D; ++ case kLoong64Float64RoundDown: ++ case kLoong64Float64RoundTruncate: ++ case kLoong64Float64RoundUp: ++ case kLoong64Float64RoundTiesEven: ++ return Float64RoundLatency(); ++ case kLoong64Float32RoundDown: ++ case kLoong64Float32RoundTruncate: ++ case kLoong64Float32RoundUp: ++ case kLoong64Float32RoundTiesEven: ++ return Float32RoundLatency(); ++ case kLoong64Float32Max: ++ return Float32MaxLatency(); ++ case kLoong64Float64Max: ++ return Float64MaxLatency(); ++ case kLoong64Float32Min: ++ return Float32MinLatency(); ++ case kLoong64Float64Min: ++ return Float64MinLatency(); ++ case kLoong64Float64SilenceNaN: ++ return Latency::SUB_D; ++ case kLoong64CvtSD: ++ return Latency::CVT_S_D; ++ case kLoong64CvtDS: ++ return Latency::CVT_D_S; ++ case kLoong64CvtDW: ++ return Latency::MTC1 + Latency::CVT_D_W; ++ case kLoong64CvtSW: ++ return Latency::MTC1 + Latency::CVT_S_W; ++ case kLoong64CvtSUw: ++ return 1 + Latency::DMTC1 + Latency::CVT_S_L; ++ case kLoong64CvtSL: ++ return Latency::DMTC1 + Latency::CVT_S_L; ++ case kLoong64CvtDL: ++ return Latency::DMTC1 + Latency::CVT_D_L; ++ case kLoong64CvtDUw: ++ return 1 + Latency::DMTC1 + Latency::CVT_D_L; ++ case kLoong64CvtDUl: ++ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + ++ 2 * Latency::CVT_D_L + Latency::ADD_D; ++ case kLoong64CvtSUl: ++ return 2 * Latency::BRANCH + 3 + 2 * Latency::DMTC1 + ++ 2 * Latency::CVT_S_L + Latency::ADD_S; ++ case kLoong64FloorWD: ++ return Latency::FLOOR_W_D + Latency::MFC1; ++ case kLoong64CeilWD: ++ return Latency::CEIL_W_D + Latency::MFC1; ++ case kLoong64RoundWD: ++ return Latency::ROUND_W_D + Latency::MFC1; ++ 
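// [Editorial note - illustrative, not part of the patch] The unsigned
// truncation cases below (kLoong64TruncUwD/kLoong64TruncUwS) are priced as a
// compare, branches, a subtract and *two* truncations because values at or
// above 2^31 cannot go through a single signed truncate. A sketch of the
// presumed rewrite those latencies model:
//
//   uint32_t TruncUwD(double x) {
//     if (x < 2147483648.0) {              // CompareF64 + branch
//       return (uint32_t)(int32_t)x;       // first TRUNC_W_D
//     }
//     return 0x80000000u |                 // Or of the high bit
//            (uint32_t)(int32_t)(x - 2147483648.0);  // SUB_D + second trunc
//   }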
case kLoong64TruncWD: ++ return Latency::TRUNC_W_D + Latency::MFC1; ++ case kLoong64FloorWS: ++ return Latency::FLOOR_W_S + Latency::MFC1; ++ case kLoong64CeilWS: ++ return Latency::CEIL_W_S + Latency::MFC1; ++ case kLoong64RoundWS: ++ return Latency::ROUND_W_S + Latency::MFC1; ++ case kLoong64TruncWS: ++ return Latency::TRUNC_W_S + Latency::MFC1 + 2 + MovnLatency(); ++ case kLoong64TruncLS: ++ return TruncLSLatency(instr->OutputCount() > 1); ++ case kLoong64TruncLD: ++ return TruncLDLatency(instr->OutputCount() > 1); ++ case kLoong64TruncUwD: ++ // Estimated max. ++ return CompareF64Latency() + 2 * Latency::BRANCH + ++ 2 * Latency::TRUNC_W_D + Latency::SUB_D + OrLatency() + ++ Latency::MTC1 + Latency::MFC1 + Latency::MTHC1 + 1; ++ case kLoong64TruncUwS: ++ // Estimated max. ++ return CompareF32Latency() + 2 * Latency::BRANCH + ++ 2 * Latency::TRUNC_W_S + Latency::SUB_S + OrLatency() + ++ Latency::MTC1 + 2 * Latency::MFC1 + 2 + MovzLatency(); ++ case kLoong64TruncUlS: ++ return TruncUlSLatency(); ++ case kLoong64TruncUlD: ++ return TruncUlDLatency(); ++ case kLoong64BitcastDL: ++ return Latency::DMFC1; ++ case kLoong64BitcastLD: ++ return Latency::DMTC1; ++ case kLoong64Float64ExtractLowWord32: ++ return Latency::MFC1; ++ case kLoong64Float64InsertLowWord32: ++ return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; ++ case kLoong64Float64ExtractHighWord32: ++ return Latency::MFHC1; ++ case kLoong64Float64InsertHighWord32: ++ return Latency::MTHC1; ++ case kLoong64Seb: ++ case kLoong64Seh: ++ return 1; ++ case kLoong64Lbu: ++ case kLoong64Lb: ++ case kLoong64Lhu: ++ case kLoong64Lh: ++ case kLoong64Lwu: ++ case kLoong64Lw: ++ case kLoong64Ld: ++ case kLoong64Sb: ++ case kLoong64Sh: ++ case kLoong64Sw: ++ case kLoong64Sd: ++ return AlignedMemoryLatency(); ++ case kLoong64Lwc1: ++ return Lwc1Latency(); ++ case kLoong64Ldc1: ++ return Ldc1Latency(); ++ case kLoong64Swc1: ++ return Swc1Latency(); ++ case kLoong64Sdc1: ++ return Sdc1Latency(); ++ case kLoong64Ulhu: ++ case kLoong64Ulh: ++ return UlhuLatency(); ++ case kLoong64Ulwu: ++ return UlwuLatency(); ++ case kLoong64Ulw: ++ return UlwLatency(); ++ case kLoong64Uld: ++ return UldLatency(); ++ case kLoong64Ulwc1: ++ return Ulwc1Latency(); ++ case kLoong64Uldc1: ++ return Uldc1Latency(); ++ case kLoong64Ush: ++ return UshLatency(); ++ case kLoong64Usw: ++ return UswLatency(); ++ case kLoong64Usd: ++ return UsdLatency(); ++ case kLoong64Uswc1: ++ return Uswc1Latency(); ++ case kLoong64Usdc1: ++ return Usdc1Latency(); ++ case kLoong64Push: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ latency = Sdc1Latency() + DsubuLatency(false); ++ } else { ++ latency = PushLatency(); ++ } ++ return latency; ++ } ++ case kLoong64Peek: { ++ int latency = 0; ++ if (instr->OutputAt(0)->IsFPRegister()) { ++ auto op = LocationOperand::cast(instr->OutputAt(0)); ++ switch (op->representation()) { ++ case MachineRepresentation::kFloat64: ++ latency = Ldc1Latency(); ++ break; ++ case MachineRepresentation::kFloat32: ++ latency = Latency::LWC1; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ case kLoong64StackClaim: ++ return DsubuLatency(false); ++ case kLoong64StoreToStackSlot: { ++ int latency = 0; ++ if (instr->InputAt(0)->IsFPRegister()) { ++ if (instr->InputAt(0)->IsSimd128Register()) { ++ latency = 1; // Estimated value. 
++ } else { ++ latency = Sdc1Latency(); ++ } ++ } else { ++ latency = AlignedMemoryLatency(); ++ } ++ return latency; ++ } ++ case kLoong64ByteSwap64: ++ return ByteSwapSignedLatency(); ++ case kLoong64ByteSwap32: ++ return ByteSwapSignedLatency(); ++ case kWord32AtomicLoadInt8: ++ case kWord32AtomicLoadUint8: ++ case kWord32AtomicLoadInt16: ++ case kWord32AtomicLoadUint16: ++ case kWord32AtomicLoadWord32: ++ return 2; ++ case kWord32AtomicStoreWord8: ++ case kWord32AtomicStoreWord16: ++ case kWord32AtomicStoreWord32: ++ return 3; ++ case kWord32AtomicExchangeInt8: ++ return Word32AtomicExchangeLatency(true, 8); ++ case kWord32AtomicExchangeUint8: ++ return Word32AtomicExchangeLatency(false, 8); ++ case kWord32AtomicExchangeInt16: ++ return Word32AtomicExchangeLatency(true, 16); ++ case kWord32AtomicExchangeUint16: ++ return Word32AtomicExchangeLatency(false, 16); ++ case kWord32AtomicExchangeWord32: ++ return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1; ++ case kWord32AtomicCompareExchangeInt8: ++ return Word32AtomicCompareExchangeLatency(true, 8); ++ case kWord32AtomicCompareExchangeUint8: ++ return Word32AtomicCompareExchangeLatency(false, 8); ++ case kWord32AtomicCompareExchangeInt16: ++ return Word32AtomicCompareExchangeLatency(true, 16); ++ case kWord32AtomicCompareExchangeUint16: ++ return Word32AtomicCompareExchangeLatency(false, 16); ++ case kWord32AtomicCompareExchangeWord32: ++ return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) + ++ BranchShortLatency() + 1; ++ case kLoong64AssertEqual: ++ return AssertLatency(); ++ default: ++ return 1; ++ } ++} ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc +new file mode 100644 +index 00000000..deb7d220 +--- /dev/null ++++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc +@@ -0,0 +1,3101 @@ ++// Copyright 2014 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#include "src/base/bits.h" ++#include "src/compiler/backend/instruction-selector-impl.h" ++#include "src/compiler/node-matchers.h" ++#include "src/compiler/node-properties.h" ++ ++namespace v8 { ++namespace internal { ++namespace compiler { ++ ++#define TRACE_UNIMPL() \ ++ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) ++ ++#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__) ++ ++// Adds loong64-specific methods for generating InstructionOperands. ++class Loong64OperandGenerator final : public OperandGenerator { ++ public: ++ explicit Loong64OperandGenerator(InstructionSelector* selector) ++ : OperandGenerator(selector) {} ++ ++ InstructionOperand UseOperand(Node* node, InstructionCode opcode) { ++ if (CanBeImmediate(node, opcode)) { ++ return UseImmediate(node); ++ } ++ return UseRegister(node); ++ } ++ ++ // Use the zero register if the node has the immediate value zero, otherwise ++ // assign a register. 
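// [Editorial note - not part of the patch] LoongArch, like MIPS, hardwires
// one general register to zero, so a zero-valued constant can be encoded as
// that register instead of being materialized. Note that the float test
// below compares the bit pattern, not the numeric value:
//
//   bit_cast<int64_t>(0.0) == 0       // +0.0 can use the zero register
//   bit_cast<int64_t>(-0.0) != 0      // sign bit set, -0.0 stays in an FPR
//
// so only +0.0 takes the immediate-zero path, preserving -0.0 semantics.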
++  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
++    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
++        (IsFloatConstant(node) &&
++         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
++      return UseImmediate(node);
++    }
++    return UseRegister(node);
++  }
++
++  bool IsIntegerConstant(Node* node) {
++    return (node->opcode() == IrOpcode::kInt32Constant) ||
++           (node->opcode() == IrOpcode::kInt64Constant);
++  }
++
++  int64_t GetIntegerConstantValue(Node* node) {
++    if (node->opcode() == IrOpcode::kInt32Constant) {
++      return OpParameter<int32_t>(node->op());
++    }
++    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
++    return OpParameter<int64_t>(node->op());
++  }
++
++  bool IsFloatConstant(Node* node) {
++    return (node->opcode() == IrOpcode::kFloat32Constant) ||
++           (node->opcode() == IrOpcode::kFloat64Constant);
++  }
++
++  double GetFloatConstantValue(Node* node) {
++    if (node->opcode() == IrOpcode::kFloat32Constant) {
++      return OpParameter<float>(node->op());
++    }
++    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
++    return OpParameter<double>(node->op());
++  }
++
++  bool CanBeImmediate(Node* node, InstructionCode mode) {
++    return IsIntegerConstant(node) &&
++           CanBeImmediate(GetIntegerConstantValue(node), mode);
++  }
++
++  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
++    switch (ArchOpcodeField::decode(opcode)) {
++      case kLoong64Shl:
++      case kLoong64Sar:
++      case kLoong64Shr:
++        return is_uint5(value);
++      case kLoong64Dshl:
++      case kLoong64Dsar:
++      case kLoong64Dshr:
++        return is_uint6(value);
++      case kLoong64Add:
++      case kLoong64And32:
++      case kLoong64And:
++      case kLoong64Dadd:
++      case kLoong64Or32:
++      case kLoong64Or:
++      case kLoong64Tst:
++      case kLoong64Xor:
++        return is_uint12(value);
++      case kLoong64Lb:
++      case kLoong64Lbu:
++      case kLoong64Sb:
++      case kLoong64Lh:
++      case kLoong64Lhu:
++      case kLoong64Sh:
++      case kLoong64Lw:
++      case kLoong64Sw:
++      case kLoong64Ld:
++      case kLoong64Sd:
++      case kLoong64Lwc1:
++      case kLoong64Swc1:
++      case kLoong64Ldc1:
++      case kLoong64Sdc1:
++        return is_int12(value);
++      default:
++        return is_int12(value);
++    }
++  }
++
++ private:
++  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
++    TRACE_UNIMPL();
++    return false;
++  }
++};
++
++static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
++                    Node* node) {
++  Loong64OperandGenerator g(selector);
++  selector->Emit(opcode, g.DefineAsRegister(node),
++                 g.UseRegister(node->InputAt(0)));
++}
++
++static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
++                     Node* node) {
++  Loong64OperandGenerator g(selector);
++  int32_t imm = OpParameter<int32_t>(node->op());
++  selector->Emit(opcode, g.DefineAsRegister(node),
++                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
++}
++
++static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
++                           Node* node) {
++  Loong64OperandGenerator g(selector);
++  if (g.IsIntegerConstant(node->InputAt(1))) {
++    selector->Emit(opcode, g.DefineAsRegister(node),
++                   g.UseRegister(node->InputAt(0)),
++                   g.UseImmediate(node->InputAt(1)));
++  } else {
++    selector->Emit(opcode, g.DefineAsRegister(node),
++                   g.UseRegister(node->InputAt(0)),
++                   g.UseRegister(node->InputAt(1)));
++  }
++}
++
++static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
++                      Node* node) {
++  Loong64OperandGenerator g(selector);
++  int32_t imm = OpParameter<int32_t>(node->op());
++  selector->Emit(opcode, g.DefineAsRegister(node),
++                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
++                 g.UseRegister(node->InputAt(1)));
++}
++
++static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
++                     Node* node) {
++  Loong64OperandGenerator g(selector);
++  selector->Emit(opcode, g.DefineAsRegister(node),
++                 g.UseRegister(node->InputAt(0)),
++                 g.UseRegister(node->InputAt(1)));
++}
++
++void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
++  Loong64OperandGenerator g(selector);
++  selector->Emit(
++      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
++      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
++}
++
++static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
++                     Node* node) {
++  Loong64OperandGenerator g(selector);
++  selector->Emit(opcode, g.DefineAsRegister(node),
++                 g.UseRegister(node->InputAt(0)),
++                 g.UseOperand(node->InputAt(1), opcode));
++}
++
++struct ExtendingLoadMatcher {
++  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
++      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
++    Initialize(node);
++  }
++
++  bool Matches() const { return matches_; }
++
++  Node* base() const {
++    DCHECK(Matches());
++    return base_;
++  }
++  int64_t immediate() const {
++    DCHECK(Matches());
++    return immediate_;
++  }
++  ArchOpcode opcode() const {
++    DCHECK(Matches());
++    return opcode_;
++  }
++
++ private:
++  bool matches_;
++  InstructionSelector* selector_;
++  Node* base_;
++  int64_t immediate_;
++  ArchOpcode opcode_;
++
++  void Initialize(Node* node) {
++    Int64BinopMatcher m(node);
++    // When loading a 64-bit value and shifting by 32, we should
++    // just load and sign-extend the interesting 4 bytes instead.
++    // This happens, for example, when we're loading and untagging SMIs.
++    DCHECK(m.IsWord64Sar());
++    if (m.left().IsLoad() && m.right().Is(32) &&
++        selector_->CanCover(m.node(), m.left().node())) {
++      DCHECK_EQ(selector_->GetEffectLevel(node),
++                selector_->GetEffectLevel(m.left().node()));
++      MachineRepresentation rep =
++          LoadRepresentationOf(m.left().node()->op()).representation();
++      DCHECK_EQ(3, ElementSizeLog2Of(rep));
++      if (rep != MachineRepresentation::kTaggedSigned &&
++          rep != MachineRepresentation::kTaggedPointer &&
++          rep != MachineRepresentation::kTagged &&
++          rep != MachineRepresentation::kWord64) {
++        return;
++      }
++
++      Loong64OperandGenerator g(selector_);
++      Node* load = m.left().node();
++      Node* offset = load->InputAt(1);
++      base_ = load->InputAt(0);
++      opcode_ = kLoong64Lw;
++      if (g.CanBeImmediate(offset, opcode_)) {
++        immediate_ = g.GetIntegerConstantValue(offset) + 4;
++        matches_ = g.CanBeImmediate(immediate_, kLoong64Lw);
++      }
++    }
++  }
++};
++
++bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
++                          Node* output_node) {
++  ExtendingLoadMatcher m(node, selector);
++  Loong64OperandGenerator g(selector);
++  if (m.Matches()) {
++    InstructionOperand inputs[2];
++    inputs[0] = g.UseRegister(m.base());
++    InstructionCode opcode =
++        m.opcode() | AddressingModeField::encode(kMode_MRI);
++    DCHECK(is_int32(m.immediate()));
++    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
++    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
++    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
++                   inputs);
++    return true;
++  }
++  return false;
++}
++
++bool TryMatchImmediate(InstructionSelector* selector,
++                       InstructionCode* opcode_return, Node* node,
++                       size_t* input_count_return, InstructionOperand* inputs) {
++  Loong64OperandGenerator g(selector);
++  if (g.CanBeImmediate(node, *opcode_return)) {
++    *opcode_return |=
AddressingModeField::encode(kMode_MRI); ++ inputs[0] = g.UseImmediate(node); ++ *input_count_return = 1; ++ return true; ++ } ++ return false; ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, bool has_reverse_opcode, ++ InstructionCode reverse_opcode, ++ FlagsContinuation* cont) { ++ Loong64OperandGenerator g(selector); ++ Int32BinopMatcher m(node); ++ InstructionOperand inputs[2]; ++ size_t input_count = 0; ++ InstructionOperand outputs[1]; ++ size_t output_count = 0; ++ ++ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count, ++ &inputs[1])) { ++ inputs[0] = g.UseRegister(m.left().node()); ++ input_count++; ++ } else if (has_reverse_opcode && ++ TryMatchImmediate(selector, &reverse_opcode, m.left().node(), ++ &input_count, &inputs[1])) { ++ inputs[0] = g.UseRegister(m.right().node()); ++ opcode = reverse_opcode; ++ input_count++; ++ } else { ++ inputs[input_count++] = g.UseRegister(m.left().node()); ++ inputs[input_count++] = g.UseOperand(m.right().node(), opcode); ++ } ++ ++ if (cont->IsDeoptimize()) { ++ // If we can deoptimize as a result of the binop, we need to make sure that ++ // the deopt inputs are not overwritten by the binop result. One way ++ // to achieve that is to declare the output register as same-as-first. ++ outputs[output_count++] = g.DefineSameAsFirst(node); ++ } else { ++ outputs[output_count++] = g.DefineAsRegister(node); ++ } ++ ++ DCHECK_NE(0u, input_count); ++ DCHECK_EQ(1u, output_count); ++ DCHECK_GE(arraysize(inputs), input_count); ++ DCHECK_GE(arraysize(outputs), output_count); ++ ++ selector->EmitWithContinuation(opcode, output_count, outputs, input_count, ++ inputs, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, bool has_reverse_opcode, ++ InstructionCode reverse_opcode) { ++ FlagsContinuation cont; ++ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont) { ++ VisitBinop(selector, node, opcode, false, kArchNop, cont); ++} ++ ++static void VisitBinop(InstructionSelector* selector, Node* node, ++ InstructionCode opcode) { ++ VisitBinop(selector, node, opcode, false, kArchNop); ++} ++ ++void InstructionSelector::VisitStackSlot(Node* node) { ++ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); ++ int alignment = rep.alignment(); ++ int slot = frame_->AllocateSpillSlot(rep.size(), alignment); ++ OperandGenerator g(this); ++ ++ Emit(kArchStackSlot, g.DefineAsRegister(node), ++ sequence()->AddImmediate(Constant(slot)), ++ sequence()->AddImmediate(Constant(alignment)), 0, nullptr); ++} ++ ++void InstructionSelector::VisitAbortCSAAssert(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); ++} ++ ++void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, ++ Node* output = nullptr) { ++ Loong64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(output == nullptr ? node : output), ++ g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR), ++ g.DefineAsRegister(output == nullptr ? 
node : output), ++ g.UseRegister(base), g.UseRegister(index)); ++ } ++} ++ ++void InstructionSelector::VisitLoadTransform(Node* node) { ++ LoadTransformParameters params = LoadTransformParametersOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (params.transformation) { ++ case LoadTransformation::kS8x16LoadSplat: ++ opcode = kLoong64S8x16LoadSplat; ++ break; ++ case LoadTransformation::kS16x8LoadSplat: ++ opcode = kLoong64S16x8LoadSplat; ++ break; ++ case LoadTransformation::kS32x4LoadSplat: ++ opcode = kLoong64S32x4LoadSplat; ++ break; ++ case LoadTransformation::kS64x2LoadSplat: ++ opcode = kLoong64S64x2LoadSplat; ++ break; ++ case LoadTransformation::kI16x8Load8x8S: ++ opcode = kLoong64I16x8Load8x8S; ++ break; ++ case LoadTransformation::kI16x8Load8x8U: ++ opcode = kLoong64I16x8Load8x8U; ++ break; ++ case LoadTransformation::kI32x4Load16x4S: ++ opcode = kLoong64I32x4Load16x4S; ++ break; ++ case LoadTransformation::kI32x4Load16x4U: ++ opcode = kLoong64I32x4Load16x4U; ++ break; ++ case LoadTransformation::kI64x2Load32x2S: ++ opcode = kLoong64I64x2Load32x2S; ++ break; ++ case LoadTransformation::kI64x2Load32x2U: ++ opcode = kLoong64I64x2Load32x2U; ++ break; ++ default: ++ UNIMPLEMENTED(); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kFloat32: ++ opcode = kLoong64Lwc1; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kLoong64Ldc1; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? kLoong64Lbu : kLoong64Lb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kLoong64Lhu : kLoong64Lh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = load_rep.IsUnsigned() ? kLoong64Lwu : kLoong64Lw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kLoong64Ld; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ if (node->opcode() == IrOpcode::kPoisonedLoad) { ++ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); ++ opcode |= MiscField::encode(kMemoryAccessPoisoned); ++ } ++ ++ EmitLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } ++ ++void InstructionSelector::VisitProtectedLoad(Node* node) { ++ // TODO(eholk) ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitStore(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ StoreRepresentation store_rep = StoreRepresentationOf(node->op()); ++ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); ++ MachineRepresentation rep = store_rep.representation(); ++ ++ // TODO(loong64): I guess this could be done in a better way. 
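// [Editorial note - illustrative, not part of the patch] For tagged stores
// that may need a generational write barrier, the branch below emits a single
// kArchStoreWithWriteBarrier whose slow path calls the record-write stub;
// base, index and value are placed in *unique* registers so they remain
// distinct and live across that out-of-line call. Conceptually:
//
//   *(base + index) = value;
//   if (PageFlagsRequireBarrier(base, value)) {  // hypothetical helper
//     RecordWrite(base, index, value);           // out-of-line stub call
//   }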
++  if (write_barrier_kind != kNoWriteBarrier &&
++      V8_LIKELY(!FLAG_disable_write_barriers)) {
++    DCHECK(CanBeTaggedPointer(rep));
++    InstructionOperand inputs[3];
++    size_t input_count = 0;
++    inputs[input_count++] = g.UseUniqueRegister(base);
++    inputs[input_count++] = g.UseUniqueRegister(index);
++    inputs[input_count++] = g.UseUniqueRegister(value);
++    RecordWriteMode record_write_mode =
++        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
++    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
++    size_t const temp_count = arraysize(temps);
++    InstructionCode code = kArchStoreWithWriteBarrier;
++    code |= MiscField::encode(static_cast<int>(record_write_mode));
++    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
++  } else {
++    ArchOpcode opcode = kArchNop;
++    switch (rep) {
++      case MachineRepresentation::kFloat32:
++        opcode = kLoong64Swc1;
++        break;
++      case MachineRepresentation::kFloat64:
++        opcode = kLoong64Sdc1;
++        break;
++      case MachineRepresentation::kBit:  // Fall through.
++      case MachineRepresentation::kWord8:
++        opcode = kLoong64Sb;
++        break;
++      case MachineRepresentation::kWord16:
++        opcode = kLoong64Sh;
++        break;
++      case MachineRepresentation::kWord32:
++        opcode = kLoong64Sw;
++        break;
++      case MachineRepresentation::kTaggedSigned:   // Fall through.
++      case MachineRepresentation::kTaggedPointer:  // Fall through.
++      case MachineRepresentation::kTagged:         // Fall through.
++      case MachineRepresentation::kWord64:
++        opcode = kLoong64Sd;
++        break;
++      case MachineRepresentation::kCompressedPointer:  // Fall through.
++      case MachineRepresentation::kCompressed:         // Fall through.
++      case MachineRepresentation::kNone:
++      case MachineRepresentation::kSimd128:
++        UNREACHABLE();
++        return;
++    }
++
++    if (g.CanBeImmediate(index, opcode)) {
++      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
++           g.UseRegister(base), g.UseImmediate(index),
++           g.UseRegisterOrImmediateZero(value));
++    } else {
++      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
++           g.UseRegister(base), g.UseRegister(index),
++           g.UseRegisterOrImmediateZero(value));
++    }
++  }
++}
++
++void InstructionSelector::VisitProtectedStore(Node* node) {
++  // TODO(eholk)
++  UNIMPLEMENTED();
++}
++
++void InstructionSelector::VisitWord32And(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int32BinopMatcher m(node);
++  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
++      m.right().HasValue()) {
++    uint32_t mask = m.right().Value();
++    uint32_t mask_width = base::bits::CountPopulation(mask);
++    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
++    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
++      // The mask must be contiguous, and occupy the least-significant bits.
++      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
++
++      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
++      // significant bits.
++      Int32BinopMatcher mleft(m.left().node());
++      if (mleft.right().HasValue()) {
++        // Any shift value can match; int32 shifts use `value % 32`.
++        uint32_t lsb = mleft.right().Value() & 0x1F;
++
++        // Ext cannot extract bits past the register size, however since
++        // shifting the original value would have introduced some zeros we can
++        // still use Ext with a smaller mask and the remaining bits will be
++        // zeros.
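// [Editorial worked example - not part of the patch] Take
// Word32And(Word32Shr(x, 4), 0xFFF): mask_width = popcount(0xFFF) = 12 and
// mask_msb = clz32(0xFFF) = 20, so 20 + 12 == 32 and the mask is contiguous
// from bit 0. With lsb = 4 the whole expression becomes one bit-field
// extract, Ext(x, 4, 12), replacing a shift plus a masking AND.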
++        if (lsb + mask_width > 32) mask_width = 32 - lsb;
++
++        Emit(kLoong64Ext, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
++             g.TempImmediate(mask_width));
++        return;
++      }
++      // Other cases fall through to the normal And operation.
++    }
++  }
++  if (m.right().HasValue()) {
++    uint32_t mask = m.right().Value();
++    uint32_t shift = base::bits::CountPopulation(~mask);
++    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
++    if (shift != 0 && shift != 32 && msb + shift == 32) {
++      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
++      // and remove constant loading of inverted mask.
++      Emit(kLoong64Ins, g.DefineSameAsFirst(node),
++           g.UseRegister(m.left().node()), g.TempImmediate(0),
++           g.TempImmediate(shift));
++      return;
++    }
++  }
++  VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
++}
++
++void InstructionSelector::VisitWord64And(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int64BinopMatcher m(node);
++  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
++      m.right().HasValue()) {
++    uint64_t mask = m.right().Value();
++    uint32_t mask_width = base::bits::CountPopulation(mask);
++    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
++    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
++      // The mask must be contiguous, and occupy the least-significant bits.
++      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
++
++      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
++      // significant bits.
++      Int64BinopMatcher mleft(m.left().node());
++      if (mleft.right().HasValue()) {
++        // Any shift value can match; int64 shifts use `value % 64`.
++        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
++
++        // Dext cannot extract bits past the register size, however since
++        // shifting the original value would have introduced some zeros we can
++        // still use Dext with a smaller mask and the remaining bits will be
++        // zeros.
++        if (lsb + mask_width > 64) mask_width = 64 - lsb;
++
++        if (lsb == 0 && mask_width == 64) {
++          Emit(kArchNop, g.DefineSameAsFirst(node),
++               g.Use(mleft.left().node()));
++        } else {
++          Emit(kLoong64Dext, g.DefineAsRegister(node),
++               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
++               g.TempImmediate(static_cast<int32_t>(mask_width)));
++        }
++        return;
++      }
++      // Other cases fall through to the normal And operation.
++    }
++  }
++  if (m.right().HasValue()) {
++    uint64_t mask = m.right().Value();
++    uint32_t shift = base::bits::CountPopulation(~mask);
++    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
++    if (shift != 0 && shift < 32 && msb + shift == 64) {
++      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
++      // and remove constant loading of inverted mask. Dins cannot insert bits
++      // past word size, so shifts smaller than 32 are covered.
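// [Editorial worked example - not part of the patch] For a 64-bit mask such
// as ~uint64_t{0xFFF} (0xFFFFFFFFFFFFF000): ~mask = 0xFFF, so
// shift = popcount(0xFFF) = 12, msb = clz64(0xFFF) = 52, and 52 + 12 == 64.
// The AND clears the low 12 bits, which Dins(dst, zero_reg, 0, 12) performs
// in one instruction with no constant load of the inverted mask.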
++ Emit(kLoong64Dins, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0), g.TempImmediate(shift)); ++ return; ++ } ++ } ++ VisitBinop(this, node, kLoong64And, true, kLoong64And); ++} ++ ++void InstructionSelector::VisitWord32Or(Node* node) { ++ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32); ++} ++ ++void InstructionSelector::VisitWord64Or(Node* node) { ++ VisitBinop(this, node, kLoong64Or, true, kLoong64Or); ++} ++ ++void InstructionSelector::VisitWord32Xor(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Nor32, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32); ++} ++ ++void InstructionSelector::VisitWord64Xor(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) && ++ m.right().Is(-1)) { ++ Int64BinopMatcher mleft(m.left().node()); ++ if (!mleft.right().HasValue()) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Nor, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseRegister(mleft.right().node())); ++ return; ++ } ++ } ++ if (m.right().Is(-1)) { ++ // Use Nor for bit negation and eliminate constant loading for xori. ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.TempImmediate(0)); ++ return; ++ } ++ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor); ++} ++ ++void InstructionSelector::VisitWord32Shl(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 31)) { ++ Loong64OperandGenerator g(this); ++ Int32BinopMatcher mleft(m.left().node()); ++ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ if (mleft.right().HasValue()) { ++ uint32_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 32)) { ++ uint32_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); ++ DCHECK_NE(0u, shift); ++ if ((shift + mask_width) >= 32) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kLoong64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kLoong64Shl, node); ++} ++ ++void InstructionSelector::VisitWord32Shr(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x1F; ++ Int32BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. 
++ uint32_t mask = (mleft.right().Value() >> lsb) << lsb; ++ unsigned mask_width = base::bits::CountPopulation(mask); ++ unsigned mask_msb = base::bits::CountLeadingZeros32(mask); ++ if ((mask_msb + mask_width + lsb) == 32) { ++ Loong64OperandGenerator g(this); ++ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); ++ Emit(kLoong64Ext, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), ++ g.TempImmediate(mask_width)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kLoong64Shr, node); ++} ++ ++void InstructionSelector::VisitWord32Sar(Node* node) { ++ Int32BinopMatcher m(node); ++ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) { ++ Int32BinopMatcher mleft(m.left().node()); ++ if (m.right().HasValue() && mleft.right().HasValue()) { ++ Loong64OperandGenerator g(this); ++ uint32_t sar = m.right().Value(); ++ uint32_t shl = mleft.right().Value(); ++ if ((sar == shl) && (sar == 16)) { ++ Emit(kLoong64Seh, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 24)) { ++ Emit(kLoong64Seb, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node())); ++ return; ++ } else if ((sar == shl) && (sar == 32)) { ++ Emit(kLoong64Shl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), g.TempImmediate(0)); ++ return; ++ } ++ } ++ } ++ VisitRRO(this, kLoong64Sar, node); ++} ++ ++void InstructionSelector::VisitWord64Shl(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && ++ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) { ++ // There's no need to sign/zero-extend to 64-bit if we shift out the upper ++ // 32 bits anyway. ++ Emit(kLoong64Dshl, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()->InputAt(0)), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ if (m.left().IsWord64And() && CanCover(node, m.left().node()) && ++ m.right().IsInRange(1, 63)) { ++ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is ++ // contiguous, and the shift immediate non-zero. ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue()) { ++ uint64_t mask = mleft.right().Value(); ++ uint32_t mask_width = base::bits::CountPopulation(mask); ++ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask); ++ if ((mask_width != 0) && (mask_msb + mask_width == 64)) { ++ uint64_t shift = m.right().Value(); ++ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); ++ DCHECK_NE(0u, shift); ++ ++ if ((shift + mask_width) >= 64) { ++ // If the mask is contiguous and reaches or extends beyond the top ++ // bit, only the shift is needed. ++ Emit(kLoong64Dshl, g.DefineAsRegister(node), ++ g.UseRegister(mleft.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ } ++ } ++ VisitRRO(this, kLoong64Dshl, node); ++} ++ ++void InstructionSelector::VisitWord64Shr(Node* node) { ++ Int64BinopMatcher m(node); ++ if (m.left().IsWord64And() && m.right().HasValue()) { ++ uint32_t lsb = m.right().Value() & 0x3F; ++ Int64BinopMatcher mleft(m.left().node()); ++ if (mleft.right().HasValue() && mleft.right().Value() != 0) { ++ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is ++ // shifted into the least-significant bits. 
++      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
++      unsigned mask_width = base::bits::CountPopulation(mask);
++      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
++      if ((mask_msb + mask_width + lsb) == 64) {
++        Loong64OperandGenerator g(this);
++        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
++        Emit(kLoong64Dext, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
++             g.TempImmediate(mask_width));
++        return;
++      }
++    }
++  }
++  VisitRRO(this, kLoong64Dshr, node);
++}
++
++void InstructionSelector::VisitWord64Sar(Node* node) {
++  if (TryEmitExtendingLoad(this, node, node)) return;
++  VisitRRO(this, kLoong64Dsar, node);
++}
++
++void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
++
++void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
++
++void InstructionSelector::VisitWord32Ror(Node* node) {
++  VisitRRO(this, kLoong64Ror, node);
++}
++
++void InstructionSelector::VisitWord32Clz(Node* node) {
++  VisitRR(this, kLoong64Clz, node);
++}
++
++void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
++
++void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
++
++void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
++       g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
++       g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
++  UNREACHABLE();
++}
++
++void InstructionSelector::VisitWord32Ctz(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitWord64Ctz(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64Dctz, g.DefineAsRegister(node),
++       g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitWord32Popcnt(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64Popcnt, g.DefineAsRegister(node),
++       g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitWord64Popcnt(Node* node) {
++  Loong64OperandGenerator g(this);
++  Emit(kLoong64Dpopcnt, g.DefineAsRegister(node),
++       g.UseRegister(node->InputAt(0)));
++}
++
++void InstructionSelector::VisitWord64Ror(Node* node) {
++  VisitRRO(this, kLoong64Dror, node);
++}
++
++void InstructionSelector::VisitWord64Clz(Node* node) {
++  VisitRR(this, kLoong64Dclz, node);
++}
++
++void InstructionSelector::VisitInt32Add(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int32BinopMatcher m(node);
++
++  // Select Lsa for (left + (left_of_right << imm)).
++  if (m.right().opcode() == IrOpcode::kWord32Shl &&
++      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
++    Int32BinopMatcher mright(m.right().node());
++    if (mright.right().HasValue() && !m.left().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLoong64Lsa, g.DefineAsRegister(node),
++             g.UseRegister(mright.left().node()),
++             g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  // Select Lsa for ((left_of_left << imm) + right).
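// [Editorial note - not part of the patch] Lsa/Dlsa fold a left shift by a
// constant into an add, computing addend + (index << sa) in one instruction;
// the canonical winner is array indexing, e.g. with 8-byte elements the
// address p + (i << 3) needs no separate shift. The two matchers here fire
// only for shift amounts 1..31 and only when the non-shifted addend is not
// itself a constant (a constant addend folds more cheaply elsewhere).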
++  if (m.left().opcode() == IrOpcode::kWord32Shl &&
++      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
++    Int32BinopMatcher mleft(m.left().node());
++    if (mleft.right().HasValue() && !m.right().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLoong64Lsa, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()),
++             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  VisitBinop(this, node, kLoong64Add, true, kLoong64Add);
++}
++
++void InstructionSelector::VisitInt64Add(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int64BinopMatcher m(node);
++
++  // Select Dlsa for (left + (left_of_right << imm)).
++  if (m.right().opcode() == IrOpcode::kWord64Shl &&
++      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
++    Int64BinopMatcher mright(m.right().node());
++    if (mright.right().HasValue() && !m.left().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLoong64Dlsa, g.DefineAsRegister(node),
++             g.UseRegister(mright.left().node()),
++             g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  // Select Dlsa for ((left_of_left << imm) + right).
++  if (m.left().opcode() == IrOpcode::kWord64Shl &&
++      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
++    Int64BinopMatcher mleft(m.left().node());
++    if (mleft.right().HasValue() && !m.right().HasValue()) {
++      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
++      if (shift_value > 0 && shift_value <= 31) {
++        Emit(kLoong64Dlsa, g.DefineAsRegister(node),
++             g.UseRegister(mleft.left().node()),
++             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
++        return;
++      }
++    }
++  }
++
++  VisitBinop(this, node, kLoong64Dadd, true, kLoong64Dadd);
++}
++
++void InstructionSelector::VisitInt32Sub(Node* node) {
++  VisitBinop(this, node, kLoong64Sub);
++}
++
++void InstructionSelector::VisitInt64Sub(Node* node) {
++  VisitBinop(this, node, kLoong64Dsub);
++}
++
++void InstructionSelector::VisitInt32Mul(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int32BinopMatcher m(node);
++  if (m.right().HasValue() && m.right().Value() > 0) {
++    uint32_t value = static_cast<uint32_t>(m.right().Value());
++    if (base::bits::IsPowerOfTwo(value)) {
++      Emit(kLoong64Shl | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLoong64r6 &&*/
++        value - 1 > 0 && value - 1 <= 31) {
++      Emit(kLoong64Lsa, g.DefineAsRegister(node),
++           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value + 1)) {
++      InstructionOperand temp = g.TempRegister();
++      Emit(kLoong64Shl | AddressingModeField::encode(kMode_None), temp,
++           g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
++      Emit(kLoong64Sub | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
++      return;
++    }
++  }
++  Node* left = node->InputAt(0);
++  Node* right = node->InputAt(1);
++  if (CanCover(node, left) && CanCover(node, right)) {
++    if (left->opcode() == IrOpcode::kWord64Sar &&
++        right->opcode() == IrOpcode::kWord64Sar) {
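// [Editorial worked step - not part of the patch] On 64-bit V8 a Smi keeps
// its 32-bit payload in the upper word, so untagging is an arithmetic shift
// right by 32 and the low 32 bits of a tagged value are zero. For
// x = a * 2^32 and y = b * 2^32:
//
//   x * y = (a * b) * 2^64
//
// so the high 64 bits of the full product are exactly a * b, and a single
// DMulHigh(x, y) replaces both untagging shifts plus the 32-bit multiply.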
++      Int64BinopMatcher leftInput(left), rightInput(right);
++      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
++        // Combine untagging shifts with Dmul high.
++        Emit(kLoong64DMulHigh, g.DefineSameAsFirst(node),
++             g.UseRegister(leftInput.left().node()),
++             g.UseRegister(rightInput.left().node()));
++        return;
++      }
++    }
++  }
++  VisitRRR(this, kLoong64Mul, node);
++}
++
++void InstructionSelector::VisitInt32MulHigh(Node* node) {
++  VisitRRR(this, kLoong64MulHigh, node);
++}
++
++void InstructionSelector::VisitUint32MulHigh(Node* node) {
++  VisitRRR(this, kLoong64MulHighU, node);
++}
++
++void InstructionSelector::VisitInt64Mul(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int64BinopMatcher m(node);
++  // TODO(dusmil): Add optimization for shifts larger than 32.
++  if (m.right().HasValue() && m.right().Value() > 0) {
++    uint32_t value = static_cast<uint32_t>(m.right().Value());
++    if (base::bits::IsPowerOfTwo(value)) {
++      Emit(kLoong64Dshl | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value - 1) && /*kArchVariant == kLoong64r6 &&*/
++        value - 1 > 0 && value - 1 <= 31) {
++      // Dlsa macro will handle the shifting value out of bound cases.
++      Emit(kLoong64Dlsa, g.DefineAsRegister(node),
++           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
++      return;
++    }
++    if (base::bits::IsPowerOfTwo(value + 1)) {
++      InstructionOperand temp = g.TempRegister();
++      Emit(kLoong64Dshl | AddressingModeField::encode(kMode_None), temp,
++           g.UseRegister(m.left().node()),
++           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
++      Emit(kLoong64Dsub | AddressingModeField::encode(kMode_None),
++           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
++      return;
++    }
++  }
++  Emit(kLoong64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
++       g.UseRegister(m.right().node()));
++}
++
++void InstructionSelector::VisitInt32Div(Node* node) {
++  Loong64OperandGenerator g(this);
++  Int32BinopMatcher m(node);
++  Node* left = node->InputAt(0);
++  Node* right = node->InputAt(1);
++  if (CanCover(node, left) && CanCover(node, right)) {
++    if (left->opcode() == IrOpcode::kWord64Sar &&
++        right->opcode() == IrOpcode::kWord64Sar) {
++      Int64BinopMatcher rightInput(right), leftInput(left);
++      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
++        // Combine both shifted operands with Ddiv.
++ Emit(kLoong64Ddiv, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kLoong64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint32Div(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kLoong64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt32Mod(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ if (CanCover(node, left) && CanCover(node, right)) { ++ if (left->opcode() == IrOpcode::kWord64Sar && ++ right->opcode() == IrOpcode::kWord64Sar) { ++ Int64BinopMatcher rightInput(right), leftInput(left); ++ if (rightInput.right().Is(32) && leftInput.right().Is(32)) { ++ // Combine both shifted operands with Dmod. ++ Emit(kLoong64Dmod, g.DefineSameAsFirst(node), ++ g.UseRegister(leftInput.left().node()), ++ g.UseRegister(rightInput.left().node())); ++ return; ++ } ++ } ++ } ++ Emit(kLoong64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint32Mod(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int32BinopMatcher m(node); ++ Emit(kLoong64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt64Div(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLoong64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint64Div(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLoong64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitInt64Mod(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLoong64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitUint64Mod(Node* node) { ++ Loong64OperandGenerator g(this); ++ Int64BinopMatcher m(node); ++ Emit(kLoong64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), ++ g.UseRegister(m.right().node())); ++} ++ ++void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDS, node); ++} ++ ++void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { ++ VisitRR(this, kLoong64CvtSW, node); ++} ++ ++void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { ++ VisitRR(this, kLoong64CvtSUw, node); ++} ++ ++void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDW, node); ++} ++ ++void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDL, node); ++} ++ ++void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDUw, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { ++ VisitRR(this, kLoong64TruncWS, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { ++ VisitRR(this, kLoong64TruncUwS, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt32(Node* 
node) { ++ Loong64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction ++ // which does rounding and conversion to integer format. ++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kFloat64RoundDown: ++ Emit(kLoong64FloorWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundUp: ++ Emit(kLoong64CeilWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTiesEven: ++ Emit(kLoong64RoundWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ case IrOpcode::kFloat64RoundTruncate: ++ Emit(kLoong64TruncWD, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ default: ++ break; ++ } ++ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { ++ Node* next = value->InputAt(0); ++ if (CanCover(value, next)) { ++ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) ++ switch (next->opcode()) { ++ case IrOpcode::kFloat32RoundDown: ++ Emit(kLoong64FloorWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundUp: ++ Emit(kLoong64CeilWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTiesEven: ++ Emit(kLoong64RoundWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ case IrOpcode::kFloat32RoundTruncate: ++ Emit(kLoong64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(next->InputAt(0))); ++ return; ++ default: ++ Emit(kLoong64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } else { ++ // Match float32 -> float64 -> int32 representation change path. 
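// [Editorial note - not part of the patch] Dropping the float32 -> float64
// widening here is sound because widening is exact: every float32 value is
// representable as a float64 (e.g. (double)16777216.0f == 16777216.0), so
// truncating the widened double to int32 gives the same result as truncating
// the original float32 directly with kLoong64TruncWS.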
++ Emit(kLoong64TruncWS, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ } ++ } ++ VisitRR(this, kLoong64TruncWD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) { ++ VisitRR(this, kLoong64TruncLD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { ++ VisitRR(this, kLoong64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) { ++ VisitRR(this, kLoong64TruncUlD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { ++ VisitRR(this, kLoong64TruncUwD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) { ++ VisitRR(this, kLoong64TruncLD, node); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) { ++ Loong64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ this->Emit(kLoong64TruncLS, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) { ++ Loong64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLoong64TruncLD, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) { ++ Loong64OperandGenerator g(this); ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLoong64TruncUlS, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) { ++ Loong64OperandGenerator g(this); ++ ++ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; ++ InstructionOperand outputs[2]; ++ size_t output_count = 0; ++ outputs[output_count++] = g.DefineAsRegister(node); ++ ++ Node* success_output = NodeProperties::FindProjection(node, 1); ++ if (success_output) { ++ outputs[output_count++] = g.DefineAsRegister(success_output); ++ } ++ ++ Emit(kLoong64TruncUlD, output_count, outputs, 1, inputs); ++} ++ ++void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { ++ UNIMPLEMENTED(); ++} ++ ++void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { ++ Node* value = node->InputAt(0); ++ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) { ++ // Generate sign-extending load. ++ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); ++ InstructionCode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ opcode = load_rep.IsUnsigned() ? 
kLoong64Lbu : kLoong64Lb; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsUnsigned() ? kLoong64Lhu : kLoong64Lh; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLoong64Lw; ++ break; ++ default: ++ UNREACHABLE(); ++ return; ++ } ++ EmitLoad(this, value, opcode, node); ++ } else { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ switch (value->opcode()) { ++ // 32-bit operations will write their result in a 64 bit register, ++ // clearing the top 32 bits of the destination register. ++ case IrOpcode::kUint32Div: ++ case IrOpcode::kUint32Mod: ++ case IrOpcode::kUint32MulHigh: { ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); ++ return; ++ } ++ case IrOpcode::kLoad: { ++ LoadRepresentation load_rep = LoadRepresentationOf(value->op()); ++ if (load_rep.IsUnsigned()) { ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ case MachineRepresentation::kWord16: ++ case MachineRepresentation::kWord32: ++ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); ++ return; ++ default: ++ break; ++ } ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ Emit(kLoong64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ if (CanCover(node, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord64Sar: { ++ if (CanCoverTransitively(node, value, value->InputAt(0)) && ++ TryEmitExtendingLoad(this, value, node)) { ++ return; ++ } else { ++ Int64BinopMatcher m(value); ++ if (m.right().IsInRange(32, 63)) { ++ // After smi untagging no need for truncate. Combine sequence. ++ Emit(kLoong64Dsar, g.DefineSameAsFirst(node), ++ g.UseRegister(m.left().node()), ++ g.UseImmediate(m.right().node())); ++ return; ++ } ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ } ++ Emit(kLoong64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0), g.TempImmediate(32)); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* value = node->InputAt(0); ++ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding ++ // instruction. 
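// [Editorial note - not part of the patch] The match below relies on int32 ->
// float64 being exact (a double's 53-bit significand holds every int32), so
// the only rounding in the int32 -> float64 -> float32 chain happens at the
// final step, and converting the integer straight to float32 performs that
// same single rounding. Example: 16777217 (2^24 + 1) becomes 16777216.0f by
// either route.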
++ if (CanCover(node, value) && ++ value->opcode() == IrOpcode::kChangeInt32ToFloat64) { ++ Emit(kLoong64CvtSW, g.DefineAsRegister(node), ++ g.UseRegister(value->InputAt(0))); ++ return; ++ } ++ VisitRR(this, kLoong64CvtSD, node); ++} ++ ++void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { ++ VisitRR(this, kArchTruncateDoubleToI, node); ++} ++ ++void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { ++ VisitRR(this, kLoong64TruncWD, node); ++} ++ ++void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) { ++ VisitRR(this, kLoong64CvtSL, node); ++} ++ ++void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDL, node); ++} ++ ++void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) { ++ VisitRR(this, kLoong64CvtSUl, node); ++} ++ ++void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) { ++ VisitRR(this, kLoong64CvtDUl, node); ++} ++ ++void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { ++ VisitRR(this, kLoong64Float64ExtractLowWord32, node); ++} ++ ++void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { ++ VisitRR(this, kLoong64BitcastDL, node); ++} ++ ++void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node), ++ ImmediateOperand(ImmediateOperand::INLINE, 0), ++ g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { ++ VisitRR(this, kLoong64BitcastLD, node); ++} ++ ++void InstructionSelector::VisitFloat32Add(Node* node) { ++ // Optimization with Madd.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-loong64.cc. ++ VisitRRR(this, kLoong64AddS, node); ++} ++ ++void InstructionSelector::VisitFloat64Add(Node* node) { ++ // Optimization with Madd.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-loong64.cc. ++ VisitRRR(this, kLoong64AddD, node); ++} ++ ++void InstructionSelector::VisitFloat32Sub(Node* node) { ++ // Optimization with Msub.S(z, x, y) is intentionally removed. ++ // See explanation for madd_s in assembler-loong64.cc. ++ VisitRRR(this, kLoong64SubS, node); ++} ++ ++void InstructionSelector::VisitFloat64Sub(Node* node) { ++ // Optimization with Msub.D(z, x, y) is intentionally removed. ++ // See explanation for madd_d in assembler-loong64.cc. 
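++  // Editorial note (assumption, see assembler-loong64.cc for the authors'
++  // rationale): a fused multiply-sub rounds only once, while the separate
++  // mul + sub emitted here rounds twice, so fusing could change observable
++  // float64 results.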
++ VisitRRR(this, kLoong64SubD, node); ++} ++ ++void InstructionSelector::VisitFloat32Mul(Node* node) { ++ VisitRRR(this, kLoong64MulS, node); ++} ++ ++void InstructionSelector::VisitFloat64Mul(Node* node) { ++ VisitRRR(this, kLoong64MulD, node); ++} ++ ++void InstructionSelector::VisitFloat32Div(Node* node) { ++ VisitRRR(this, kLoong64DivS, node); ++} ++ ++void InstructionSelector::VisitFloat64Div(Node* node) { ++ VisitRRR(this, kLoong64DivD, node); ++} ++ ++void InstructionSelector::VisitFloat64Mod(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64ModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0), ++ g.UseFixed(node->InputAt(1), f1)) ++ ->MarkAsCall(); ++} ++ ++void InstructionSelector::VisitFloat32Max(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Float32Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Max(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Float64Max, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat32Min(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Float32Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat64Min(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Float64Min, g.DefineAsRegister(node), ++ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); ++} ++ ++void InstructionSelector::VisitFloat32Abs(Node* node) { ++ VisitRR(this, kLoong64AbsS, node); ++} ++ ++void InstructionSelector::VisitFloat64Abs(Node* node) { ++ VisitRR(this, kLoong64AbsD, node); ++} ++ ++void InstructionSelector::VisitFloat32Sqrt(Node* node) { ++ VisitRR(this, kLoong64SqrtS, node); ++} ++ ++void InstructionSelector::VisitFloat64Sqrt(Node* node) { ++ VisitRR(this, kLoong64SqrtD, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundDown(Node* node) { ++ VisitRR(this, kLoong64Float32RoundDown, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundDown(Node* node) { ++ VisitRR(this, kLoong64Float64RoundDown, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundUp(Node* node) { ++ VisitRR(this, kLoong64Float32RoundUp, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundUp(Node* node) { ++ VisitRR(this, kLoong64Float64RoundUp, node); ++} ++ ++void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { ++ VisitRR(this, kLoong64Float32RoundTruncate, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { ++ VisitRR(this, kLoong64Float64RoundTruncate, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { ++ UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { ++ VisitRR(this, kLoong64Float32RoundTiesEven, node); ++} ++ ++void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { ++ VisitRR(this, kLoong64Float64RoundTiesEven, node); ++} ++ ++void InstructionSelector::VisitFloat32Neg(Node* node) { ++ VisitRR(this, kLoong64NegS, node); ++} ++ ++void InstructionSelector::VisitFloat64Neg(Node* node) { ++ VisitRR(this, kLoong64NegD, node); ++} ++ ++void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, ++ InstructionCode opcode) { ++ Loong64OperandGenerator g(this); ++ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), ++ g.UseFixed(node->InputAt(1), f4)) 
++      ->MarkAsCall();
++}
++
++void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
++                                                  InstructionCode opcode) {
++  Loong64OperandGenerator g(this);
++  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
++      ->MarkAsCall();
++}
++
++void InstructionSelector::EmitPrepareArguments(
++    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
++    Node* node) {
++  Loong64OperandGenerator g(this);
++
++  // Prepare for C function call.
++  if (call_descriptor->IsCFunctionCall()) {
++    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
++                                         call_descriptor->ParameterCount())),
++         0, nullptr, 0, nullptr);
++
++    // Poke any stack arguments.
++    int slot = kCArgSlotCount;
++    for (PushParameter input : (*arguments)) {
++      Emit(kLoong64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
++           g.TempImmediate(slot << kSystemPointerSizeLog2));
++      ++slot;
++    }
++  } else {
++    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
++    if (push_count > 0) {
++      // Calculate needed space
++      int stack_size = 0;
++      for (PushParameter input : (*arguments)) {
++        if (input.node) {
++          stack_size += input.location.GetSizeInPointers();
++        }
++      }
++      Emit(kLoong64StackClaim, g.NoOutput(),
++           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
++    }
++    for (size_t n = 0; n < arguments->size(); ++n) {
++      PushParameter input = (*arguments)[n];
++      if (input.node) {
++        Emit(kLoong64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
++             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
++      }
++    }
++  }
++}
++
++void InstructionSelector::EmitPrepareResults(
++    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
++    Node* node) {
++  Loong64OperandGenerator g(this);
++
++  int reverse_slot = 0;
++  for (PushParameter output : *results) {
++    if (!output.location.IsCallerFrameSlot()) continue;
++    // Skip any alignment holes in nodes.
++    if (output.node != nullptr) {
++      DCHECK(!call_descriptor->IsCFunctionCall());
++      if (output.location.GetType() == MachineType::Float32()) {
++        MarkAsFloat32(output.node);
++      } else if (output.location.GetType() == MachineType::Float64()) {
++        MarkAsFloat64(output.node);
++      }
++      Emit(kLoong64Peek, g.DefineAsRegister(output.node),
++           g.UseImmediate(reverse_slot));
++    }
++    reverse_slot += output.location.GetSizeInPointers();
++  }
++}
++
++bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
++
++int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
++
++void InstructionSelector::VisitUnalignedLoad(Node* node) {
++  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
++  Loong64OperandGenerator g(this);
++  Node* base = node->InputAt(0);
++  Node* index = node->InputAt(1);
++
++  ArchOpcode opcode = kArchNop;
++  switch (load_rep.representation()) {
++    case MachineRepresentation::kFloat32:
++      opcode = kLoong64Ulwc1;
++      break;
++    case MachineRepresentation::kFloat64:
++      opcode = kLoong64Uldc1;
++      break;
++    case MachineRepresentation::kBit:  // Fall through.
++    case MachineRepresentation::kWord8:
++      UNREACHABLE();
++    case MachineRepresentation::kWord16:
++      opcode = load_rep.IsUnsigned() ? kLoong64Ulhu : kLoong64Ulh;
++      break;
++    case MachineRepresentation::kWord32:
++      opcode = load_rep.IsUnsigned() ? kLoong64Ulwu : kLoong64Ulw;
++      break;
++    case MachineRepresentation::kTaggedSigned:   // Fall through.
++    case MachineRepresentation::kTaggedPointer:  // Fall through.
++    case MachineRepresentation::kTagged:  // Fall through.
++ case MachineRepresentation::kWord64: ++ opcode = kLoong64Uld; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void InstructionSelector::VisitUnalignedStore(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kFloat32: ++ opcode = kLoong64Uswc1; ++ break; ++ case MachineRepresentation::kFloat64: ++ opcode = kLoong64Usdc1; ++ break; ++ case MachineRepresentation::kBit: // Fall through. ++ case MachineRepresentation::kWord8: ++ UNREACHABLE(); ++ case MachineRepresentation::kWord16: ++ opcode = kLoong64Ush; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLoong64Usw; ++ break; ++ case MachineRepresentation::kTaggedSigned: // Fall through. ++ case MachineRepresentation::kTaggedPointer: // Fall through. ++ case MachineRepresentation::kTagged: // Fall through. ++ case MachineRepresentation::kWord64: ++ opcode = kLoong64Usd; ++ break; ++ case MachineRepresentation::kCompressedPointer: // Fall through. ++ case MachineRepresentation::kCompressed: // Fall through. ++ case MachineRepresentation::kNone: ++ case MachineRepresentation::kSimd128: ++ UNREACHABLE(); ++ } ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), addr_reg, ++ g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. ++ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), ++ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); ++ } ++} ++ ++namespace { ++ ++// Shared routine for multiple compare operations. ++static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, ++ InstructionOperand left, InstructionOperand right, ++ FlagsContinuation* cont) { ++ selector->EmitWithContinuation(opcode, left, right, cont); ++} ++ ++// Shared routine for multiple float32 compare operations. ++void VisitFloat32Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ Loong64OperandGenerator g(selector); ++ Float32BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? 
g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kLoong64CmpS, lhs, rhs, cont); ++} ++ ++// Shared routine for multiple float64 compare operations. ++void VisitFloat64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ Loong64OperandGenerator g(selector); ++ Float64BinopMatcher m(node); ++ InstructionOperand lhs, rhs; ++ ++ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) ++ : g.UseRegister(m.left().node()); ++ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) ++ : g.UseRegister(m.right().node()); ++ VisitCompare(selector, kLoong64CmpD, lhs, rhs, cont); ++} ++ ++// Shared routine for multiple word compare operations. ++void VisitWordCompare(InstructionSelector* selector, Node* node, ++ InstructionCode opcode, FlagsContinuation* cont, ++ bool commutative) { ++ Loong64OperandGenerator g(selector); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ ++ // Match immediates on left or right side of comparison. ++ if (g.CanBeImmediate(right, opcode)) { ++ if (opcode == kLoong64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseImmediate(right), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(left), ++ g.UseRegister(right), cont); ++ } ++ } ++ } else if (g.CanBeImmediate(left, opcode)) { ++ if (!commutative) cont->Commute(); ++ if (opcode == kLoong64Tst) { ++ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), ++ cont); ++ } else { ++ switch (cont->condition()) { ++ case kEqual: ++ case kNotEqual: ++ if (cont->IsSet()) { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ break; ++ case kSignedLessThan: ++ case kSignedGreaterThanOrEqual: ++ case kUnsignedLessThan: ++ case kUnsignedGreaterThanOrEqual: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseImmediate(left), cont); ++ break; ++ default: ++ VisitCompare(selector, opcode, g.UseRegister(right), ++ g.UseRegister(left), cont); ++ } ++ } ++ } else { ++ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), ++ cont); ++ } ++} ++ ++bool IsNodeUnsigned(Node* n) { ++ NodeMatcher m(n); ++ ++ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || ++ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { ++ LoadRepresentation load_rep = LoadRepresentationOf(n->op()); ++ return load_rep.IsUnsigned(); ++ } else { ++ return m.IsUint32Div() || m.IsUint32LessThan() || ++ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() || ++ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() || ++ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32(); ++ } ++} ++ ++// Shared routine for multiple word compare operations. 
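++// Editorial sketch (mirrors the code below, not from the original patch): a
++// "full" Word32 compare shifts both operands into the upper word first, e.g.
++//   Dshl t0, lhs, 32
++//   Dshl t1, rhs, 32
++//   Cmp  t0, t1
++// so the result no longer depends on whether the inputs happened to be sign-
++// or zero-extended in their lower 32 bits.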
++void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
++                            InstructionCode opcode, FlagsContinuation* cont) {
++  Loong64OperandGenerator g(selector);
++  InstructionOperand leftOp = g.TempRegister();
++  InstructionOperand rightOp = g.TempRegister();
++
++  selector->Emit(kLoong64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
++                 g.TempImmediate(32));
++  selector->Emit(kLoong64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
++                 g.TempImmediate(32));
++
++  VisitCompare(selector, opcode, leftOp, rightOp, cont);
++}
++
++void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
++                                 InstructionCode opcode,
++                                 FlagsContinuation* cont) {
++  if (FLAG_debug_code) {
++    Loong64OperandGenerator g(selector);
++    InstructionOperand leftOp = g.TempRegister();
++    InstructionOperand rightOp = g.TempRegister();
++    InstructionOperand optimizedResult = g.TempRegister();
++    InstructionOperand fullResult = g.TempRegister();
++    FlagsCondition condition = cont->condition();
++    InstructionCode testOpcode = opcode |
++                                 FlagsConditionField::encode(condition) |
++                                 FlagsModeField::encode(kFlags_set);
++
++    selector->Emit(testOpcode, optimizedResult,
++                   g.UseRegister(node->InputAt(0)),
++                   g.UseRegister(node->InputAt(1)));
++
++    selector->Emit(kLoong64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
++                   g.TempImmediate(32));
++    selector->Emit(kLoong64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
++                   g.TempImmediate(32));
++    selector->Emit(testOpcode, fullResult, leftOp, rightOp);
++
++    selector->Emit(kLoong64AssertEqual, g.NoOutput(), optimizedResult,
++                   fullResult,
++                   g.TempImmediate(static_cast<int>(
++                       AbortReason::kUnsupportedNonPrimitiveCompare)));
++  }
++
++  VisitWordCompare(selector, node, opcode, cont, false);
++}
++
++void VisitWord32Compare(InstructionSelector* selector, Node* node,
++                        FlagsContinuation* cont) {
++  // LOONG64 doesn't support Word32 compare instructions. Instead it relies
++  // on the values in registers being correctly sign-extended and uses
++  // Word64 comparison instead. This behavior is correct in most cases,
++  // but doesn't work when comparing signed with unsigned operands.
++  // We could simulate full Word32 compare in all cases but this would
++  // create an unnecessary overhead since unsigned integers are rarely
++  // used in JavaScript.
++  // The solution proposed here tries to match a comparison of signed
++  // with unsigned operand, and perform full Word32Compare only
++  // in those cases. Unfortunately, the solution is not complete because
++  // it might skip cases where a full Word32 compare is needed, so
++  // basically it is a hack.
++  // When calling a host function in the simulator, if the function returns
++  // an int32 value, the simulator does not sign-extend it to int64 because
++  // it cannot know whether the function returns an int32 or an int64. So we
++  // need to do a full word32 compare in this case.
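++  // Editorial example (not from the original patch): int32 -1 sign-extends
++  // to 0xFFFFFFFF\u2019FFFFFFFF while uint32 0xFFFFFFFF zero-extends to
++  // 0x00000000\u2019FFFFFFFF, so a plain word64 equality compare of the two
++  // wrongly reports "not equal"; shifting both left by 32 makes them compare
++  // equal again.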
++#ifndef USE_SIMULATOR ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { ++#else ++ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || ++ node->InputAt(0)->opcode() == IrOpcode::kCall || ++ node->InputAt(1)->opcode() == IrOpcode::kCall) { ++#endif ++ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont); ++ } else { ++ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont); ++ } ++} ++ ++void VisitWord64Compare(InstructionSelector* selector, Node* node, ++ FlagsContinuation* cont) { ++ VisitWordCompare(selector, node, kLoong64Cmp, cont, false); ++} ++ ++void EmitWordCompareZero(InstructionSelector* selector, Node* value, ++ FlagsContinuation* cont) { ++ Loong64OperandGenerator g(selector); ++ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value), ++ g.TempImmediate(0), cont); ++} ++ ++void VisitAtomicLoad(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Loong64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), g.UseRegister(base), ++ g.UseImmediate(index)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired load opcode, using temp addr_reg. ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); ++ } ++} ++ ++void VisitAtomicStore(InstructionSelector* selector, Node* node, ++ ArchOpcode opcode) { ++ Loong64OperandGenerator g(selector); ++ Node* base = node->InputAt(0); ++ Node* index = node->InputAt(1); ++ Node* value = node->InputAt(2); ++ ++ if (g.CanBeImmediate(index, opcode)) { ++ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI), ++ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index), ++ g.UseRegisterOrImmediateZero(value)); ++ } else { ++ InstructionOperand addr_reg = g.TempRegister(); ++ selector->Emit(kLoong64Dadd | AddressingModeField::encode(kMode_None), ++ addr_reg, g.UseRegister(index), g.UseRegister(base)); ++ // Emit desired store opcode, using temp addr_reg. 
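++    // Editorial note: when the index does not fit the opcode's immediate
++    // field, base + index is first materialized into addr_reg and the store
++    // below then addresses [addr_reg + 0].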
++    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
++                   g.NoOutput(), addr_reg, g.TempImmediate(0),
++                   g.UseRegisterOrImmediateZero(value));
++  }
++}
++
++void VisitAtomicExchange(InstructionSelector* selector, Node* node,
++                         ArchOpcode opcode) {
++  Loong64OperandGenerator g(selector);
++  Node* base = node->InputAt(0);
++  Node* index = node->InputAt(1);
++  Node* value = node->InputAt(2);
++
++  AddressingMode addressing_mode = kMode_MRI;
++  InstructionOperand inputs[3];
++  size_t input_count = 0;
++  inputs[input_count++] = g.UseUniqueRegister(base);
++  inputs[input_count++] = g.UseUniqueRegister(index);
++  inputs[input_count++] = g.UseUniqueRegister(value);
++  InstructionOperand outputs[1];
++  outputs[0] = g.UseUniqueRegister(node);
++  InstructionOperand temp[3];
++  temp[0] = g.TempRegister();
++  temp[1] = g.TempRegister();
++  temp[2] = g.TempRegister();
++  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
++  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
++}
++
++void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
++                                ArchOpcode opcode) {
++  Loong64OperandGenerator g(selector);
++  Node* base = node->InputAt(0);
++  Node* index = node->InputAt(1);
++  Node* old_value = node->InputAt(2);
++  Node* new_value = node->InputAt(3);
++
++  AddressingMode addressing_mode = kMode_MRI;
++  InstructionOperand inputs[4];
++  size_t input_count = 0;
++  inputs[input_count++] = g.UseUniqueRegister(base);
++  inputs[input_count++] = g.UseUniqueRegister(index);
++  inputs[input_count++] = g.UseUniqueRegister(old_value);
++  inputs[input_count++] = g.UseUniqueRegister(new_value);
++  InstructionOperand outputs[1];
++  outputs[0] = g.UseUniqueRegister(node);
++  InstructionOperand temp[3];
++  temp[0] = g.TempRegister();
++  temp[1] = g.TempRegister();
++  temp[2] = g.TempRegister();
++  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
++  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
++}
++
++void VisitAtomicBinop(InstructionSelector* selector, Node* node,
++                      ArchOpcode opcode) {
++  Loong64OperandGenerator g(selector);
++  Node* base = node->InputAt(0);
++  Node* index = node->InputAt(1);
++  Node* value = node->InputAt(2);
++
++  AddressingMode addressing_mode = kMode_MRI;
++  InstructionOperand inputs[3];
++  size_t input_count = 0;
++  inputs[input_count++] = g.UseUniqueRegister(base);
++  inputs[input_count++] = g.UseUniqueRegister(index);
++  inputs[input_count++] = g.UseUniqueRegister(value);
++  InstructionOperand outputs[1];
++  outputs[0] = g.UseUniqueRegister(node);
++  InstructionOperand temps[4];
++  temps[0] = g.TempRegister();
++  temps[1] = g.TempRegister();
++  temps[2] = g.TempRegister();
++  temps[3] = g.TempRegister();
++  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
++  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
++}
++
++}  // namespace
++
++void InstructionSelector::VisitStackPointerGreaterThan(
++    Node* node, FlagsContinuation* cont) {
++  StackCheckKind kind = StackCheckKindOf(node->op());
++  InstructionCode opcode =
++      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
++
++  Loong64OperandGenerator g(this);
++
++  // No outputs.
++  InstructionOperand* const outputs = nullptr;
++  const int output_count = 0;
++
++  // Applying an offset to this stack check requires a temp register. Offsets
++  // are only applied to the first stack check.
If applying an offset, we must ++ // ensure the input and temp registers do not alias, thus kUniqueRegister. ++ InstructionOperand temps[] = {g.TempRegister()}; ++ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0); ++ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) ++ ? OperandGenerator::kUniqueRegister ++ : OperandGenerator::kRegister; ++ ++ Node* const value = node->InputAt(0); ++ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; ++ static constexpr int input_count = arraysize(inputs); ++ ++ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, ++ temp_count, temps, cont); ++} ++ ++// Shared routine for word comparisons against zero. ++void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, ++ FlagsContinuation* cont) { ++ // Try to combine with comparisons against 0 by simply inverting the branch. ++ while (CanCover(user, value)) { ++ if (value->opcode() == IrOpcode::kWord32Equal) { ++ Int32BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else if (value->opcode() == IrOpcode::kWord64Equal) { ++ Int64BinopMatcher m(value); ++ if (!m.right().Is(0)) break; ++ user = value; ++ value = m.left().node(); ++ } else { ++ break; ++ } ++ ++ cont->Negate(); ++ } ++ ++ if (CanCover(user, value)) { ++ switch (value->opcode()) { ++ case IrOpcode::kWord32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kInt32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kUint32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord32Compare(this, value, cont); ++ case IrOpcode::kWord64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThan: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kInt64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kUint64LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitWord64Compare(this, value, cont); ++ case IrOpcode::kFloat32Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat32LessThanOrEqual: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); ++ return VisitFloat32Compare(this, value, cont); ++ case IrOpcode::kFloat64Equal: ++ cont->OverwriteAndNegateIfEqual(kEqual); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThan: ++ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); ++ return VisitFloat64Compare(this, value, cont); ++ case IrOpcode::kFloat64LessThanOrEqual: ++ 
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
++        return VisitFloat64Compare(this, value, cont);
++      case IrOpcode::kProjection:
++        // Check if this is the overflow output projection of an
++        // <Operation>WithOverflow node.
++        if (ProjectionIndexOf(value->op()) == 1u) {
++          // We cannot combine the <Operation>WithOverflow with this branch
++          // unless the 0th projection (the use of the actual value of the
++          // <Operation> is either nullptr, which means there's no use of the
++          // actual value, or was already defined, which means it is scheduled
++          // *AFTER* this branch).
++          Node* const node = value->InputAt(0);
++          Node* const result = NodeProperties::FindProjection(node, 0);
++          if (result == nullptr || IsDefined(result)) {
++            switch (node->opcode()) {
++              case IrOpcode::kInt32AddWithOverflow:
++                cont->OverwriteAndNegateIfEqual(kOverflow);
++                return VisitBinop(this, node, kLoong64Dadd, cont);
++              case IrOpcode::kInt32SubWithOverflow:
++                cont->OverwriteAndNegateIfEqual(kOverflow);
++                return VisitBinop(this, node, kLoong64Dsub, cont);
++              case IrOpcode::kInt32MulWithOverflow:
++                cont->OverwriteAndNegateIfEqual(kOverflow);
++                return VisitBinop(this, node, kLoong64MulOvf, cont);
++              case IrOpcode::kInt64AddWithOverflow:
++                cont->OverwriteAndNegateIfEqual(kOverflow);
++                return VisitBinop(this, node, kLoong64DaddOvf, cont);
++              case IrOpcode::kInt64SubWithOverflow:
++                cont->OverwriteAndNegateIfEqual(kOverflow);
++                return VisitBinop(this, node, kLoong64DsubOvf, cont);
++              default:
++                break;
++            }
++          }
++        }
++        break;
++      case IrOpcode::kWord32And:
++      case IrOpcode::kWord64And:
++        return VisitWordCompare(this, value, kLoong64Tst, cont, true);
++      case IrOpcode::kStackPointerGreaterThan:
++        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
++        return VisitStackPointerGreaterThan(value, cont);
++      default:
++        break;
++    }
++  }
++
++  // Continuation could not be combined with a compare, emit compare against 0.
++  EmitWordCompareZero(this, value, cont);
++}
++
++void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
++  Loong64OperandGenerator g(this);
++  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
++
++  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
++  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
++    static const size_t kMaxTableSwitchValueRange = 2 << 16;
++    size_t table_space_cost = 10 + 2 * sw.value_range();
++    size_t table_time_cost = 3;
++    size_t lookup_space_cost = 2 + 2 * sw.case_count();
++    size_t lookup_time_cost = sw.case_count();
++    if (sw.case_count() > 0 &&
++        table_space_cost + 3 * table_time_cost <=
++            lookup_space_cost + 3 * lookup_time_cost &&
++        sw.min_value() > std::numeric_limits<int32_t>::min() &&
++        sw.value_range() <= kMaxTableSwitchValueRange) {
++      InstructionOperand index_operand = value_operand;
++      if (sw.min_value()) {
++        index_operand = g.TempRegister();
++        Emit(kLoong64Sub, index_operand, value_operand,
++             g.TempImmediate(sw.min_value()));
++      }
++      // Generate a table lookup.
++      return EmitTableSwitch(sw, index_operand);
++    }
++  }
++
++  // Generate a tree of conditional jumps.
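++  // Editorial note: the cost heuristic above takes the jump-table path only
++  // for reasonably dense switches (estimated table cost <= binary-search
++  // cost and a value range of at most 2^17); sparse or tiny switches fall
++  // through to the binary-search lowering here.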
++ return EmitBinarySearchSwitch(sw, value_operand); ++} ++ ++void InstructionSelector::VisitWord32Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int32BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLoong64Dadd, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLoong64Dadd, &cont); ++} ++ ++void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLoong64Dsub, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLoong64Dsub, &cont); ++} ++ ++void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLoong64MulOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLoong64MulOvf, &cont); ++} ++ ++void InstructionSelector::VisitInt64AddWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLoong64DaddOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLoong64DaddOvf, &cont); ++} ++ ++void InstructionSelector::VisitInt64SubWithOverflow(Node* node) { ++ if (Node* ovf = NodeProperties::FindProjection(node, 1)) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); ++ return VisitBinop(this, node, kLoong64DsubOvf, &cont); ++ } ++ FlagsContinuation cont; ++ VisitBinop(this, node, kLoong64DsubOvf, &cont); ++} ++ ++void InstructionSelector::VisitWord64Equal(Node* const node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ Int64BinopMatcher m(node); ++ if (m.right().Is(0)) { ++ return VisitWordCompareZero(m.node(), m.left().node(), &cont); ++ } ++ ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); 
++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitWord64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat32Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64Equal(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64LessThan(Node* node) { ++ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { ++ FlagsContinuation cont = ++ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); ++ VisitFloat64Compare(this, node, &cont); ++} ++ ++void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { ++ VisitRR(this, kLoong64Float64ExtractLowWord32, node); ++} ++ ++void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { ++ VisitRR(this, kLoong64Float64ExtractHighWord32, node); ++} ++ ++void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { ++ VisitRR(this, kLoong64Float64SilenceNaN, node); ++} ++ ++void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Node* left = node->InputAt(0); ++ Node* right = node->InputAt(1); ++ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node), ++ g.UseRegister(left), g.UseRegister(right)); ++} ++ ++void InstructionSelector::VisitMemoryBarrier(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Sync, g.NoOutput()); ++} ++ ++void InstructionSelector::VisitWord32AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = ++ load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = load_rep.IsSigned() ? 
kWord32AtomicLoadInt16 ++ : kWord32AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicLoadWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kWord32AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kWord32AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kWord32AtomicStoreWord32; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicLoad(Node* node) { ++ LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (load_rep.representation()) { ++ case MachineRepresentation::kWord8: ++ opcode = kLoong64Word64AtomicLoadUint8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kLoong64Word64AtomicLoadUint16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLoong64Word64AtomicLoadUint32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kLoong64Word64AtomicLoadUint64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ VisitAtomicLoad(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicStore(Node* node) { ++ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); ++ ArchOpcode opcode = kArchNop; ++ switch (rep) { ++ case MachineRepresentation::kWord8: ++ opcode = kLoong64Word64AtomicStoreWord8; ++ break; ++ case MachineRepresentation::kWord16: ++ opcode = kLoong64Word64AtomicStoreWord16; ++ break; ++ case MachineRepresentation::kWord32: ++ opcode = kLoong64Word64AtomicStoreWord32; ++ break; ++ case MachineRepresentation::kWord64: ++ opcode = kLoong64Word64AtomicStoreWord64; ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ ++ VisitAtomicStore(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = kWord32AtomicExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kLoong64Word64AtomicExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kLoong64Word64AtomicExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kLoong64Word64AtomicExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kLoong64Word64AtomicExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = 
AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = kWord32AtomicCompareExchangeInt8; ++ } else if (type == MachineType::Uint8()) { ++ opcode = kWord32AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Int16()) { ++ opcode = kWord32AtomicCompareExchangeInt16; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kWord32AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = kWord32AtomicCompareExchangeWord32; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++ ++void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = kLoong64Word64AtomicCompareExchangeUint8; ++ } else if (type == MachineType::Uint16()) { ++ opcode = kLoong64Word64AtomicCompareExchangeUint16; ++ } else if (type == MachineType::Uint32()) { ++ opcode = kLoong64Word64AtomicCompareExchangeUint32; ++ } else if (type == MachineType::Uint64()) { ++ opcode = kLoong64Word64AtomicCompareExchangeUint64; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicCompareExchange(this, node, opcode); ++} ++void InstructionSelector::VisitWord32AtomicBinaryOperation( ++ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ++ ArchOpcode uint16_op, ArchOpcode word32_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Int8()) { ++ opcode = int8_op; ++ } else if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Int16()) { ++ opcode = int16_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { ++ opcode = word32_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ ++ VisitWord32AtomicBinaryOperation( \ ++ node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ ++ kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ ++ kWord32Atomic##op##Word32); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void InstructionSelector::VisitWord64AtomicBinaryOperation( ++ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, ++ ArchOpcode uint64_op) { ++ ArchOpcode opcode = kArchNop; ++ MachineType type = AtomicOpType(node->op()); ++ if (type == MachineType::Uint8()) { ++ opcode = uint8_op; ++ } else if (type == MachineType::Uint16()) { ++ opcode = uint16_op; ++ } else if (type == MachineType::Uint32()) { ++ opcode = uint32_op; ++ } else if (type == MachineType::Uint64()) { ++ opcode = uint64_op; ++ } else { ++ UNREACHABLE(); ++ return; ++ } ++ VisitAtomicBinop(this, node, opcode); ++} ++ ++#define VISIT_ATOMIC_BINOP(op) \ ++ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \ ++ VisitWord64AtomicBinaryOperation( \ ++ node, kLoong64Word64Atomic##op##Uint8, kLoong64Word64Atomic##op##Uint16, \ ++ kLoong64Word64Atomic##op##Uint32, kLoong64Word64Atomic##op##Uint64); \ ++ } ++VISIT_ATOMIC_BINOP(Add) ++VISIT_ATOMIC_BINOP(Sub) ++VISIT_ATOMIC_BINOP(And) ++VISIT_ATOMIC_BINOP(Or) ++VISIT_ATOMIC_BINOP(Xor) ++#undef VISIT_ATOMIC_BINOP ++ ++void 
InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { ++ UNREACHABLE(); ++} ++ ++void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { ++ UNREACHABLE(); ++} ++ ++#define SIMD_TYPE_LIST(V) \ ++ V(F64x2) \ ++ V(F32x4) \ ++ V(I64x2) \ ++ V(I32x4) \ ++ V(I16x8) \ ++ V(I8x16) ++ ++#define SIMD_UNOP_LIST(V) \ ++ V(F64x2Abs, kLoong64F64x2Abs) \ ++ V(F64x2Neg, kLoong64F64x2Neg) \ ++ V(F64x2Sqrt, kLoong64F64x2Sqrt) \ ++ V(I64x2Neg, kLoong64I64x2Neg) \ ++ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \ ++ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \ ++ V(F32x4Abs, kLoong64F32x4Abs) \ ++ V(F32x4Neg, kLoong64F32x4Neg) \ ++ V(F32x4Sqrt, kLoong64F32x4Sqrt) \ ++ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \ ++ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \ ++ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \ ++ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \ ++ V(I32x4Neg, kLoong64I32x4Neg) \ ++ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \ ++ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \ ++ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \ ++ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \ ++ V(I32x4Abs, kLoong64I32x4Abs) \ ++ V(I16x8Neg, kLoong64I16x8Neg) \ ++ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \ ++ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \ ++ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \ ++ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \ ++ V(I16x8Abs, kLoong64I16x8Abs) \ ++ V(I8x16Neg, kLoong64I8x16Neg) \ ++ V(I8x16Abs, kLoong64I8x16Abs) \ ++ V(S128Not, kLoong64S128Not) \ ++ V(S1x4AnyTrue, kLoong64S1x4AnyTrue) \ ++ V(S1x4AllTrue, kLoong64S1x4AllTrue) \ ++ V(S1x8AnyTrue, kLoong64S1x8AnyTrue) \ ++ V(S1x8AllTrue, kLoong64S1x8AllTrue) \ ++ V(S1x16AnyTrue, kLoong64S1x16AnyTrue) \ ++ V(S1x16AllTrue, kLoong64S1x16AllTrue) ++ ++#define SIMD_SHIFT_OP_LIST(V) \ ++ V(I64x2Shl) \ ++ V(I64x2ShrS) \ ++ V(I64x2ShrU) \ ++ V(I32x4Shl) \ ++ V(I32x4ShrS) \ ++ V(I32x4ShrU) \ ++ V(I16x8Shl) \ ++ V(I16x8ShrS) \ ++ V(I16x8ShrU) \ ++ V(I8x16Shl) \ ++ V(I8x16ShrS) \ ++ V(I8x16ShrU) ++ ++#define SIMD_BINOP_LIST(V) \ ++ V(F64x2Add, kLoong64F64x2Add) \ ++ V(F64x2Sub, kLoong64F64x2Sub) \ ++ V(F64x2Mul, kLoong64F64x2Mul) \ ++ V(F64x2Div, kLoong64F64x2Div) \ ++ V(F64x2Min, kLoong64F64x2Min) \ ++ V(F64x2Max, kLoong64F64x2Max) \ ++ V(F64x2Eq, kLoong64F64x2Eq) \ ++ V(F64x2Ne, kLoong64F64x2Ne) \ ++ V(F64x2Lt, kLoong64F64x2Lt) \ ++ V(F64x2Le, kLoong64F64x2Le) \ ++ V(I64x2Add, kLoong64I64x2Add) \ ++ V(I64x2Sub, kLoong64I64x2Sub) \ ++ V(I64x2Mul, kLoong64I64x2Mul) \ ++ V(F32x4Add, kLoong64F32x4Add) \ ++ V(F32x4AddHoriz, kLoong64F32x4AddHoriz) \ ++ V(F32x4Sub, kLoong64F32x4Sub) \ ++ V(F32x4Mul, kLoong64F32x4Mul) \ ++ V(F32x4Div, kLoong64F32x4Div) \ ++ V(F32x4Max, kLoong64F32x4Max) \ ++ V(F32x4Min, kLoong64F32x4Min) \ ++ V(F32x4Eq, kLoong64F32x4Eq) \ ++ V(F32x4Ne, kLoong64F32x4Ne) \ ++ V(F32x4Lt, kLoong64F32x4Lt) \ ++ V(F32x4Le, kLoong64F32x4Le) \ ++ V(I32x4Add, kLoong64I32x4Add) \ ++ V(I32x4AddHoriz, kLoong64I32x4AddHoriz) \ ++ V(I32x4Sub, kLoong64I32x4Sub) \ ++ V(I32x4Mul, kLoong64I32x4Mul) \ ++ V(I32x4MaxS, kLoong64I32x4MaxS) \ ++ V(I32x4MinS, kLoong64I32x4MinS) \ ++ V(I32x4MaxU, kLoong64I32x4MaxU) \ ++ V(I32x4MinU, kLoong64I32x4MinU) \ ++ V(I32x4Eq, kLoong64I32x4Eq) \ ++ V(I32x4Ne, kLoong64I32x4Ne) \ ++ V(I32x4GtS, kLoong64I32x4GtS) \ ++ V(I32x4GeS, kLoong64I32x4GeS) \ ++ V(I32x4GtU, kLoong64I32x4GtU) \ ++ V(I32x4GeU, kLoong64I32x4GeU) \ ++ V(I16x8Add, kLoong64I16x8Add) \ ++ 
V(I16x8AddSaturateS, kLoong64I16x8AddSaturateS) \ ++ V(I16x8AddSaturateU, kLoong64I16x8AddSaturateU) \ ++ V(I16x8AddHoriz, kLoong64I16x8AddHoriz) \ ++ V(I16x8Sub, kLoong64I16x8Sub) \ ++ V(I16x8SubSaturateS, kLoong64I16x8SubSaturateS) \ ++ V(I16x8SubSaturateU, kLoong64I16x8SubSaturateU) \ ++ V(I16x8Mul, kLoong64I16x8Mul) \ ++ V(I16x8MaxS, kLoong64I16x8MaxS) \ ++ V(I16x8MinS, kLoong64I16x8MinS) \ ++ V(I16x8MaxU, kLoong64I16x8MaxU) \ ++ V(I16x8MinU, kLoong64I16x8MinU) \ ++ V(I16x8Eq, kLoong64I16x8Eq) \ ++ V(I16x8Ne, kLoong64I16x8Ne) \ ++ V(I16x8GtS, kLoong64I16x8GtS) \ ++ V(I16x8GeS, kLoong64I16x8GeS) \ ++ V(I16x8GtU, kLoong64I16x8GtU) \ ++ V(I16x8GeU, kLoong64I16x8GeU) \ ++ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \ ++ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \ ++ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \ ++ V(I8x16Add, kLoong64I8x16Add) \ ++ V(I8x16AddSaturateS, kLoong64I8x16AddSaturateS) \ ++ V(I8x16AddSaturateU, kLoong64I8x16AddSaturateU) \ ++ V(I8x16Sub, kLoong64I8x16Sub) \ ++ V(I8x16SubSaturateS, kLoong64I8x16SubSaturateS) \ ++ V(I8x16SubSaturateU, kLoong64I8x16SubSaturateU) \ ++ V(I8x16Mul, kLoong64I8x16Mul) \ ++ V(I8x16MaxS, kLoong64I8x16MaxS) \ ++ V(I8x16MinS, kLoong64I8x16MinS) \ ++ V(I8x16MaxU, kLoong64I8x16MaxU) \ ++ V(I8x16MinU, kLoong64I8x16MinU) \ ++ V(I8x16Eq, kLoong64I8x16Eq) \ ++ V(I8x16Ne, kLoong64I8x16Ne) \ ++ V(I8x16GtS, kLoong64I8x16GtS) \ ++ V(I8x16GeS, kLoong64I8x16GeS) \ ++ V(I8x16GtU, kLoong64I8x16GtU) \ ++ V(I8x16GeU, kLoong64I8x16GeU) \ ++ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \ ++ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \ ++ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \ ++ V(S128And, kLoong64S128And) \ ++ V(S128Or, kLoong64S128Or) \ ++ V(S128Xor, kLoong64S128Xor) \ ++ V(S128AndNot, kLoong64S128AndNot) ++ ++void InstructionSelector::VisitS128Zero(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64S128Zero, g.DefineAsRegister(node)); ++} ++ ++#define SIMD_VISIT_SPLAT(Type) \ ++ void InstructionSelector::Visit##Type##Splat(Node* node) { \ ++ VisitRR(this, kLoong64##Type##Splat, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) ++#undef SIMD_VISIT_SPLAT ++ ++#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ ++ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ ++ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \ ++ } ++SIMD_VISIT_EXTRACT_LANE(F64x2, ) ++SIMD_VISIT_EXTRACT_LANE(F32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I64x2, ) ++SIMD_VISIT_EXTRACT_LANE(I32x4, ) ++SIMD_VISIT_EXTRACT_LANE(I16x8, U) ++SIMD_VISIT_EXTRACT_LANE(I16x8, S) ++SIMD_VISIT_EXTRACT_LANE(I8x16, U) ++SIMD_VISIT_EXTRACT_LANE(I8x16, S) ++#undef SIMD_VISIT_EXTRACT_LANE ++ ++#define SIMD_VISIT_REPLACE_LANE(Type) \ ++ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ ++ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \ ++ } ++SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) ++#undef SIMD_VISIT_REPLACE_LANE ++ ++#define SIMD_VISIT_UNOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRR(this, instruction, node); \ ++ } ++SIMD_UNOP_LIST(SIMD_VISIT_UNOP) ++#undef SIMD_VISIT_UNOP ++ ++#define SIMD_VISIT_SHIFT_OP(Name) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitSimdShift(this, kLoong64##Name, node); \ ++ } ++SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) ++#undef SIMD_VISIT_SHIFT_OP ++ ++#define SIMD_VISIT_BINOP(Name, instruction) \ ++ void InstructionSelector::Visit##Name(Node* node) { \ ++ VisitRRR(this, instruction, node); \ ++ } 
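++// Editorial note: each V(Name, instruction) pair in SIMD_BINOP_LIST expands
++// through the macro above into a one-line visitor, e.g. VisitF32x4Add simply
++// calls VisitRRR(this, kLoong64F32x4Add, node).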
++SIMD_BINOP_LIST(SIMD_VISIT_BINOP) ++#undef SIMD_VISIT_BINOP ++ ++void InstructionSelector::VisitS128Select(Node* node) { ++ VisitRRRR(this, kLoong64S128Select, node); ++} ++ ++namespace { ++ ++struct ShuffleEntry { ++ uint8_t shuffle[kSimd128Size]; ++ ArchOpcode opcode; ++}; ++ ++static const ShuffleEntry arch_shuffles[] = { ++ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, ++ kLoong64S32x4InterleaveRight}, ++ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, ++ kLoong64S32x4InterleaveLeft}, ++ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, ++ kLoong64S32x4PackEven}, ++ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, ++ kLoong64S32x4PackOdd}, ++ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, ++ kLoong64S32x4InterleaveEven}, ++ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, ++ kLoong64S32x4InterleaveOdd}, ++ ++ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, ++ kLoong64S16x8InterleaveRight}, ++ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, ++ kLoong64S16x8InterleaveLeft}, ++ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, ++ kLoong64S16x8PackEven}, ++ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, ++ kLoong64S16x8PackOdd}, ++ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, ++ kLoong64S16x8InterleaveEven}, ++ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, ++ kLoong64S16x8InterleaveOdd}, ++ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kLoong64S16x4Reverse}, ++ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kLoong64S16x2Reverse}, ++ ++ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, ++ kLoong64S8x16InterleaveRight}, ++ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, ++ kLoong64S8x16InterleaveLeft}, ++ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, ++ kLoong64S8x16PackEven}, ++ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, ++ kLoong64S8x16PackOdd}, ++ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, ++ kLoong64S8x16InterleaveEven}, ++ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, ++ kLoong64S8x16InterleaveOdd}, ++ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kLoong64S8x8Reverse}, ++ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kLoong64S8x4Reverse}, ++ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kLoong64S8x2Reverse}}; ++ ++bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, ++ size_t num_entries, bool is_swizzle, ++ ArchOpcode* opcode) { ++ uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1;
++  for (size_t i = 0; i < num_entries; ++i) {
++    const ShuffleEntry& entry = table[i];
++    int j = 0;
++    for (; j < kSimd128Size; ++j) {
++      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
++        break;
++      }
++    }
++    if (j == kSimd128Size) {
++      *opcode = entry.opcode;
++      return true;
++    }
++  }
++  return false;
++}
++
++}  // namespace
++
++void InstructionSelector::VisitS8x16Shuffle(Node* node) {
++  uint8_t shuffle[kSimd128Size];
++  bool is_swizzle;
++  CanonicalizeShuffle(node, shuffle, &is_swizzle);
++  uint8_t shuffle32x4[4];
++  ArchOpcode opcode;
++  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
++                          is_swizzle, &opcode)) {
++    VisitRRR(this, opcode, node);
++    return;
++  }
++  Node* input0 = node->InputAt(0);
++  Node* input1 = node->InputAt(1);
++  uint8_t offset;
++  Loong64OperandGenerator g(this);
++  if (TryMatchConcat(shuffle, &offset)) {
++    Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
++         g.UseRegister(input0), g.UseImmediate(offset));
++    return;
++  }
++  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
++    Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
++         g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
++    return;
++  }
++  Emit(kLoong64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
++       g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
++       g.UseImmediate(Pack4Lanes(shuffle + 4)),
++       g.UseImmediate(Pack4Lanes(shuffle + 8)),
++       g.UseImmediate(Pack4Lanes(shuffle + 12)));
++}
++
++void InstructionSelector::VisitS8x16Swizzle(Node* node) {
++  Loong64OperandGenerator g(this);
++  InstructionOperand temps[] = {g.TempSimd128Register()};
++  // We don't want input 0 or input 1 to be the same as output, since we will
++  // modify output before doing the calculation.
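++  // Editorial note: the UseUniqueRegister operands below are what enforce
++  // that constraint, keeping both inputs in registers distinct from the
++  // output.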
++ Emit(kLoong64S8x16Swizzle, g.DefineAsRegister(node), ++ g.UseUniqueRegister(node->InputAt(0)), ++ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); ++} ++ ++void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) { ++ Loong64OperandGenerator g(this); ++ Emit(kLoong64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), ++ g.TempImmediate(0)); ++} ++ ++// static ++MachineOperatorBuilder::Flags ++InstructionSelector::SupportedMachineOperatorFlags() { ++ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; ++ return flags | MachineOperatorBuilder::kWord32Ctz | ++ MachineOperatorBuilder::kWord64Ctz | ++ MachineOperatorBuilder::kWord32Popcnt | ++ MachineOperatorBuilder::kWord64Popcnt | ++ MachineOperatorBuilder::kWord32ShiftIsSafe | ++ MachineOperatorBuilder::kInt32DivIsSafe | ++ MachineOperatorBuilder::kUint32DivIsSafe | ++ MachineOperatorBuilder::kFloat64RoundDown | ++ MachineOperatorBuilder::kFloat32RoundDown | ++ MachineOperatorBuilder::kFloat64RoundUp | ++ MachineOperatorBuilder::kFloat32RoundUp | ++ MachineOperatorBuilder::kFloat64RoundTruncate | ++ MachineOperatorBuilder::kFloat32RoundTruncate | ++ MachineOperatorBuilder::kFloat64RoundTiesEven | ++ MachineOperatorBuilder::kFloat32RoundTiesEven; ++} ++ ++// static ++MachineOperatorBuilder::AlignmentRequirements ++InstructionSelector::AlignmentRequirements() { ++ return MachineOperatorBuilder::AlignmentRequirements:: ++ FullUnalignedAccessSupport(); ++} ++ ++#undef SIMD_BINOP_LIST ++#undef SIMD_SHIFT_OP_LIST ++#undef SIMD_UNOP_LIST ++#undef SIMD_TYPE_LIST ++#undef TRACE_UNIMPL ++#undef TRACE ++ ++} // namespace compiler ++} // namespace internal ++} // namespace v8 +diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc +index 4967f2bb..a1110b73 100644 +--- a/deps/v8/src/compiler/c-linkage.cc ++++ b/deps/v8/src/compiler/c-linkage.cc +@@ -94,9 +94,22 @@ namespace { + #define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 + #define CALLEE_SAVE_REGISTERS \ + s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ +- s7.bit() +-#define CALLEE_SAVE_FP_REGISTERS \ +- f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit() ++ s7.bit() | fp.bit() ++#define CALLEE_SAVE_FP_REGISTERS \ ++ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ ++ f30.bit() | f31.bit() ++ ++#elif V8_TARGET_ARCH_LOONG64 ++// =========================================================================== ++// == loong64 ================================================================= ++// =========================================================================== ++#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 ++#define CALLEE_SAVE_REGISTERS \ ++ s0.bit() | 
s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \ ++ s7.bit() | fp.bit() ++#define CALLEE_SAVE_FP_REGISTERS \ ++ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \ ++ f30.bit() | f31.bit() + + #elif V8_TARGET_ARCH_PPC64 + // =========================================================================== +diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc +index ccf45202..abbc3493 100644 +--- a/deps/v8/src/debug/debug-evaluate.cc ++++ b/deps/v8/src/debug/debug-evaluate.cc +@@ -1062,7 +1062,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) { + } + CHECK(!failed); + #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ +- defined(V8_TARGET_ARCH_MIPS64) ++ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) + // Isolate-independent builtin calls and jumps do not emit reloc infos + // on PPC. We try to avoid using PC relative code due to performance + // issue with especially older hardwares. +diff --git a/deps/v8/src/debug/loong64/debug-loong64.cc b/deps/v8/src/debug/loong64/debug-loong64.cc +new file mode 100644 +index 00000000..cf350101 +--- /dev/null ++++ b/deps/v8/src/debug/loong64/debug-loong64.cc +@@ -0,0 +1,56 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LOONG64 ++ ++#include "src/debug/debug.h" ++ ++#include "src/codegen/macro-assembler.h" ++#include "src/debug/liveedit.h" ++#include "src/execution/frames-inl.h" ++ ++namespace v8 { ++namespace internal { ++ ++#define __ ACCESS_MASM(masm) ++ ++void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) { ++ { ++ FrameScope scope(masm, StackFrame::INTERNAL); ++ __ CallRuntime(Runtime::kHandleDebuggerStatement, 0); ++ } ++ __ MaybeDropFrames(); ++ ++ // Return to caller. ++ __ Ret(); ++} ++ ++void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) { ++ // Frame is being dropped: ++ // - Drop to the target frame specified by a1. ++ // - Look up current function on the frame. ++ // - Leave the frame. ++ // - Restart the frame by calling the function. ++ __ mov(fp, a1); ++ __ Ld_d(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ ++ // Pop return address and frame. ++ __ LeaveFrame(StackFrame::INTERNAL); ++ ++ __ Ld_d(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); ++ __ Ld_hu( ++ a0, FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset)); ++ __ mov(a2, a0); ++ ++ __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION); ++} ++ ++const bool LiveEdit::kFrameDropperSupported = true; ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LOONG64 +diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc +new file mode 100644 +index 00000000..23a0051d +--- /dev/null ++++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc +@@ -0,0 +1,241 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
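++
++// The generated stub spills every register, calls Deoptimizer::New() and
++// Deoptimizer::ComputeOutputFrames(), then rebuilds the output frames on
++// the stack and jumps to the continuation of the topmost frame.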
++ ++#include "src/codegen/macro-assembler.h" ++#include "src/codegen/register-configuration.h" ++#include "src/codegen/safepoint-table.h" ++#include "src/deoptimizer/deoptimizer.h" ++ ++namespace v8 { ++namespace internal { ++ ++const bool Deoptimizer::kSupportsFixedDeoptExitSizes = false; ++const int Deoptimizer::kNonLazyDeoptExitSize = 0; ++const int Deoptimizer::kLazyDeoptExitSize = 0; ++ ++#define __ masm-> ++ ++// This code tries to be close to ia32 code so that any changes can be ++// easily ported. ++void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, ++ Isolate* isolate, ++ DeoptimizeKind deopt_kind) { ++ NoRootArrayScope no_root_array(masm); ++ ++ // Unlike on ARM we don't save all the registers, just the useful ones. ++ // For the rest, there are gaps on the stack, so the offsets remain the same. ++ const int kNumberOfRegisters = Register::kNumRegisters; ++ ++ RegList restored_regs = kJSCallerSaved | kCalleeSaved; ++ RegList saved_regs = restored_regs | sp.bit() | ra.bit(); ++ ++ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; ++ ++ // Save all double FPU registers before messing with them. ++ __ Sub_d(sp, sp, Operand(kDoubleRegsSize)); ++ const RegisterConfiguration* config = RegisterConfiguration::Default(); ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ const DoubleRegister fpu_reg = DoubleRegister::from_code(code); ++ int offset = code * kDoubleSize; ++ __ Fst_d(fpu_reg, MemOperand(sp, offset)); ++ } ++ ++ // Push saved_regs (needed to populate FrameDescription::registers_). ++ // Leave gaps for other registers. ++ __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize); ++ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { ++ if ((saved_regs & (1 << i)) != 0) { ++ __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i)); ++ } ++ } ++ ++ __ li(a2, Operand(ExternalReference::Create( ++ IsolateAddressId::kCEntryFPAddress, isolate))); ++ __ St_d(fp, MemOperand(a2, 0)); ++ ++ const int kSavedRegistersAreaSize = ++ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; ++ ++ // Get the bailout is passed as kRootRegister by the caller. ++ __ mov(a2, kRootRegister); ++ ++ // Get the address of the location in the code object (a3) (return ++ // address for lazy deoptimization) and compute the fp-to-sp delta in ++ // register a4. ++ __ mov(a3, ra); ++ __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize)); ++ ++ __ Sub_d(a4, fp, a4); ++ ++ // Allocate a new deoptimizer object. ++ __ PrepareCallCFunction(6, a5); ++ // Pass six arguments, according to n64 ABI. ++ __ mov(a0, zero_reg); ++ Label context_check; ++ __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); ++ __ JumpIfSmi(a1, &context_check); ++ __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); ++ __ bind(&context_check); ++ __ li(a1, Operand(static_cast(deopt_kind))); ++ // a2: bailout id already loaded. ++ // a3: code address or 0 already loaded. ++ // a4: already has fp-to-sp delta. ++ __ li(a5, Operand(ExternalReference::isolate_address(isolate))); ++ ++ // Call Deoptimizer::New(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6); ++ } ++ ++ // Preserve "deoptimizer" object in register v0 and get the input ++ // frame descriptor pointer to a1 (deoptimizer->input_); ++ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. 
++ // TODO save a0 ++ //__ mov(a0, v0); ++ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset())); ++ ++ // Copy core registers into FrameDescription::registers_[kNumRegisters]. ++ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); ++ for (int i = 0; i < kNumberOfRegisters; i++) { ++ int offset = (i * kPointerSize) + FrameDescription::registers_offset(); ++ if ((saved_regs & (1 << i)) != 0) { ++ __ Ld_d(a2, MemOperand(sp, i * kPointerSize)); ++ __ St_d(a2, MemOperand(a1, offset)); ++ } else if (FLAG_debug_code) { ++ __ li(a2, Operand(kDebugZapValue)); ++ __ St_d(a2, MemOperand(a1, offset)); ++ } ++ } ++ ++ int double_regs_offset = FrameDescription::double_registers_offset(); ++ // Copy FPU registers to ++ // double_registers_[DoubleRegister::kNumAllocatableRegisters] ++ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { ++ int code = config->GetAllocatableDoubleCode(i); ++ int dst_offset = code * kDoubleSize + double_regs_offset; ++ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; ++ __ Fld_d(f0, MemOperand(sp, src_offset)); ++ __ Fst_d(f0, MemOperand(a1, dst_offset)); ++ } ++ ++ // Remove the saved registers from the stack. ++ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize)); ++ ++ // Compute a pointer to the unwinding limit in register a2; that is ++ // the first stack slot not part of the input frame. ++ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset())); ++ __ Add_d(a2, a2, sp); ++ ++ // Unwind the stack down to - but not including - the unwinding ++ // limit and copy the contents of the activation frame to the input ++ // frame description. ++ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset())); ++ Label pop_loop; ++ Label pop_loop_header; ++ __ Branch(&pop_loop_header); ++ __ bind(&pop_loop); ++ __ pop(a4); ++ __ St_d(a4, MemOperand(a3, 0)); ++ __ addi_d(a3, a3, sizeof(uint64_t)); ++ __ bind(&pop_loop_header); ++ __ BranchShort(&pop_loop, ne, a2, Operand(sp)); ++ // Compute the output frame in the deoptimizer. ++ __ push(a0); // Preserve deoptimizer object across call. ++ // a0: deoptimizer object; a1: scratch. ++ __ PrepareCallCFunction(1, a1); ++ // Call Deoptimizer::ComputeOutputFrames(). ++ { ++ AllowExternalCallThatCantCauseGC scope(masm); ++ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); ++ } ++ __ pop(a0); // Restore deoptimizer object (class Deoptimizer). ++ ++ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); ++ ++ // Replace the current (input) frame with the output frames. ++ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; ++ // Outer loop state: a4 = current "FrameDescription** output_", ++ // a1 = one past the last FrameDescription**. ++ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset())); ++ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_. ++ __ Alsl_d(a1, a1, a4, kPointerSizeLog2, t7); ++ __ Branch(&outer_loop_header); ++ __ bind(&outer_push_loop); ++ // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
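++  // a3 counts down from the frame size to zero while the frame contents
++  // are pushed from the highest address downward.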
++  __ Ld_d(a2, MemOperand(a4, 0));  // output_[ix]
++  __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
++  __ Branch(&inner_loop_header);
++  __ bind(&inner_push_loop);
++  __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
++  __ Add_d(a6, a2, Operand(a3));
++  __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
++  __ push(a7);
++  __ bind(&inner_loop_header);
++  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
++
++  __ Add_d(a4, a4, Operand(kPointerSize));
++  __ bind(&outer_loop_header);
++  __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
++
++  __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
++  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
++    int code = config->GetAllocatableDoubleCode(i);
++    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
++    int src_offset = code * kDoubleSize + double_regs_offset;
++    __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
++  }
++
++  // Push pc and continuation from the last output frame.
++  __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
++  __ push(a6);
++  __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
++  __ push(a6);
++
++  // Technically restoring 't7' should work unless zero_reg is also restored
++  // but it's safer to check for this.
++  DCHECK(!(t7.bit() & restored_regs));
++  // Restore the registers from the last output frame.
++  __ mov(t7, a2);
++  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
++    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
++    if ((restored_regs & (1 << i)) != 0) {
++      __ Ld_d(ToRegister(i), MemOperand(t7, offset));
++    }
++  }
++
++  __ pop(t7);  // Get continuation, leave pc on stack.
++  __ pop(ra);
++  __ Jump(t7);
++  __ stop();
++}
++
++// Maximum size of a table entry generated below.
++const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
++
++Float32 RegisterValues::GetFloatRegister(unsigned n) const {
++  return Float32::FromBits(
++      static_cast<uint32_t>(double_registers_[n].get_bits()));
++}
++
++void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
++  SetFrameSlot(offset, value);
++}
++
++void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
++  SetFrameSlot(offset, value);
++}
++
++void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
++  // No embedded constant pool support.
++  UNREACHABLE();
++}
++
++void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
++
++#undef __
++
++}  // namespace internal
++}  // namespace v8
+diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
+index 5f364373..ec8d0340 100644
+--- a/deps/v8/src/diagnostics/gdb-jit.cc
++++ b/deps/v8/src/diagnostics/gdb-jit.cc
+@@ -1077,6 +1077,8 @@ class DebugInfoSection : public DebugSection {
+     UNIMPLEMENTED();
+ #elif V8_TARGET_ARCH_MIPS64
+     UNIMPLEMENTED();
++#elif V8_TARGET_ARCH_LOONG64
++    UNIMPLEMENTED();
+ #elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
+     w->Write(DW_OP_reg31);  // The frame pointer is here on PPC64.
+ #elif V8_TARGET_ARCH_S390
+diff --git a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
+new file mode 100644
+index 00000000..6fe44186
+--- /dev/null
++++ b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
+@@ -0,0 +1,1695 @@
++#include <assert.h>
++#include <stdarg.h>
++#include <stdio.h>
++#include <string.h>
++
++#if V8_TARGET_ARCH_LOONG64
++
++#include "src/base/platform/platform.h"
++#include "src/codegen/loong64/constants-loong64.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/diagnostics/disasm.h"
++
++namespace v8 {
++namespace internal {
++
++//------------------------------------------------------------------------------
++
++// Decoder decodes and disassembles instructions into an output buffer.
++// It uses the converter to convert register names and call destinations into
++// more informative description.
++class Decoder {
++ public:
++  Decoder(const disasm::NameConverter& converter,
++          v8::internal::Vector<char> out_buffer)
++      : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
++    out_buffer_[out_buffer_pos_] = '\0';
++  }
++
++  ~Decoder() {}
++
++  // Writes one disassembled instruction into 'buffer' (0-terminated).
++  // Returns the length of the disassembled machine instruction in bytes.
++  int InstructionDecode(byte* instruction);
++
++ private:
++  // Bottleneck functions to print into the out_buffer.
++  void PrintChar(const char ch);
++  void Print(const char* str);
++
++  // Printing of common values.
++  void PrintRegister(int reg);
++  void PrintFPURegister(int freg);
++  void PrintFPUStatusRegister(int freg);
++  void PrintRj(Instruction* instr);
++  void PrintRk(Instruction* instr);
++  void PrintRd(Instruction* instr);
++  void PrintFj(Instruction* instr);
++  void PrintFk(Instruction* instr);
++  void PrintFd(Instruction* instr);
++  void PrintFa(Instruction* instr);
++  void PrintSa2(Instruction* instr);
++  void PrintSa3(Instruction* instr);
++  void PrintUi5(Instruction* instr);
++  void PrintUi6(Instruction* instr);
++  void PrintUi12(Instruction* instr);
++  void PrintXi12(Instruction* instr);
++  void PrintMsbw(Instruction* instr);
++  void PrintLsbw(Instruction* instr);
++  void PrintMsbd(Instruction* instr);
++  void PrintLsbd(Instruction* instr);
++  // void PrintCond(Instruction* instr);
++  void PrintSi12(Instruction* instr);
++  void PrintSi14(Instruction* instr);
++  void PrintSi16(Instruction* instr);
++  void PrintSi20(Instruction* instr);
++  void PrintCj(Instruction* instr);
++  void PrintCd(Instruction* instr);
++  void PrintCa(Instruction* instr);
++  void PrintCode(Instruction* instr);
++  void PrintHint5(Instruction* instr);
++  void PrintHint15(Instruction* instr);
++  void PrintPCOffs16(Instruction* instr);
++  void PrintPCOffs21(Instruction* instr);
++  void PrintPCOffs26(Instruction* instr);
++  void PrintOffs16(Instruction* instr);
++  void PrintOffs21(Instruction* instr);
++  void PrintOffs26(Instruction* instr);
++
++  // Handle formatting of instructions and their options.
++  int FormatRegister(Instruction* instr, const char* option);
++  int FormatFPURegister(Instruction* instr, const char* option);
++  int FormatOption(Instruction* instr, const char* option);
++  void Format(Instruction* instr, const char* format);
++  void Unknown(Instruction* instr);
++  int DecodeBreakInstr(Instruction* instr);
++
++  // Each of these functions decodes one particular instruction type.
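++  // The handler is selected by the width of the primary opcode field: a
++  // DecodeTypekOpN handler covers instructions whose opcode occupies the
++  // topmost N bits of the instruction word.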
++  int InstructionDecode(Instruction* instr);
++  void DecodeTypekOp6(Instruction* instr);
++  void DecodeTypekOp7(Instruction* instr);
++  void DecodeTypekOp8(Instruction* instr);
++  void DecodeTypekOp10(Instruction* instr);
++  void DecodeTypekOp12(Instruction* instr);
++  void DecodeTypekOp14(Instruction* instr);
++  int DecodeTypekOp17(Instruction* instr);
++  void DecodeTypekOp22(Instruction* instr);
++
++  const disasm::NameConverter& converter_;
++  v8::internal::Vector<char> out_buffer_;
++  int out_buffer_pos_;
++
++  DISALLOW_COPY_AND_ASSIGN(Decoder);
++};
++
++// Support for assertions in the Decoder formatting functions.
++#define STRING_STARTS_WITH(string, compare_string) \
++  (strncmp(string, compare_string, strlen(compare_string)) == 0)
++
++// Append the ch to the output buffer.
++void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
++
++// Append the str to the output buffer.
++void Decoder::Print(const char* str) {
++  char cur = *str++;
++  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
++    PrintChar(cur);
++    cur = *str++;
++  }
++  out_buffer_[out_buffer_pos_] = 0;
++}
++
++// Print the register name according to the active name converter.
++void Decoder::PrintRegister(int reg) {
++  Print(converter_.NameOfCPURegister(reg));
++}
++
++void Decoder::PrintRj(Instruction* instr) {
++  int reg = instr->RjValue();
++  PrintRegister(reg);
++}
++
++void Decoder::PrintRk(Instruction* instr) {
++  int reg = instr->RkValue();
++  PrintRegister(reg);
++}
++
++void Decoder::PrintRd(Instruction* instr) {
++  int reg = instr->RdValue();
++  PrintRegister(reg);
++}
++
++// Print the FPUregister name according to the active name converter.
++void Decoder::PrintFPURegister(int freg) {
++  Print(converter_.NameOfXMMRegister(freg));
++}
++
++void Decoder::PrintFj(Instruction* instr) {
++  int freg = instr->FjValue();
++  PrintFPURegister(freg);
++}
++
++void Decoder::PrintFk(Instruction* instr) {
++  int freg = instr->FkValue();
++  PrintFPURegister(freg);
++}
++
++void Decoder::PrintFd(Instruction* instr) {
++  int freg = instr->FdValue();
++  PrintFPURegister(freg);
++}
++
++void Decoder::PrintFa(Instruction* instr) {
++  int freg = instr->FaValue();
++  PrintFPURegister(freg);
++}
++
++// Print the integer value of the sa field.
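++// For ALSL and ALSL.D the encoded field stores the shift amount minus one,
++// so the value printed below is adjusted by one for those opcodes.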
++void Decoder::PrintSa2(Instruction* instr) { ++ int sa = instr->Sa2Value(); ++ uint32_t opcode = (instr->InstructionBits() >> 18) << 18; ++ if (opcode == ALSL || opcode == ALSL_D) { ++ sa += 1; ++ } ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++void Decoder::PrintSa3(Instruction* instr) { ++ int sa = instr->Sa3Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); ++} ++ ++void Decoder::PrintUi5(Instruction* instr) { ++ int ui = instr->Ui5Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintUi6(Instruction* instr) { ++ int ui = instr->Ui6Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintUi12(Instruction* instr) { ++ int ui = instr->Ui12Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui); ++} ++ ++void Decoder::PrintXi12(Instruction* instr) { ++ int xi = instr->Ui12Value(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi); ++} ++ ++void Decoder::PrintMsbd(Instruction* instr) { ++ int msbd = instr->MsbdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd); ++} ++ ++void Decoder::PrintLsbd(Instruction* instr) { ++ int lsbd = instr->LsbdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd); ++} ++ ++void Decoder::PrintMsbw(Instruction* instr) { ++ int msbw = instr->MsbwValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw); ++} ++ ++void Decoder::PrintLsbw(Instruction* instr) { ++ int lsbw = instr->LsbwValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw); ++} ++ ++void Decoder::PrintSi12(Instruction* instr) { ++ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi14(Instruction* instr) { ++ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits); ++ si <<= 2; ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi16(Instruction* instr) { ++ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintSi20(Instruction* instr) { ++ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si); ++} ++ ++void Decoder::PrintCj(Instruction* instr) { ++ int cj = instr->CjValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj); ++} ++ ++void Decoder::PrintCd(Instruction* instr) { ++ int cd = instr->CdValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd); ++} ++ ++void Decoder::PrintCa(Instruction* instr) { ++ int ca = instr->CaValue(); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca); ++} ++ ++void Decoder::PrintCode(Instruction* instr) { ++ int code = instr->CodeValue(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code); ++} ++ ++void Decoder::PrintHint5(Instruction* instr) { ++ int hint = instr->Hint5Value(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint); ++} ++ ++void Decoder::PrintHint15(Instruction* instr) { ++ int hint = instr->Hint15Value(); ++ out_buffer_pos_ += ++ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", 
hint, hint);
++}
++
++void Decoder::PrintPCOffs16(Instruction* instr) {
++  int n_bits = 2;
++  int offs = instr->Offs16Value();
++  int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >>
++               (32 - kOffsLowBits - n_bits);
++  out_buffer_pos_ += SNPrintF(
++      out_buffer_ + out_buffer_pos_, "%s",
++      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
++}
++
++void Decoder::PrintPCOffs21(Instruction* instr) {
++  int n_bits = 2;
++  int offs = instr->Offs21Value();
++  int target =
++      ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >>
++      (32 - kOffsLowBits - kOffs21HighBits - n_bits);
++  out_buffer_pos_ += SNPrintF(
++      out_buffer_ + out_buffer_pos_, "%s",
++      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
++}
++
++void Decoder::PrintPCOffs26(Instruction* instr) {
++  int n_bits = 2;
++  int offs = instr->Offs26Value();
++  int target =
++      ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >>
++      (32 - kOffsLowBits - kOffs26HighBits - n_bits);
++  out_buffer_pos_ += SNPrintF(
++      out_buffer_ + out_buffer_pos_, "%s",
++      converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
++}
++
++void Decoder::PrintOffs16(Instruction* instr) {
++  int offs = instr->Offs16Value();
++  offs <<= (32 - kOffsLowBits);
++  offs >>= (32 - kOffsLowBits);
++  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
++}
++
++void Decoder::PrintOffs21(Instruction* instr) {
++  int offs = instr->Offs21Value();
++  offs <<= (32 - kOffsLowBits - kOffs21HighBits);
++  offs >>= (32 - kOffsLowBits - kOffs21HighBits);
++  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
++}
++
++void Decoder::PrintOffs26(Instruction* instr) {
++  int offs = instr->Offs26Value();
++  offs <<= (32 - kOffsLowBits - kOffs26HighBits);
++  offs >>= (32 - kOffsLowBits - kOffs26HighBits);
++  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
++}
++
++// Handle all register based formatting in this function to reduce the
++// complexity of FormatOption.
++int Decoder::FormatRegister(Instruction* instr, const char* format) {
++  DCHECK_EQ(format[0], 'r');
++  if (format[1] == 'j') {  // 'rj: Rj register.
++    int reg = instr->RjValue();
++    PrintRegister(reg);
++    return 2;
++  } else if (format[1] == 'k') {  // 'rk: rk register.
++    int reg = instr->RkValue();
++    PrintRegister(reg);
++    return 2;
++  } else if (format[1] == 'd') {  // 'rd: rd register.
++    int reg = instr->RdValue();
++    PrintRegister(reg);
++    return 2;
++  }
++  UNREACHABLE();
++  return 0;
++}
++
++// Handle all FPUregister based formatting in this function to reduce the
++// complexity of FormatOption.
++int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
++  DCHECK_EQ(format[0], 'f');
++  if (format[1] == 'j') {  // 'fj: fj register.
++    int reg = instr->FjValue();
++    PrintFPURegister(reg);
++    return 2;
++  } else if (format[1] == 'k') {  // 'fk: fk register.
++    int reg = instr->FkValue();
++    PrintFPURegister(reg);
++    return 2;
++  } else if (format[1] == 'd') {  // 'fd: fd register.
++    int reg = instr->FdValue();
++    PrintFPURegister(reg);
++    return 2;
++  } else if (format[1] == 'a') {  // 'fa: fa register.
++    int reg = instr->FaValue();
++    PrintFPURegister(reg);
++    return 2;
++  }
++  UNREACHABLE();
++  return 0;
++}
++
++// FormatOption takes a formatting string and interprets it based on
++// the current instructions. The format string points to the first
++// character of the option string (the option escape has already been
++// consumed by the caller.)
FormatOption returns the number of ++// characters that were consumed from the formatting string. ++int Decoder::FormatOption(Instruction* instr, const char* format) { ++ switch (format[0]) { ++ case 'c': { ++ switch (format[1]) { ++ case 'a': ++ DCHECK(STRING_STARTS_WITH(format, "ca")); ++ PrintCa(instr); ++ return 2; ++ case 'd': ++ DCHECK(STRING_STARTS_WITH(format, "cd")); ++ PrintCd(instr); ++ return 2; ++ case 'j': ++ DCHECK(STRING_STARTS_WITH(format, "cj")); ++ PrintCj(instr); ++ return 2; ++ case 'o': ++ DCHECK(STRING_STARTS_WITH(format, "code")); ++ PrintCode(instr); ++ return 4; ++ } ++ } ++ case 'f': { ++ return FormatFPURegister(instr, format); ++ } ++ case 'h': { ++ if (format[4] == '5') { ++ DCHECK(STRING_STARTS_WITH(format, "hint5")); ++ PrintHint5(instr); ++ return 5; ++ } else if (format[4] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "hint15")); ++ PrintHint15(instr); ++ return 6; ++ } ++ break; ++ } ++ case 'l': { ++ switch (format[3]) { ++ case 'w': ++ DCHECK(STRING_STARTS_WITH(format, "lsbw")); ++ PrintLsbw(instr); ++ return 4; ++ case 'd': ++ DCHECK(STRING_STARTS_WITH(format, "lsbd")); ++ PrintLsbd(instr); ++ return 4; ++ default: ++ return 0; ++ } ++ } ++ case 'm': { ++ if (format[3] == 'w') { ++ DCHECK(STRING_STARTS_WITH(format, "msbw")); ++ PrintMsbw(instr); ++ } else if (format[3] == 'd') { ++ DCHECK(STRING_STARTS_WITH(format, "msbd")); ++ PrintMsbd(instr); ++ } ++ return 4; ++ } ++ case 'o': { ++ if (format[1] == 'f') { ++ if (format[4] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "offs16")); ++ PrintOffs16(instr); ++ return 6; ++ } else if (format[4] == '2') { ++ if (format[5] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "offs21")); ++ PrintOffs21(instr); ++ return 6; ++ } else if (format[5] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "offs26")); ++ PrintOffs26(instr); ++ return 6; ++ } ++ } ++ } ++ break; ++ } ++ case 'p': { ++ if (format[6] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs16")); ++ PrintPCOffs16(instr); ++ return 8; ++ } else if (format[6] == '2') { ++ if (format[7] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs21")); ++ PrintPCOffs21(instr); ++ return 8; ++ } else if (format[7] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "pcoffs26")); ++ PrintPCOffs26(instr); ++ return 8; ++ } ++ } ++ break; ++ } ++ case 'r': { ++ return FormatRegister(instr, format); ++ break; ++ } ++ case 's': { ++ switch (format[1]) { ++ case 'a': ++ if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "sa2")); ++ PrintSa2(instr); ++ } else if (format[2] == '3') { ++ DCHECK(STRING_STARTS_WITH(format, "sa3")); ++ PrintSa3(instr); ++ } ++ return 3; ++ case 'i': ++ if (format[2] == '2') { ++ DCHECK(STRING_STARTS_WITH(format, "si20")); ++ PrintSi20(instr); ++ return 4; ++ } else if (format[2] == '1') { ++ switch (format[3]) { ++ case '2': ++ DCHECK(STRING_STARTS_WITH(format, "si12")); ++ PrintSi12(instr); ++ return 4; ++ case '4': ++ DCHECK(STRING_STARTS_WITH(format, "si14")); ++ PrintSi14(instr); ++ return 4; ++ case '6': ++ DCHECK(STRING_STARTS_WITH(format, "si16")); ++ PrintSi16(instr); ++ return 4; ++ default: ++ break; ++ } ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ } ++ case 'u': { ++ if (format[2] == '5') { ++ DCHECK(STRING_STARTS_WITH(format, "ui5")); ++ PrintUi5(instr); ++ return 3; ++ } else if (format[2] == '6') { ++ DCHECK(STRING_STARTS_WITH(format, "ui6")); ++ PrintUi6(instr); ++ return 3; ++ } else if (format[2] == '1') { ++ DCHECK(STRING_STARTS_WITH(format, "ui12")); ++ PrintUi12(instr); ++ return 4; ++ } ++ break; ++ 
}
++    case 'x': {
++      DCHECK(STRING_STARTS_WITH(format, "xi12"));
++      PrintXi12(instr);
++      return 4;
++    }
++    default:
++      UNREACHABLE();
++  }
++  return 0;
++}
++
++// Format takes a formatting string for a whole instruction and prints it into
++// the output buffer. All escaped options are handed to FormatOption to be
++// parsed further.
++void Decoder::Format(Instruction* instr, const char* format) {
++  char cur = *format++;
++  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
++    if (cur == '\'') {  // Single quote is used as the formatting escape.
++      format += FormatOption(instr, format);
++    } else {
++      out_buffer_[out_buffer_pos_++] = cur;
++    }
++    cur = *format++;
++  }
++  out_buffer_[out_buffer_pos_] = '\0';
++}
++
++// For currently unimplemented decodings the disassembler calls Unknown(instr)
++// which will just print "unknown" of the instruction bits.
++void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
++
++int Decoder::DecodeBreakInstr(Instruction* instr) {
++  // This is already known to be BREAK instr, just extract the code.
++  /*if (instr->Bits(14, 0) == static_cast<int>(kMaxStopCode)) {
++    // This is stop(msg).
++    Format(instr, "break, code: 'code");
++    out_buffer_pos_ += SNPrintF(
++        out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
++        static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
++        reinterpret_cast<uint64_t>(
++            *reinterpret_cast<uint64_t*>(instr + kInstrSize)));
++    // Size 3: the break_ instr, plus embedded 64-bit char pointer.
++    return 3 * kInstrSize;
++  } else {
++    Format(instr, "break, code: 'code");
++    return kInstrSize;
++  }*/
++  Format(instr, "break code: 'code");
++  return kInstrSize;
++}  //===================================================
++
++void Decoder::DecodeTypekOp6(Instruction* instr) {
++  switch (instr->Bits(31, 26) << 26) {
++    case ADDU16I_D:
++      Format(instr, "addu16i.d 'rd, 'rj, 'si16");
++      break;
++    case BEQZ:
++      Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
++      break;
++    case BNEZ:
++      Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
++      break;
++    case BCZ:
++      if (instr->Bit(8))
++        Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
++      else
++        Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
++      break;
++    case JIRL:
++      Format(instr, "jirl 'rd, 'rj, 'offs16");
++      break;
++    case B:
++      Format(instr, "b 'offs26 -> 'pcoffs26");
++      break;
++    case BL:
++      Format(instr, "bl 'offs26 -> 'pcoffs26");
++      break;
++    case BEQ:
++      Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    case BNE:
++      Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    case BLT:
++      Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    case BGE:
++      Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    case BLTU:
++      Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    case BGEU:
++      Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Decoder::DecodeTypekOp7(Instruction* instr) {
++  switch (instr->Bits(31, 25) << 25) {
++    case LU12I_W:
++      Format(instr, "lu12i.w 'rd, 'si20");
++      break;
++    case LU32I_D:
++      Format(instr, "lu32i.d 'rd, 'si20");
++      break;
++    case PCADDI:
++      Format(instr, "pcaddi 'rd, 'si20");
++      break;
++    case PCALAU12I:
++      Format(instr, "pcalau12i 'rd, 'si20");
++      break;
++    case PCADDU12I:
++      Format(instr, "pcaddu12i 'rd, 'si20");
++      break;
++    case PCADDU18I:
++      Format(instr, "pcaddu18i 'rd, 'si20");
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Decoder::DecodeTypekOp8(Instruction* instr) {
++  switch (instr->Bits(31, 24) << 24) {
++    case LDPTR_W:
++      Format(instr, "ldptr.w 'rd, 'rj, 'si14");
++      break;
++    case STPTR_W:
++      Format(instr, "stptr.w 'rd, 'rj, 'si14");
++      break;
++    case LDPTR_D:
++      Format(instr, "ldptr.d 'rd, 'rj, 'si14");
++      break;
++    case STPTR_D:
++      Format(instr, "stptr.d 'rd, 'rj, 'si14");
++      break;
++    case LL_W:
++      Format(instr, "ll.w 'rd, 'rj, 'si14");
++      break;
++    case SC_W:
++      Format(instr, "sc.w 'rd, 'rj, 'si14");
++      break;
++    case LL_D:
++      Format(instr, "ll.d 'rd, 'rj, 'si14");
++      break;
++    case SC_D:
++      Format(instr, "sc.d 'rd, 'rj, 'si14");
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Decoder::DecodeTypekOp10(Instruction* instr) {
++  switch (instr->Bits(31, 22) << 22) {
++    case BSTR_W: {
++      if (instr->Bit(21) != 0) {
++        if (instr->Bit(15) == 0) {
++          Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
++        } else {
++          Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
++        }
++      }
++      break;
++    }
++    case BSTRINS_D:
++      Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
++      break;
++    case BSTRPICK_D:
++      Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
++      break;
++    case SLTI:
++      Format(instr, "slti 'rd, 'rj, 'si12");
++      break;
++    case SLTUI:
++      Format(instr, "sltui 'rd, 'rj, 'si12");
++      break;
++    case ADDI_W:
++      Format(instr, "addi.w 'rd, 'rj, 'si12");
++      break;
++    case ADDI_D:
++      Format(instr, "addi.d 'rd, 'rj, 'si12");
++      break;
++    case LU52I_D:
++      Format(instr, "lu52i.d 'rd, 'rj, 'si12");
++      break;
++    case ANDI:
++      Format(instr, "andi 'rd, 'rj, 'xi12");
++      break;
++    case ORI:
++      Format(instr, "ori 'rd, 'rj, 'xi12");
++      break;
++    case XORI:
++      Format(instr, "xori 'rd, 'rj, 'xi12");
++      break;
++    case LD_B:
++      Format(instr, "ld.b 'rd, 'rj, 'si12");
++      break;
++    case LD_H:
++      Format(instr, "ld.h 'rd, 'rj, 'si12");
++      break;
++    case LD_W:
++      Format(instr, "ld.w 'rd, 'rj, 'si12");
++      break;
++    case LD_D:
++      Format(instr, "ld.d 'rd, 'rj, 'si12");
++      break;
++    case ST_B:
++      Format(instr, "st.b 'rd, 'rj, 'si12");
++      break;
++    case ST_H:
++      Format(instr, "st.h 'rd, 'rj, 'si12");
++      break;
++    case ST_W:
++      Format(instr, "st.w 'rd, 'rj, 'si12");
++      break;
++    case ST_D:
++      Format(instr, "st.d 'rd, 'rj, 'si12");
++      break;
++    case LD_BU:
++      Format(instr, "ld.bu 'rd, 'rj, 'si12");
++      break;
++    case LD_HU:
++      Format(instr, "ld.hu 'rd, 'rj, 'si12");
++      break;
++    case LD_WU:
++      Format(instr, "ld.wu 'rd, 'rj, 'si12");
++      break;
++    case FLD_S:
++      Format(instr, "fld.s 'fd, 'rj, 'si12");
++      break;
++    case FST_S:
++      Format(instr, "fst.s 'fd, 'rj, 'si12");
++      break;
++    case FLD_D:
++      Format(instr, "fld.d 'fd, 'rj, 'si12");
++      break;
++    case FST_D:
++      Format(instr, "fst.d 'fd, 'rj, 'si12");
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Decoder::DecodeTypekOp12(Instruction* instr) {
++  switch (instr->Bits(31, 20) << 20) {
++    case FMADD_S:
++      Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FMADD_D:
++      Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FMSUB_S:
++      Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FMSUB_D:
++      Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FNMADD_S:
++      Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FNMADD_D:
++      Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FNMSUB_S:
++      Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FNMSUB_D:
++      Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa");
++      break;
++    case FCMP_COND_S:
++      switch (instr->Bits(19, 15)) {
++        case CAF:
++          Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
++          break;
++        case SAF:
++          Format(instr, "fcmp.saf.s 
fcc'cd, 'fj, 'fk"); ++ break; ++ case CLT: ++ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CEQ: ++ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SEQ: ++ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CLE: ++ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SLE: ++ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUN: ++ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUN: ++ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CULT: ++ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SULT: ++ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUEQ: ++ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUEQ: ++ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CULE: ++ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SULE: ++ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CNE: ++ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SNE: ++ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk"); ++ break; ++ case COR: ++ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SOR: ++ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk"); ++ break; ++ case CUNE: ++ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk"); ++ break; ++ case SUNE: ++ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ case FCMP_COND_D: ++ switch (instr->Bits(19, 15)) { ++ case CAF: ++ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SAF: ++ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CLT: ++ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CEQ: ++ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SEQ: ++ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CLE: ++ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SLE: ++ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUN: ++ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUN: ++ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CULT: ++ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SULT: ++ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUEQ: ++ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUEQ: ++ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CULE: ++ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SULE: ++ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CNE: ++ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SNE: ++ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk"); ++ break; ++ case COR: ++ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SOR: ++ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk"); ++ break; ++ case CUNE: ++ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk"); ++ break; ++ case SUNE: ++ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ break; ++ case FSEL: ++ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Decoder::DecodeTypekOp14(Instruction* instr) { ++ switch (instr->Bits(31, 18) << 18) { ++ case ALSL: ++ if (instr->Bit(17)) ++ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2"); ++ else ++ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case BYTEPICK_W: ++ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case BYTEPICK_D: ++ 
Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3"); ++ break; ++ case ALSL_D: ++ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2"); ++ break; ++ case SLLI: ++ if (instr->Bit(16)) ++ Format(instr, "slli.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "slli.w 'rd, 'rj, 'ui5"); ++ break; ++ case SRLI: ++ if (instr->Bit(16)) ++ Format(instr, "srli.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "srli.w 'rd, 'rj, 'ui5"); ++ break; ++ case SRAI: ++ if (instr->Bit(16)) ++ Format(instr, "srai.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "srai.w 'rd, 'rj, 'ui5"); ++ break; ++ case ROTRI: ++ if (instr->Bit(16)) ++ Format(instr, "rotri.d 'rd, 'rj, 'ui6"); ++ else ++ Format(instr, "rotri.w 'rd, 'rj, 'ui5"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++int Decoder::DecodeTypekOp17(Instruction* instr) { ++ switch (instr->Bits(31, 15) << 15) { ++ case ADD_W: ++ Format(instr, "add.w 'rd, 'rj, 'rk"); ++ break; ++ case ADD_D: ++ Format(instr, "add.d 'rd, 'rj, 'rk"); ++ break; ++ case SUB_W: ++ Format(instr, "sub.w 'rd, 'rj, 'rk"); ++ break; ++ case SUB_D: ++ Format(instr, "sub.d 'rd, 'rj, 'rk"); ++ break; ++ case SLT: ++ Format(instr, "slt 'rd, 'rj, 'rk"); ++ break; ++ case SLTU: ++ Format(instr, "sltu 'rd, 'rj, 'rk"); ++ break; ++ case MASKEQZ: ++ Format(instr, "maskeqz 'rd, 'rj, 'rk"); ++ break; ++ case MASKNEZ: ++ Format(instr, "masknez 'rd, 'rj, 'rk"); ++ break; ++ case NOR: ++ Format(instr, "nor 'rd, 'rj, 'rk"); ++ break; ++ case AND: ++ Format(instr, "and 'rd, 'rj, 'rk"); ++ break; ++ case OR: ++ Format(instr, "or 'rd, 'rj, 'rk"); ++ break; ++ case XOR: ++ Format(instr, "xor 'rd, 'rj, 'rk"); ++ break; ++ case ORN: ++ Format(instr, "orn 'rd, 'rj, 'rk"); ++ break; ++ case ANDN: ++ Format(instr, "andn 'rd, 'rj, 'rk"); ++ break; ++ case SLL_W: ++ Format(instr, "sll.w 'rd, 'rj, 'rk"); ++ break; ++ case SRL_W: ++ Format(instr, "srl.w 'rd, 'rj, 'rk"); ++ break; ++ case SRA_W: ++ Format(instr, "sra.w 'rd, 'rj, 'rk"); ++ break; ++ case SLL_D: ++ Format(instr, "sll.d 'rd, 'rj, 'rk"); ++ break; ++ case SRL_D: ++ Format(instr, "srl.d 'rd, 'rj, 'rk"); ++ break; ++ case SRA_D: ++ Format(instr, "sra.d 'rd, 'rj, 'rk"); ++ break; ++ case ROTR_D: ++ Format(instr, "rotr.d 'rd, 'rj, 'rk"); ++ break; ++ case ROTR_W: ++ Format(instr, "rotr.w 'rd, 'rj, 'rk"); ++ break; ++ case MUL_W: ++ Format(instr, "mul.w 'rd, 'rj, 'rk"); ++ break; ++ case MULH_W: ++ Format(instr, "mulh.w 'rd, 'rj, 'rk"); ++ break; ++ case MULH_WU: ++ Format(instr, "mulh.wu 'rd, 'rj, 'rk"); ++ break; ++ case MUL_D: ++ Format(instr, "mul.d 'rd, 'rj, 'rk"); ++ break; ++ case MULH_D: ++ Format(instr, "mulh.d 'rd, 'rj, 'rk"); ++ break; ++ case MULH_DU: ++ Format(instr, "mulh.du 'rd, 'rj, 'rk"); ++ break; ++ case MULW_D_W: ++ Format(instr, "mulw.d.w 'rd, 'rj, 'rk"); ++ break; ++ case MULW_D_WU: ++ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk"); ++ break; ++ case DIV_W: ++ Format(instr, "div.w 'rd, 'rj, 'rk"); ++ break; ++ case MOD_W: ++ Format(instr, "mod.w 'rd, 'rj, 'rk"); ++ break; ++ case DIV_WU: ++ Format(instr, "div.wu 'rd, 'rj, 'rk"); ++ break; ++ case MOD_WU: ++ Format(instr, "mod.wu 'rd, 'rj, 'rk"); ++ break; ++ case DIV_D: ++ Format(instr, "div.d 'rd, 'rj, 'rk"); ++ break; ++ case MOD_D: ++ Format(instr, "mod.d 'rd, 'rj, 'rk"); ++ break; ++ case DIV_DU: ++ Format(instr, "div.du 'rd, 'rj, 'rk"); ++ break; ++ case MOD_DU: ++ Format(instr, "mod.du 'rd, 'rj, 'rk"); ++ break; ++ case BREAK: ++ return DecodeBreakInstr(instr); ++ case FADD_S: ++ Format(instr, "fadd.s 'fd, 'fj, 'fk"); ++ break; ++ case FADD_D: ++ Format(instr, "fadd.d 'fd, 'fj, 'fk"); ++ break; ++ case 
FSUB_S: ++ Format(instr, "fsub.s 'fd, 'fj, 'fk"); ++ break; ++ case FSUB_D: ++ Format(instr, "fsub.d 'fd, 'fj, 'fk"); ++ break; ++ case FMUL_S: ++ Format(instr, "fmul.s 'fd, 'fj, 'fk"); ++ break; ++ case FMUL_D: ++ Format(instr, "fmul.d 'fd, 'fj, 'fk"); ++ break; ++ case FDIV_S: ++ Format(instr, "fdiv.s 'fd, 'fj, 'fk"); ++ break; ++ case FDIV_D: ++ Format(instr, "fdiv.d 'fd, 'fj, 'fk"); ++ break; ++ case FMAX_S: ++ Format(instr, "fmax.s 'fd, 'fj, 'fk"); ++ break; ++ case FMAX_D: ++ Format(instr, "fmax.d 'fd, 'fj, 'fk"); ++ break; ++ case FMIN_S: ++ Format(instr, "fmin.s 'fd, 'fj, 'fk"); ++ break; ++ case FMIN_D: ++ Format(instr, "fmin.d 'fd, 'fj, 'fk"); ++ break; ++ case FMAXA_S: ++ Format(instr, "fmaxa.s 'fd, 'fj, 'fk"); ++ break; ++ case FMAXA_D: ++ Format(instr, "fmaxa.d 'fd, 'fj, 'fk"); ++ break; ++ case FMINA_S: ++ Format(instr, "fmina.s 'fd, 'fj, 'fk"); ++ break; ++ case FMINA_D: ++ Format(instr, "fmina.d 'fd, 'fj, 'fk"); ++ break; ++ case LDX_B: ++ Format(instr, "ldx.b 'rd, 'rj, 'rk"); ++ break; ++ case LDX_H: ++ Format(instr, "ldx.h 'rd, 'rj, 'rk"); ++ break; ++ case LDX_W: ++ Format(instr, "ldx.w 'rd, 'rj, 'rk"); ++ break; ++ case LDX_D: ++ Format(instr, "ldx.d 'rd, 'rj, 'rk"); ++ break; ++ case STX_B: ++ Format(instr, "stx.b 'rd, 'rj, 'rk"); ++ break; ++ case STX_H: ++ Format(instr, "stx.h 'rd, 'rj, 'rk"); ++ break; ++ case STX_W: ++ Format(instr, "stx.w 'rd, 'rj, 'rk"); ++ break; ++ case STX_D: ++ Format(instr, "stx.d 'rd, 'rj, 'rk"); ++ break; ++ case LDX_BU: ++ Format(instr, "ldx.bu 'rd, 'rj, 'rk"); ++ break; ++ case LDX_HU: ++ Format(instr, "ldx.hu 'rd, 'rj, 'rk"); ++ break; ++ case LDX_WU: ++ Format(instr, "ldx.wu 'rd, 'rj, 'rk"); ++ break; ++ case FLDX_S: ++ Format(instr, "fldx.s 'fd, 'rj, 'rk"); ++ break; ++ case FLDX_D: ++ Format(instr, "fldx.d 'fd, 'rj, 'rk"); ++ break; ++ case FSTX_S: ++ Format(instr, "fstx.s 'fd, 'rj, 'rk"); ++ break; ++ case FSTX_D: ++ Format(instr, "fstx.d 'fd, 'rj, 'rk"); ++ break; ++ case AMSWAP_W: ++ Format(instr, "amswap.w 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_D: ++ Format(instr, "amswap.d 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_W: ++ Format(instr, "amadd.w 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_D: ++ Format(instr, "amadd.d 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_W: ++ Format(instr, "amand.w 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_D: ++ Format(instr, "amand.d 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_W: ++ Format(instr, "amor.w 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_D: ++ Format(instr, "amor.d 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_W: ++ Format(instr, "amxor.w 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_D: ++ Format(instr, "amxor.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_W: ++ Format(instr, "ammax.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_D: ++ Format(instr, "ammax.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_W: ++ Format(instr, "ammin.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_D: ++ Format(instr, "ammin.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_WU: ++ Format(instr, "ammax.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DU: ++ Format(instr, "ammax.du 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_WU: ++ Format(instr, "ammin.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DU: ++ Format(instr, "ammin.du 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_DB_W: ++ Format(instr, "amswap_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMSWAP_DB_D: ++ Format(instr, "amswap_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_DB_W: ++ Format(instr, "amadd_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMADD_DB_D: ++ Format(instr, "amadd_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_DB_W: ++ 
Format(instr, "amand_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMAND_DB_D: ++ Format(instr, "amand_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_DB_W: ++ Format(instr, "amor_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMOR_DB_D: ++ Format(instr, "amor_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_DB_W: ++ Format(instr, "amxor_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMXOR_DB_D: ++ Format(instr, "amxor_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_W: ++ Format(instr, "ammax_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_D: ++ Format(instr, "ammax_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_W: ++ Format(instr, "ammin_db.w 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_D: ++ Format(instr, "ammin_db.d 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_WU: ++ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMAX_DB_DU: ++ Format(instr, "ammax_db.du 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_WU: ++ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj"); ++ break; ++ case AMMIN_DB_DU: ++ Format(instr, "ammin_db.du 'rd, 'rk, 'rj"); ++ break; ++ case DBAR: ++ Format(instr, "dbar 'hint15"); ++ break; ++ case IBAR: ++ Format(instr, "ibar 'hint15"); ++ break; ++ case FSCALEB_S: ++ Format(instr, "fscaleb.s 'fd, 'fj, 'fk"); ++ break; ++ case FSCALEB_D: ++ Format(instr, "fscaleb.d 'fd, 'fj, 'fk"); ++ break; ++ case FCOPYSIGN_S: ++ Format(instr, "fcopysign.s 'fd, 'fj, 'fk"); ++ break; ++ case FCOPYSIGN_D: ++ Format(instr, "fcopysign.d 'fd, 'fj, 'fk"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ return kInstrSize; ++} ++ ++void Decoder::DecodeTypekOp22(Instruction* instr) { ++ switch (instr->Bits(31, 10) << 10) { ++ case CLZ_W: ++ Format(instr, "clz.w 'rd, 'rj"); ++ break; ++ case CTZ_W: ++ Format(instr, "ctz.w 'rd, 'rj"); ++ break; ++ case CLZ_D: ++ Format(instr, "clz.d 'rd, 'rj"); ++ break; ++ case CTZ_D: ++ Format(instr, "ctz.d 'rd, 'rj"); ++ break; ++ case REVB_2H: ++ Format(instr, "revb.2h 'rd, 'rj"); ++ break; ++ case REVB_4H: ++ Format(instr, "revb.4h 'rd, 'rj"); ++ break; ++ case REVB_2W: ++ Format(instr, "revb.2w 'rd, 'rj"); ++ break; ++ case REVB_D: ++ Format(instr, "revb.d 'rd, 'rj"); ++ break; ++ case REVH_2W: ++ Format(instr, "revh.2w 'rd, 'rj"); ++ break; ++ case REVH_D: ++ Format(instr, "revh.d 'rd, 'rj"); ++ break; ++ case BITREV_4B: ++ Format(instr, "bitrev.4b 'rd, 'rj"); ++ break; ++ case BITREV_8B: ++ Format(instr, "bitrev.8b 'rd, 'rj"); ++ break; ++ case BITREV_W: ++ Format(instr, "bitrev.w 'rd, 'rj"); ++ break; ++ case BITREV_D: ++ Format(instr, "bitrev.d 'rd, 'rj"); ++ break; ++ case EXT_W_B: ++ Format(instr, "ext.w.b 'rd, 'rj"); ++ break; ++ case EXT_W_H: ++ Format(instr, "ext.w.h 'rd, 'rj"); ++ break; ++ case FABS_S: ++ Format(instr, "fabs.s 'fd, 'fj"); ++ break; ++ case FABS_D: ++ Format(instr, "fabs.d 'fd, 'fj"); ++ break; ++ case FNEG_S: ++ Format(instr, "fneg.s 'fd, 'fj"); ++ break; ++ case FNEG_D: ++ Format(instr, "fneg.d 'fd, 'fj"); ++ break; ++ case FSQRT_S: ++ Format(instr, "fsqrt.s 'fd, 'fj"); ++ break; ++ case FSQRT_D: ++ Format(instr, "fsqrt.d 'fd, 'fj"); ++ break; ++ case FMOV_S: ++ Format(instr, "fmov.s 'fd, 'fj"); ++ break; ++ case FMOV_D: ++ Format(instr, "fmov.d 'fd, 'fj"); ++ break; ++ case MOVGR2FR_W: ++ Format(instr, "movgr2fr.w 'fd, 'rj"); ++ break; ++ case MOVGR2FR_D: ++ Format(instr, "movgr2fr.d 'fd, 'rj"); ++ break; ++ case MOVGR2FRH_W: ++ Format(instr, "movgr2frh.w 'fd, 'rj"); ++ break; ++ case MOVFR2GR_S: ++ Format(instr, "movfr2gr.s 'rd, 'fj"); ++ break; ++ case MOVFR2GR_D: ++ Format(instr, "movfr2gr.d 'rd, 'fj"); ++ break; ++ case MOVFRH2GR_S: ++ 
Format(instr, "movfrh2gr.s 'rd, 'fj"); ++ break; ++ case MOVGR2FCSR: ++ Format(instr, "movgr2fcsr fcsr, 'rj"); ++ break; ++ case MOVFCSR2GR: ++ Format(instr, "movfcsr2gr 'rd, fcsr"); ++ break; ++ case FCVT_S_D: ++ Format(instr, "fcvt.s.d 'fd, 'fj"); ++ break; ++ case FCVT_D_S: ++ Format(instr, "fcvt.d.s 'fd, 'fj"); ++ break; ++ case FTINTRM_W_S: ++ Format(instr, "ftintrm.w.s 'fd, 'fj"); ++ break; ++ case FTINTRM_W_D: ++ Format(instr, "ftintrm.w.d 'fd, 'fj"); ++ break; ++ case FTINTRM_L_S: ++ Format(instr, "ftintrm.l.s 'fd, 'fj"); ++ break; ++ case FTINTRM_L_D: ++ Format(instr, "ftintrm.l.d 'fd, 'fj"); ++ break; ++ case FTINTRP_W_S: ++ Format(instr, "ftintrp.w.s 'fd, 'fj"); ++ break; ++ case FTINTRP_W_D: ++ Format(instr, "ftintrp.w.d 'fd, 'fj"); ++ break; ++ case FTINTRP_L_S: ++ Format(instr, "ftintrp.l.s 'fd, 'fj"); ++ break; ++ case FTINTRP_L_D: ++ Format(instr, "ftintrp.l.d 'fd, 'fj"); ++ break; ++ case FTINTRZ_W_S: ++ Format(instr, "ftintrz.w.s 'fd, 'fj"); ++ break; ++ case FTINTRZ_W_D: ++ Format(instr, "ftintrz.w.d 'fd, 'fj"); ++ break; ++ case FTINTRZ_L_S: ++ Format(instr, "ftintrz.l.s 'fd, 'fj"); ++ break; ++ case FTINTRZ_L_D: ++ Format(instr, "ftintrz.l.d 'fd, 'fj"); ++ break; ++ case FTINTRNE_W_S: ++ Format(instr, "ftintrne.w.s 'fd, 'fj"); ++ break; ++ case FTINTRNE_W_D: ++ Format(instr, "ftintrne.w.d 'fd, 'fj"); ++ break; ++ case FTINTRNE_L_S: ++ Format(instr, "ftintrne.l.s 'fd, 'fj"); ++ break; ++ case FTINTRNE_L_D: ++ Format(instr, "ftintrne.l.d 'fd, 'fj"); ++ break; ++ case FTINT_W_S: ++ Format(instr, "ftint.w.s 'fd, 'fj"); ++ break; ++ case FTINT_W_D: ++ Format(instr, "ftint.w.d 'fd, 'fj"); ++ break; ++ case FTINT_L_S: ++ Format(instr, "ftint.l.s 'fd, 'fj"); ++ break; ++ case FTINT_L_D: ++ Format(instr, "ftint.l.d 'fd, 'fj"); ++ break; ++ case FFINT_S_W: ++ Format(instr, "ffint.s.w 'fd, 'fj"); ++ break; ++ case FFINT_S_L: ++ Format(instr, "ffint.s.l 'fd, 'fj"); ++ break; ++ case FFINT_D_W: ++ Format(instr, "ffint.d.w 'fd, 'fj"); ++ break; ++ case FFINT_D_L: ++ Format(instr, "ffint.d.l 'fd, 'fj"); ++ break; ++ case FRINT_S: ++ Format(instr, "frint.s 'fd, 'fj"); ++ break; ++ case FRINT_D: ++ Format(instr, "frint.d 'fd, 'fj"); ++ break; ++ case MOVFR2CF: ++ Format(instr, "movfr2cf fcc'cd, 'fj"); ++ break; ++ case MOVCF2FR: ++ Format(instr, "movcf2fr 'fd, fcc'cj"); ++ break; ++ case MOVGR2CF: ++ Format(instr, "movgr2cf fcc'cd, 'rj"); ++ break; ++ case MOVCF2GR: ++ Format(instr, "movcf2gr 'rd, fcc'cj"); ++ break; ++ case FRECIP_S: ++ Format(instr, "frecip.s 'fd, 'fj"); ++ break; ++ case FRECIP_D: ++ Format(instr, "frecip.d 'fd, 'fj"); ++ break; ++ case FRSQRT_S: ++ Format(instr, "frsqrt.s 'fd, 'fj"); ++ break; ++ case FRSQRT_D: ++ Format(instr, "frsqrt.d 'fd, 'fj"); ++ break; ++ case FCLASS_S: ++ Format(instr, "fclass.s 'fd, 'fj"); ++ break; ++ case FCLASS_D: ++ Format(instr, "fclass.d 'fd, 'fj"); ++ break; ++ case FLOGB_S: ++ Format(instr, "flogb.s 'fd, 'fj"); ++ break; ++ case FLOGB_D: ++ Format(instr, "flogb.d 'fd, 'fj"); ++ break; ++ case CLO_W: ++ Format(instr, "clo.w 'rd, 'rj"); ++ break; ++ case CTO_W: ++ Format(instr, "cto.w 'rd, 'rj"); ++ break; ++ case CLO_D: ++ Format(instr, "clo.d 'rd, 'rj"); ++ break; ++ case CTO_D: ++ Format(instr, "cto.d 'rd, 'rj"); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++int Decoder::InstructionDecode(byte* instr_ptr) { ++ Instruction* instr = Instruction::At(instr_ptr); ++ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ", ++ instr->InstructionBits()); ++ switch (instr->InstructionType()) { ++ case 
Instruction::kOp6Type: {
++      DecodeTypekOp6(instr);
++      break;
++    }
++    case Instruction::kOp7Type: {
++      DecodeTypekOp7(instr);
++      break;
++    }
++    case Instruction::kOp8Type: {
++      DecodeTypekOp8(instr);
++      break;
++    }
++    case Instruction::kOp10Type: {
++      DecodeTypekOp10(instr);
++      break;
++    }
++    case Instruction::kOp12Type: {
++      DecodeTypekOp12(instr);
++      break;
++    }
++    case Instruction::kOp14Type: {
++      DecodeTypekOp14(instr);
++      break;
++    }
++    case Instruction::kOp17Type: {
++      return DecodeTypekOp17(instr);
++    }
++    case Instruction::kOp22Type: {
++      DecodeTypekOp22(instr);
++      break;
++    }
++    case Instruction::kUnsupported: {
++      Format(instr, "UNSUPPORTED");
++      break;
++    }
++    default: {
++      Format(instr, "UNSUPPORTED");
++      break;
++    }
++  }
++  return kInstrSize;
++}
++
++}  // namespace internal
++}  // namespace v8
++
++//------------------------------------------------------------------------------
++
++namespace disasm {
++
++const char* NameConverter::NameOfAddress(byte* addr) const {
++  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
++  return tmp_buffer_.begin();
++}
++
++const char* NameConverter::NameOfConstant(byte* addr) const {
++  return NameOfAddress(addr);
++}
++
++const char* NameConverter::NameOfCPURegister(int reg) const {
++  return v8::internal::Registers::Name(reg);
++}
++
++const char* NameConverter::NameOfXMMRegister(int reg) const {
++  return v8::internal::FPURegisters::Name(reg);
++}
++
++const char* NameConverter::NameOfByteCPURegister(int reg) const {
++  UNREACHABLE();
++  return "nobytereg";
++}
++
++const char* NameConverter::NameInCode(byte* addr) const {
++  // The default name converter is called for unknown code. So we will not try
++  // to access any memory.
++  return "";
++}
++
++//------------------------------------------------------------------------------
++
++int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
++                                    byte* instruction) {
++  v8::internal::Decoder d(converter_, buffer);
++  return d.InstructionDecode(instruction);
++}
++
++int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
++
++void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
++                               UnimplementedOpcodeAction unimplemented_action) {
++  NameConverter converter;
++  Disassembler d(converter, unimplemented_action);
++  for (byte* pc = begin; pc < end;) {
++    v8::internal::EmbeddedVector<char, 128> buffer;
++    buffer[0] = '\0';
++    byte* prev_pc = pc;
++    pc += d.InstructionDecode(buffer, pc);
++    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
++                         *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
++  }
++}
++
++#undef STRING_STARTS_WITH
++
++}  // namespace disasm
++
++#endif  // V8_TARGET_ARCH_LOONG64
+diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
+index dbe78ddf..71f12991 100644
+--- a/deps/v8/src/diagnostics/perf-jit.h
++++ b/deps/v8/src/diagnostics/perf-jit.h
+@@ -83,6 +83,7 @@ class PerfJitLogger : public CodeEventLogger {
+   static const uint32_t kElfMachARM = 40;
+   static const uint32_t kElfMachMIPS = 8;
+   static const uint32_t kElfMachMIPS64 = 8;
++  static const uint32_t kElfMachLOONG64 = 258;
+   static const uint32_t kElfMachARM64 = 183;
+   static const uint32_t kElfMachS390x = 22;
+   static const uint32_t kElfMachPPC64 = 21;
+@@ -98,6 +99,8 @@ class PerfJitLogger : public CodeEventLogger {
+     return kElfMachMIPS;
+ #elif V8_TARGET_ARCH_MIPS64
+     return kElfMachMIPS64;
++#elif V8_TARGET_ARCH_LOONG64
++    return kElfMachLOONG64;
+ #elif V8_TARGET_ARCH_ARM64
+     return kElfMachARM64;
+ #elif V8_TARGET_ARCH_S390X
+diff --git
a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h +index 4809eeca..39fc6343 100644 +--- a/deps/v8/src/execution/frame-constants.h ++++ b/deps/v8/src/execution/frame-constants.h +@@ -389,6 +389,8 @@ inline static int FrameSlotToFPOffset(int slot) { + #include "src/execution/mips/frame-constants-mips.h" // NOLINT + #elif V8_TARGET_ARCH_MIPS64 + #include "src/execution/mips64/frame-constants-mips64.h" // NOLINT ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/execution/loong64/frame-constants-loong64.h" // NOLINT + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/frame-constants-s390.h" // NOLINT + #else +diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.cc b/deps/v8/src/execution/loong64/frame-constants-loong64.cc +new file mode 100644 +index 00000000..21925d03 +--- /dev/null ++++ b/deps/v8/src/execution/loong64/frame-constants-loong64.cc +@@ -0,0 +1,32 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LOONG64 ++ ++#include "src/codegen/loong64/assembler-loong64-inl.h" ++#include "src/execution/frame-constants.h" ++#include "src/execution/frames.h" ++ ++#include "src/execution/loong64/frame-constants-loong64.h" ++ ++namespace v8 { ++namespace internal { ++ ++Register JavaScriptFrame::fp_register() { return v8::internal::fp; } ++Register JavaScriptFrame::context_register() { return cp; } ++Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); } ++ ++int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) { ++ return register_count; ++} ++ ++int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { ++ USE(register_count); ++ return 0; ++} ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LOONG64 +diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h +new file mode 100644 +index 00000000..a11fedfb +--- /dev/null ++++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h +@@ -0,0 +1,75 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_ ++#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_ ++ ++#include "src/base/bits.h" ++#include "src/base/macros.h" ++#include "src/execution/frame-constants.h" ++ ++namespace v8 { ++namespace internal { ++ ++class EntryFrameConstants : public AllStatic { ++ public: ++ // This is the offset to where JSEntry pushes the current value of ++ // Isolate::c_entry_fp onto the stack. ++ static constexpr int kCallerFPOffset = ++ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); ++}; ++ ++class WasmCompileLazyFrameConstants : public TypedFrameConstants { ++ public: ++ static constexpr int kNumberOfSavedGpParamRegs = 7; ++ static constexpr int kNumberOfSavedFpParamRegs = 7; ++ ++ // FP-relative. ++ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(7); ++ static constexpr int kFixedFrameSizeFromFp = ++ TypedFrameConstants::kFixedFrameSizeFromFp + ++ kNumberOfSavedGpParamRegs * kPointerSize + ++ kNumberOfSavedFpParamRegs * kDoubleSize; ++}; ++ ++// Frame constructed by the {WasmDebugBreak} builtin. 
++// After pushing the frame type marker, the builtin pushes all Liftoff cache
++// registers (see liftoff-assembler-defs.h).
++class WasmDebugBreakFrameConstants : public TypedFrameConstants {
++ public:
++  // {a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, t6, t7, t8}
++  static constexpr uint32_t kPushedGpRegs = 0b111111111111111110000;
++  // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26}
++  static constexpr uint32_t kPushedFpRegs = 0b101010101010101010101010101;
++
++  static constexpr int kNumPushedGpRegisters =
++      base::bits::CountPopulation(kPushedGpRegs);
++  static constexpr int kNumPushedFpRegisters =
++      base::bits::CountPopulation(kPushedFpRegs);
++
++  static constexpr int kLastPushedGpRegisterOffset =
++      -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
++  static constexpr int kLastPushedFpRegisterOffset =
++      kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
++
++  // Offsets are fp-relative.
++  static int GetPushedGpRegisterOffset(int reg_code) {
++    DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
++    uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
++    return kLastPushedGpRegisterOffset +
++           base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
++  }
++
++  static int GetPushedFpRegisterOffset(int reg_code) {
++    DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
++    uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
++    return kLastPushedFpRegisterOffset +
++           base::bits::CountPopulation(lower_regs) * kDoubleSize;
++  }
++};
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
+new file mode 100644
+index 00000000..030d57f1
+--- /dev/null
++++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
+@@ -0,0 +1,5563 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#include "src/execution/loong64/simulator-loong64.h"
++
++// Only build the simulator if not compiling for real LOONG64 hardware.
++#if defined(USE_SIMULATOR)
++
++#include <limits.h>
++#include <stdarg.h>
++#include <stdlib.h>
++#include <cmath>
++
++#include "src/base/bits.h"
++#include "src/codegen/assembler-inl.h"
++#include "src/codegen/loong64/constants-loong64.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/diagnostics/disasm.h"
++#include "src/heap/combined-heap.h"
++#include "src/runtime/runtime-utils.h"
++#include "src/utils/ostreams.h"
++#include "src/utils/vector.h"
++
++namespace v8 {
++namespace internal {
++
++DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
++                                Simulator::GlobalMonitor::Get)
++
++// #define PRINT_SIM_LOG
++
++// Util functions.
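++// HaveSameSign relies on two's-complement representation: the XOR of two
++// integers is non-negative exactly when their sign bits agree, so testing
++// (a ^ b) >= 0 checks for matching signs without inspecting each operand.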
++inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); } ++ ++uint32_t get_fcsr_condition_bit(uint32_t cc) { ++ if (cc == 0) { ++ return 23; ++ } else { ++ return 24 + cc; ++ } ++} ++ ++static int64_t MultiplyHighSigned(int64_t u, int64_t v) { ++ uint64_t u0, v0, w0; ++ int64_t u1, v1, w1, w2, t; ++ ++ u0 = u & 0xFFFFFFFFL; ++ u1 = u >> 32; ++ v0 = v & 0xFFFFFFFFL; ++ v1 = v >> 32; ++ ++ w0 = u0 * v0; ++ t = u1 * v0 + (w0 >> 32); ++ w1 = t & 0xFFFFFFFFL; ++ w2 = t >> 32; ++ w1 = u0 * v1 + w1; ++ ++ return u1 * v1 + w2 + (w1 >> 32); ++} ++ ++static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) { ++ uint64_t u0, v0, w0; ++ uint64_t u1, v1, w1, w2, t; ++ ++ u0 = u & 0xFFFFFFFFL; ++ u1 = u >> 32; ++ v0 = v & 0xFFFFFFFFL; ++ v1 = v >> 32; ++ ++ w0 = u0 * v0; ++ t = u1 * v0 + (w0 >> 32); ++ w1 = t & 0xFFFFFFFFL; ++ w2 = t >> 32; ++ w1 = u0 * v1 + w1; ++ ++ return u1 * v1 + w2 + (w1 >> 32); ++} ++ ++#ifdef PRINT_SIM_LOG ++inline void printf_instr(const char* _Format, ...) { ++ va_list varList; ++ va_start(varList, _Format); ++ vprintf(_Format, varList); ++ va_end(varList); ++} ++#else ++#define printf_instr(...) ++#endif ++ ++// This macro provides a platform independent use of sscanf. The reason for ++// SScanF not being implemented in a platform independent was through ++// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time ++// Library does not provide vsscanf. ++#define SScanF sscanf // NOLINT ++ ++// The Loong64Debugger class is used by the simulator while debugging simulated ++// code. ++class Loong64Debugger { ++ public: ++ explicit Loong64Debugger(Simulator* sim) : sim_(sim) {} ++ ++ void Stop(Instruction* instr); ++ void Debug(); ++ // Print all registers with a nice formatting. ++ void PrintAllRegs(); ++ void PrintAllRegsIncludingFPU(); ++ ++ private: ++ // We set the breakpoint code to 0xFFFF to easily recognize it. ++ static const Instr kBreakpointInstr = BREAK | 0xFFFF; ++ static const Instr kNopInstr = 0x0; ++ ++ Simulator* sim_; ++ ++ int64_t GetRegisterValue(int regnum); ++ int64_t GetFPURegisterValue(int regnum); ++ float GetFPURegisterValueFloat(int regnum); ++ double GetFPURegisterValueDouble(int regnum); ++ bool GetValue(const char* desc, int64_t* value); ++ ++ // Set or delete a breakpoint. Returns true if successful. ++ bool SetBreakpoint(Instruction* breakpc); ++ bool DeleteBreakpoint(Instruction* breakpc); ++ ++ // Undo and redo all breakpoints. This is needed to bracket disassembly and ++ // execution to skip past breakpoints when run from the debugger. ++ void UndoBreakpoints(); ++ void RedoBreakpoints(); ++}; ++ ++inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); } ++ ++void Loong64Debugger::Stop(Instruction* instr) { ++ // Get the stop code. 
++ uint32_t code = instr->Bits(25, 6); ++ PrintF("Simulator hit (%u)\n", code); ++ Debug(); ++} ++ ++int64_t Loong64Debugger::GetRegisterValue(int regnum) { ++ if (regnum == kNumSimuRegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_register(regnum); ++ } ++} ++ ++int64_t Loong64Debugger::GetFPURegisterValue(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register(regnum); ++ } ++} ++ ++float Loong64Debugger::GetFPURegisterValueFloat(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_float(regnum); ++ } ++} ++ ++double Loong64Debugger::GetFPURegisterValueDouble(int regnum) { ++ if (regnum == kNumFPURegisters) { ++ return sim_->get_pc(); ++ } else { ++ return sim_->get_fpu_register_double(regnum); ++ } ++} ++ ++bool Loong64Debugger::GetValue(const char* desc, int64_t* value) { ++ int regnum = Registers::Number(desc); ++ int fpuregnum = FPURegisters::Number(desc); ++ ++ if (regnum != kInvalidRegister) { ++ *value = GetRegisterValue(regnum); ++ return true; ++ } else if (fpuregnum != kInvalidFPURegister) { ++ *value = GetFPURegisterValue(fpuregnum); ++ return true; ++ } else if (strncmp(desc, "0x", 2) == 0) { ++ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast(value)) == ++ 1; ++ } else { ++ return SScanF(desc, "%" SCNu64, reinterpret_cast(value)) == 1; ++ } ++ return false; ++} ++ ++bool Loong64Debugger::SetBreakpoint(Instruction* breakpc) { ++ // Check if a breakpoint can be set. If not return without any side-effects. ++ if (sim_->break_pc_ != nullptr) { ++ return false; ++ } ++ ++ // Set the breakpoint. ++ sim_->break_pc_ = breakpc; ++ sim_->break_instr_ = breakpc->InstructionBits(); ++ // Not setting the breakpoint instruction in the code itself. It will be set ++ // when the debugger shell continues. ++ return true; ++} ++ ++bool Loong64Debugger::DeleteBreakpoint(Instruction* breakpc) { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++ ++ sim_->break_pc_ = nullptr; ++ sim_->break_instr_ = 0; ++ return true; ++} ++ ++void Loong64Debugger::UndoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(sim_->break_instr_); ++ } ++} ++ ++void Loong64Debugger::RedoBreakpoints() { ++ if (sim_->break_pc_ != nullptr) { ++ sim_->break_pc_->SetInstructionBits(kBreakpointInstr); ++ } ++} ++ ++void Loong64Debugger::PrintAllRegs() { ++#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n) ++ ++ PrintF("\n"); ++ // at, v0, a0. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64 ++ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n", ++ REG_INFO(1), REG_INFO(2), REG_INFO(4)); ++ // v1, a1. ++ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ "", REG_INFO(3), REG_INFO(5)); ++ // a2. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(6)); ++ // a3. ++ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "", ++ REG_INFO(7)); ++ PrintF("\n"); ++ // a4-t3, s0-s7 ++ for (int i = 0; i < 8; i++) { ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(8 + i), REG_INFO(16 + i)); ++ } ++ PrintF("\n"); ++ // t8, k0, LO. 
++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(24), REG_INFO(26), REG_INFO(32)); ++ // t9, k1, HI. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(25), REG_INFO(27), REG_INFO(33)); ++ // sp, fp, gp. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", ++ REG_INFO(29), REG_INFO(30), REG_INFO(28)); ++ // pc. ++ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64 ++ " %14" PRId64 " \n", ++ REG_INFO(31), REG_INFO(34)); ++ ++#undef REG_INFO ++} ++ ++void Loong64Debugger::PrintAllRegsIncludingFPU() { ++#define FPU_REG_INFO(n) \ ++ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n) ++ ++ PrintAllRegs(); ++ ++ PrintF("\n\n"); ++ // f0, f1, f2, ... f31. ++ // TODO(plind): consider printing 2 columns for space efficiency. ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30)); ++ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31)); ++ ++#undef FPU_REG_INFO ++} ++ ++void Loong64Debugger::Debug() { ++ intptr_t last_pc = -1; ++ bool done = false; ++ ++#define COMMAND_SIZE 63 ++#define ARG_SIZE 255 ++ ++#define STR(a) #a ++#define XSTR(a) STR(a) ++ ++ char cmd[COMMAND_SIZE + 1]; ++ char arg1[ARG_SIZE + 1]; ++ char arg2[ARG_SIZE + 1]; ++ char* argv[3] = {cmd, arg1, arg2}; ++ ++ // Make sure to have a proper terminating character if reaching the limit. 
++  cmd[COMMAND_SIZE] = 0;
++  arg1[ARG_SIZE] = 0;
++  arg2[ARG_SIZE] = 0;
++
++  // Undo all set breakpoints while running in the debugger shell. This will
++  // make them invisible to all commands.
++  UndoBreakpoints();
++
++  while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
++    if (last_pc != sim_->get_pc()) {
++      disasm::NameConverter converter;
++      disasm::Disassembler dasm(converter);
++      // Use a reasonably large buffer.
++      v8::internal::EmbeddedVector<char, 256> buffer;
++      dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
++      PrintF("  0x%016" PRIx64 "   %s\n", sim_->get_pc(), buffer.begin());
++      last_pc = sim_->get_pc();
++    }
++    char* line = ReadLine("sim> ");
++    if (line == nullptr) {
++      break;
++    } else {
++      char* last_input = sim_->last_debugger_input();
++      if (strcmp(line, "\n") == 0 && last_input != nullptr) {
++        line = last_input;
++      } else {
++        // Ownership is transferred to sim_;
++        sim_->set_last_debugger_input(line);
++      }
++      // Use sscanf to parse the individual parts of the command line. At the
++      // moment no command expects more than two parameters.
++      int argc = SScanF(line,
++                        "%" XSTR(COMMAND_SIZE) "s "
++                        "%" XSTR(ARG_SIZE) "s "
++                        "%" XSTR(ARG_SIZE) "s",
++                        cmd, arg1, arg2);
++      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
++        Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
++        if (!(instr->IsTrap()) ||
++            instr->InstructionBits() == rtCallRedirInstr) {
++          sim_->InstructionDecode(
++              reinterpret_cast<Instruction*>(sim_->get_pc()));
++        } else {
++          // Allow si to jump over generated breakpoints.
++          PrintF("/!\\ Jumping over generated breakpoint.\n");
++          sim_->set_pc(sim_->get_pc() + kInstrSize);
++        }
++      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
++        // Execute the one instruction we broke at with breakpoints disabled.
++        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
++        // Leave the debugger shell.
++        done = true;
++      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
++        if (argc == 2) {
++          int64_t value;
++          double dvalue;
++          if (strcmp(arg1, "all") == 0) {
++            PrintAllRegs();
++          } else if (strcmp(arg1, "allf") == 0) {
++            PrintAllRegsIncludingFPU();
++          } else {
++            int regnum = Registers::Number(arg1);
++            int fpuregnum = FPURegisters::Number(arg1);
++
++            if (regnum != kInvalidRegister) {
++              value = GetRegisterValue(regnum);
++              PrintF("%s: 0x%08" PRIx64 "  %" PRId64 "  \n", arg1, value,
++                     value);
++            } else if (fpuregnum != kInvalidFPURegister) {
++              value = GetFPURegisterValue(fpuregnum);
++              dvalue = GetFPURegisterValueDouble(fpuregnum);
++              PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n",
++                     FPURegisters::Name(fpuregnum), value, dvalue);
++            } else {
++              PrintF("%s unrecognized\n", arg1);
++            }
++          }
++        } else {
++          if (argc == 3) {
++            if (strcmp(arg2, "single") == 0) {
++              int64_t value;
++              float fvalue;
++              int fpuregnum = FPURegisters::Number(arg1);
++
++              if (fpuregnum != kInvalidFPURegister) {
++                value = GetFPURegisterValue(fpuregnum);
++                value &= 0xFFFFFFFFUL;
++                fvalue = GetFPURegisterValueFloat(fpuregnum);
++                PrintF("%s: 0x%08" PRIx64 "  %11.4e\n", arg1, value, fvalue);
++              } else {
++                PrintF("%s unrecognized\n", arg1);
++              }
++            } else {
++              PrintF("print <fpu register> single\n");
++            }
++          } else {
++            PrintF("print <register> or print <fpu register> single\n");
++          }
++        }
++      } else if ((strcmp(cmd, "po") == 0) ||
++                 (strcmp(cmd, "printobject") == 0)) {
++        if (argc == 2) {
++          int64_t value;
++          StdoutStream os;
++          if (GetValue(arg1, &value)) {
++            Object obj(value);
++            os << arg1 << ": \n";
++#ifdef DEBUG
++            obj.Print(os);
++            os << "\n";
++#else
++            os << Brief(obj) << "\n";
++#endif
++          } else {
++            os << arg1 << " unrecognized\n";
++          }
++        } else {
++          PrintF("printobject <value>\n");
++        }
++      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
++                 strcmp(cmd, "dump") == 0) {
++        int64_t* cur = nullptr;
++        int64_t* end = nullptr;
++        int next_arg = 1;
++
++        if (strcmp(cmd, "stack") == 0) {
++          cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
++        } else {  // Command "mem".
++          int64_t value;
++          if (!GetValue(arg1, &value)) {
++            PrintF("%s unrecognized\n", arg1);
++            continue;
++          }
++          cur = reinterpret_cast<int64_t*>(value);
++          next_arg++;
++        }
++
++        int64_t words;
++        if (argc == next_arg) {
++          words = 10;
++        } else {
++          if (!GetValue(argv[next_arg], &words)) {
++            words = 10;
++          }
++        }
++        end = cur + words;
++
++        bool skip_obj_print = (strcmp(cmd, "dump") == 0);
++        while (cur < end) {
++          PrintF("  0x%012" PRIxPTR " :  0x%016" PRIx64 "  %14" PRId64 " ",
++                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
++          Object obj(*cur);
++          Heap* current_heap = sim_->isolate_->heap();
++          if (!skip_obj_print) {
++            if (obj.IsSmi() ||
++                IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
++              PrintF(" (");
++              if (obj.IsSmi()) {
++                PrintF("smi %d", Smi::ToInt(obj));
++              } else {
++                obj.ShortPrint();
++              }
++              PrintF(")");
++            }
++          }
++          PrintF("\n");
++          cur++;
++        }
++
++      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
++                 (strcmp(cmd, "di") == 0)) {
++        disasm::NameConverter converter;
++        disasm::Disassembler dasm(converter);
++        // Use a reasonably large buffer.
++        v8::internal::EmbeddedVector<char, 256> buffer;
++
++        byte* cur = nullptr;
++        byte* end = nullptr;
++
++        if (argc == 1) {
++          cur = reinterpret_cast<byte*>(sim_->get_pc());
++          end = cur + (10 * kInstrSize);
++        } else if (argc == 2) {
++          int regnum = Registers::Number(arg1);
++          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
++            // The argument is an address or a register name.
++            int64_t value;
++            if (GetValue(arg1, &value)) {
++              cur = reinterpret_cast<byte*>(value);
++              // Disassemble 10 instructions at <address>.
++              end = cur + (10 * kInstrSize);
++            }
++          } else {
++            // The argument is the number of instructions.
++            int64_t value;
++            if (GetValue(arg1, &value)) {
++              cur = reinterpret_cast<byte*>(sim_->get_pc());
++              // Disassemble <arg1> instructions.
++              end = cur + (value * kInstrSize);
++            }
++          }
++        } else {
++          int64_t value1;
++          int64_t value2;
++          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
++            cur = reinterpret_cast<byte*>(value1);
++            end = cur + (value2 * kInstrSize);
++          }
++        }
++
++        while (cur < end) {
++          dasm.InstructionDecode(buffer, cur);
++          PrintF("  0x%08" PRIxPTR "   %s\n", reinterpret_cast<intptr_t>(cur),
++                 buffer.begin());
++          cur += kInstrSize;
++        }
++      } else if (strcmp(cmd, "gdb") == 0) {
++        PrintF("relinquishing control to gdb\n");
++        v8::base::OS::DebugBreak();
++        PrintF("regaining control from gdb\n");
++      } else if (strcmp(cmd, "break") == 0) {
++        if (argc == 2) {
++          int64_t value;
++          if (GetValue(arg1, &value)) {
++            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
++              PrintF("setting breakpoint failed\n");
++            }
++          } else {
++            PrintF("%s unrecognized\n", arg1);
++          }
++        } else {
++          PrintF("break <address>\n");
++        }
++      } else if (strcmp(cmd, "del") == 0) {
++        if (!DeleteBreakpoint(nullptr)) {
++          PrintF("deleting breakpoint failed\n");
++        }
++      } else if (strcmp(cmd, "flags") == 0) {
++        PrintF("No flags on LOONG64 !\n");
++      } else if (strcmp(cmd, "stop") == 0) {
++        int64_t value;
++        intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
++        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
++        Instruction* msg_address =
++            reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
++        if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
++          // Remove the current stop.
++          if (sim_->IsStopInstruction(stop_instr)) {
++            stop_instr->SetInstructionBits(kNopInstr);
++            msg_address->SetInstructionBits(kNopInstr);
++          } else {
++            PrintF("Not at debugger stop.\n");
++          }
++        } else if (argc == 3) {
++          // Print information about all/the specified breakpoint(s).
++          if (strcmp(arg1, "info") == 0) {
++            if (strcmp(arg2, "all") == 0) {
++              PrintF("Stop information:\n");
++              for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
++                   i++) {
++                sim_->PrintStopInfo(i);
++              }
++            } else if (GetValue(arg2, &value)) {
++              sim_->PrintStopInfo(value);
++            } else {
++              PrintF("Unrecognized argument.\n");
++            }
++          } else if (strcmp(arg1, "enable") == 0) {
++            // Enable all/the specified breakpoint(s).
++            if (strcmp(arg2, "all") == 0) {
++              for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
++                   i++) {
++                sim_->EnableStop(i);
++              }
++            } else if (GetValue(arg2, &value)) {
++              sim_->EnableStop(value);
++            } else {
++              PrintF("Unrecognized argument.\n");
++            }
++          } else if (strcmp(arg1, "disable") == 0) {
++            // Disable all/the specified breakpoint(s).
++            if (strcmp(arg2, "all") == 0) {
++              for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
++                   i++) {
++                sim_->DisableStop(i);
++              }
++            } else if (GetValue(arg2, &value)) {
++              sim_->DisableStop(value);
++            } else {
++              PrintF("Unrecognized argument.\n");
++            }
++          }
++        } else {
++          PrintF("Wrong usage. Use help command for more information.\n");
++        }
++      } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
++        // Print registers and disassemble.
++        PrintAllRegs();
++        PrintF("\n");
++
++        disasm::NameConverter converter;
++        disasm::Disassembler dasm(converter);
++        // Use a reasonably large buffer.
++        v8::internal::EmbeddedVector<char, 256> buffer;
++
++        byte* cur = nullptr;
++        byte* end = nullptr;
++
++        if (argc == 1) {
++          cur = reinterpret_cast<byte*>(sim_->get_pc());
++          end = cur + (10 * kInstrSize);
++        } else if (argc == 2) {
++          int64_t value;
++          if (GetValue(arg1, &value)) {
++            cur = reinterpret_cast<byte*>(value);
++            // no length parameter passed, assume 10 instructions
++            end = cur + (10 * kInstrSize);
++          }
++        } else {
++          int64_t value1;
++          int64_t value2;
++          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
++            cur = reinterpret_cast<byte*>(value1);
++            end = cur + (value2 * kInstrSize);
++          }
++        }
++
++        while (cur < end) {
++          dasm.InstructionDecode(buffer, cur);
++          PrintF("  0x%08" PRIxPTR "   %s\n", reinterpret_cast<intptr_t>(cur),
++                 buffer.begin());
++          cur += kInstrSize;
++        }
++      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
++        PrintF("cont\n");
++        PrintF("  continue execution (alias 'c')\n");
++        PrintF("stepi\n");
++        PrintF("  step one instruction (alias 'si')\n");
++        PrintF("print <register>\n");
++        PrintF("  print register content (alias 'p')\n");
++        PrintF("  use register name 'all' to print all registers\n");
++        PrintF("printobject <register>\n");
++        PrintF("  print an object from a register (alias 'po')\n");
++        PrintF("stack [<words>]\n");
++        PrintF("  dump stack content, default dump 10 words)\n");
++        PrintF("mem <address> [<words>]\n");
++        PrintF("  dump memory content, default dump 10 words)\n");
++        PrintF("dump [<words>]\n");
++        PrintF(
++            "  dump memory content without pretty printing JS objects, default "
++            "dump 10 words)\n");
++        PrintF("flags\n");
++        PrintF("  print flags\n");
++        PrintF("disasm [<instructions>]\n");
++        PrintF("disasm [<address/register>]\n");
++        PrintF("disasm [[<address/register>] <instructions>]\n");
++        PrintF("  disassemble code, default is 10 instructions\n");
++        PrintF("  from pc (alias 'di')\n");
++        PrintF("gdb\n");
++        PrintF("  enter gdb\n");
++        PrintF("break <address>\n");
++        PrintF("  set a break point on the address\n");
++        PrintF("del\n");
++        PrintF("  delete the breakpoint\n");
++        PrintF("stop feature:\n");
++        PrintF("  Description:\n");
++        PrintF("    Stops are debug instructions inserted by\n");
++        PrintF("    the Assembler::stop() function.\n");
++        PrintF("    When hitting a stop, the Simulator will\n");
++        PrintF("    stop and give control to the Debugger.\n");
++        PrintF("    All stop codes are watched:\n");
++        PrintF("    - They can be enabled / disabled: the Simulator\n");
++        PrintF("      will / won't stop when hitting them.\n");
++        PrintF("    - The Simulator keeps track of how many times they \n");
++        PrintF("      are met. (See the info command.) Going over a\n");
++        PrintF("      disabled stop still increases its counter. \n");
++        PrintF("  Commands:\n");
++        PrintF("    stop info all/<code> : print infos about number <code>\n");
++        PrintF("      or all stop(s).\n");
++        PrintF("    stop enable/disable all/<code> : enables / disables\n");
++        PrintF("      all or number <code> stop(s)\n");
++        PrintF("    stop unstop\n");
++        PrintF("      ignore the stop instruction at the current location\n");
++        PrintF("      from now on\n");
++      } else {
++        PrintF("Unknown command: %s\n", cmd);
++      }
++    }
++  }
++
++  // Add all the breakpoints back to stop execution and enter the debugger
++  // shell when hit.
++  RedoBreakpoints();
++
++#undef COMMAND_SIZE
++#undef ARG_SIZE
++
++#undef STR
++#undef XSTR
++}
++
++bool Simulator::ICacheMatch(void* one, void* two) {
++  DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
++  DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
++  return one == two;
++}
++
++static uint32_t ICacheHash(void* key) {
++  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
++}
++
++static bool AllOnOnePage(uintptr_t start, size_t size) {
++  intptr_t start_page = (start & ~CachePage::kPageMask);
++  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
++  return start_page == end_page;
++}
++
++void Simulator::set_last_debugger_input(char* input) {
++  DeleteArray(last_debugger_input_);
++  last_debugger_input_ = input;
++}
++
++void Simulator::SetRedirectInstruction(Instruction* instruction) {
++  instruction->SetInstructionBits(rtCallRedirInstr);
++}
++
++void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
++                            void* start_addr, size_t size) {
++  int64_t start = reinterpret_cast<int64_t>(start_addr);
++  int64_t intra_line = (start & CachePage::kLineMask);
++  start -= intra_line;
++  size += intra_line;
++  size = ((size - 1) | CachePage::kLineMask) + 1;
++  int offset = (start & CachePage::kPageMask);
++  while (!AllOnOnePage(start, size - 1)) {
++    int bytes_to_flush = CachePage::kPageSize - offset;
++    FlushOnePage(i_cache, start, bytes_to_flush);
++    start += bytes_to_flush;
++    size -= bytes_to_flush;
++    DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
++    offset = 0;
++  }
++  if (size != 0) {
++    FlushOnePage(i_cache, start, size);
++  }
++}
++
++CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
++                                   void* page) {
++  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
++  if (entry->value == nullptr) {
++    CachePage* new_page = new CachePage();
++    entry->value = new_page;
++  }
++  return reinterpret_cast<CachePage*>(entry->value);
++}
++
++// Flush from start up to and not including start + size.
++void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
++                             intptr_t start, size_t size) {
++  DCHECK_LE(size, CachePage::kPageSize);
++  DCHECK(AllOnOnePage(start, size - 1));
++  DCHECK_EQ(start & CachePage::kLineMask, 0);
++  DCHECK_EQ(size & CachePage::kLineMask, 0);
++  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
++  int offset = (start & CachePage::kPageMask);
++  CachePage* cache_page = GetCachePage(i_cache, page);
++  char* valid_bytemap = cache_page->ValidityByte(offset);
++  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
++}
++
++void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
++                            Instruction* instr) {
++  int64_t address = reinterpret_cast<int64_t>(instr);
++  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
++  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
++  int offset = (address & CachePage::kPageMask);
++  CachePage* cache_page = GetCachePage(i_cache, page);
++  char* cache_valid_byte = cache_page->ValidityByte(offset);
++  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
++  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
++  if (cache_hit) {
++    // Check that the data in memory matches the contents of the I-cache.
++    CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
++                       cache_page->CachedData(offset), kInstrSize));
++  } else {
++    // Cache miss. Load memory into the cache.
++    memcpy(cached_line, line, CachePage::kLineLength);
++    *cache_valid_byte = CachePage::LINE_VALID;
++  }
++}
++
++Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
++  // Set up simulator support first. Some of this information is needed to
++  // setup the architecture state.
++  stack_size_ = FLAG_sim_stack_size * KB;
++  stack_ = reinterpret_cast<char*>(malloc(stack_size_));
++  pc_modified_ = false;
++  icount_ = 0;
++  break_count_ = 0;
++  break_pc_ = nullptr;
++  break_instr_ = 0;
++
++  // Set up architecture state.
++  // All registers are initialized to zero to start with.
++  for (int i = 0; i < kNumSimuRegisters; i++) {
++    registers_[i] = 0;
++  }
++  for (int i = 0; i < kNumFPURegisters; i++) {
++    FPUregisters_[i] = 0;
++  }
++  for (int i = 0; i < kNumCFRegisters; i++) {
++    CFregisters_[i] = 0;
++  }
++
++  FCSR_ = 0;
++
++  // The sp is initialized to point to the bottom (high address) of the
++  // allocated stack area. To be safe in potential stack underflows we leave
++  // some buffer below.
++  registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
++  // The ra and pc are initialized to a known bad value that will cause an
++  // access violation if the simulator ever tries to execute it.
++  registers_[pc] = bad_ra;
++  registers_[ra] = bad_ra;
++
++  last_debugger_input_ = nullptr;
++}
++
++Simulator::~Simulator() {
++  GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
++  free(stack_);
++}
++
++// Get the active Simulator for the current thread.
++Simulator* Simulator::current(Isolate* isolate) {
++  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
++      isolate->FindOrAllocatePerThreadDataForThisThread();
++  DCHECK_NOT_NULL(isolate_data);
++
++  Simulator* sim = isolate_data->simulator();
++  if (sim == nullptr) {
++    // TODO(146): delete the simulator object when a thread/isolate goes away.
++    sim = new Simulator(isolate);
++    isolate_data->set_simulator(sim);
++  }
++  return sim;
++}
++
++// Sets the register in the architecture state. It will also deal with updating
++// Simulator internal state for special registers such as PC.
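++// Writes to the zero register are dropped so it always reads as 0, and a
++// write to pc records pc_modified_, which lets the execution loop skip its
++// usual sequential pc advance after an explicit jump.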
++void Simulator::set_register(int reg, int64_t value) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == pc) { ++ pc_modified_ = true; ++ } ++ ++ // Zero register always holds 0. ++ registers_[reg] = (reg == 0) ? 0 : value; ++} ++ ++void Simulator::set_dw_register(int reg, const int* dbl) { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ registers_[reg] = dbl[1]; ++ registers_[reg] = registers_[reg] << 32; ++ registers_[reg] += dbl[0]; ++} ++ ++void Simulator::set_fpu_register(int fpureg, int64_t value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ FPUregisters_[fpureg] = value; ++} ++ ++void Simulator::set_fpu_register_word(int fpureg, int32_t value) { ++ // Set ONLY lower 32-bits, leaving upper bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* pword; ++ pword = reinterpret_cast(&FPUregisters_[fpureg]); ++ ++ *pword = value; ++} ++ ++void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) { ++ // Set ONLY upper 32-bits, leaving lower bits untouched. ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ int32_t* phiword; ++ phiword = (reinterpret_cast(&FPUregisters_[fpureg])) + 1; ++ ++ *phiword = value; ++} ++ ++void Simulator::set_fpu_register_float(int fpureg, float value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg]) = value; ++} ++ ++void Simulator::set_fpu_register_double(int fpureg, double value) { ++ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters)); ++ *bit_cast(&FPUregisters_[fpureg]) = value; ++} ++ ++void Simulator::set_cf_register(int cfreg, bool value) { ++ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters)); ++ CFregisters_[cfreg] = value; ++} ++ ++// Get the register from the architecture state. This function does handle ++// the special case of accessing the PC register. ++int64_t Simulator::get_register(int reg) const { ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ if (reg == 0) ++ return 0; ++ else ++ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0); ++} ++ ++double Simulator::get_double_from_register_pair(int reg) { ++ // TODO(plind): bad ABI stuff, refactor or remove. ++ DCHECK((reg >= 0) && (reg < kNumSimuRegisters)); ++ ++ double dm_val = 0.0; ++ // Read the bits from the unsigned integer register_[] array ++ // into the double precision floating point value and return it. 
++  char buffer[sizeof(registers_[0])];
++  memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
++  memcpy(&dm_val, buffer, sizeof(registers_[0]));
++  return (dm_val);
++}
++
++int64_t Simulator::get_fpu_register(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return FPUregisters_[fpureg];
++}
++
++int32_t Simulator::get_fpu_register_word(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
++}
++
++int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
++}
++
++int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
++}
++
++float Simulator::get_fpu_register_float(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
++}
++
++double Simulator::get_fpu_register_double(int fpureg) const {
++  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
++  return *bit_cast<double*>(&FPUregisters_[fpureg]);
++}
++
++bool Simulator::get_cf_register(int cfreg) const {
++  DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
++  return CFregisters_[cfreg];
++}
++
++// Runtime FP routines take up to two double arguments and zero
++// or one integer arguments. All are constructed here,
++// from a0-a3 or fa0 and fa1 (n64).
++void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
++  const int fparg2 = f1;
++  *x = get_fpu_register_double(f0);
++  *y = get_fpu_register_double(fparg2);
++  *z = static_cast<int32_t>(get_register(a2));
++}
++
++// The return value is either in v0/v1 or f0.
++void Simulator::SetFpResult(const double& result) {
++  set_fpu_register_double(0, result);
++}
++
++// Helper functions for setting and testing the FCSR register's bits.
++void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
++  if (value) {
++    FCSR_ |= (1 << cc);
++  } else {
++    FCSR_ &= ~(1 << cc);
++  }
++}
++
++bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
++
++void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
++  FCSR_ |= mode & kFPURoundingModeMask;
++}
++
++unsigned int Simulator::get_fcsr_rounding_mode() {
++  return FCSR_ & kFPURoundingModeMask;
++}
++
++// Sets the rounding error codes in FCSR based on the result of the rounding.
++// Returns true if the operation was invalid.
++bool Simulator::set_fcsr_round_error(double original, double rounded) {
++  bool ret = false;
++  double max_int32 = std::numeric_limits<int32_t>::max();
++  double min_int32 = std::numeric_limits<int32_t>::min();
++
++  if (!std::isfinite(original) || !std::isfinite(rounded)) {
++    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
++    ret = true;
++  }
++
++  if (original != rounded) {
++    set_fcsr_bit(kFCSRInexactFlagBit, true);
++  }
++
++  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
++    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
++    ret = true;
++  }
++
++  if (rounded > max_int32 || rounded < min_int32) {
++    set_fcsr_bit(kFCSROverflowFlagBit, true);
++    // The reference is not really clear but it seems this is required:
++    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
++    ret = true;
++  }
++
++  return ret;
++}
++
++// Sets the rounding error codes in FCSR based on the result of the rounding.
++// Returns true if the operation was invalid.
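++// For the 64-bit variants that follow, INT64_MAX is not exactly representable
++// as a double (the conversion rounds it up to 2^63), which is why the overflow
++// checks use >= for the upper bound but a plain < for the lower bound.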
++bool Simulator::set_fcsr_round64_error(double original, double rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round_error(float original, float rounded) { ++ bool ret = false; ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded > max_int32 || rounded < min_int32) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++void Simulator::set_fpu_register_word_invalid_result(float original, ++ float rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result(float original, float rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result64(float original, ++ float rounded) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. 
++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_word_invalid_result(double original, ++ double rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register_word(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result(double original, ++ double rounded) { ++ double max_int32 = std::numeric_limits::max(); ++ double min_int32 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded > max_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResult); ++ } else if (rounded < min_int32) { ++ set_fpu_register(fd_reg(), kFPUInvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::set_fpu_register_invalid_result64(double original, ++ double rounded) { ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ if (std::isnan(original)) { ++ set_fpu_register(fd_reg(), 0); ++ } else if (rounded >= max_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResult); ++ } else if (rounded < min_int64) { ++ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++// Sets the rounding error codes in FCSR based on the result of the rounding. ++// Returns true if the operation was invalid. ++bool Simulator::set_fcsr_round64_error(float original, float rounded) { ++ bool ret = false; ++ // The value of INT64_MAX (2^63-1) can't be represented as double exactly, ++ // loading the most accurate representation into max_int64, which is 2^63. ++ double max_int64 = std::numeric_limits::max(); ++ double min_int64 = std::numeric_limits::min(); ++ ++ if (!std::isfinite(original) || !std::isfinite(rounded)) { ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ if (original != rounded) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ ++ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) { ++ set_fcsr_bit(kFCSRUnderflowFlagBit, true); ++ ret = true; ++ } ++ ++ if (rounded >= max_int64 || rounded < min_int64) { ++ set_fcsr_bit(kFCSROverflowFlagBit, true); ++ // The reference is not really clear but it seems this is required: ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ ret = true; ++ } ++ ++ return ret; ++} ++ ++// For ftint instructions only ++void Simulator::round_according_to_fcsr(double toRound, double* rounded, ++ int32_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. 
++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ // switch ((FCSR_ >> 8) & 3) { ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(double toRound, double* rounded, ++ int64_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = std::trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round_according_to_fcsr(float toRound, float* rounded, ++ int32_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. 
++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = std::trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++void Simulator::round64_according_to_fcsr(float toRound, float* rounded, ++ int64_t* rounded_int) { ++ // 0 RN (round to nearest): Round a result to the nearest ++ // representable value; if the result is exactly halfway between ++ // two representable values, round to zero. ++ ++ // 1 RZ (round toward zero): Round a result to the closest ++ // representable value whose absolute value is less than or. ++ // equal to the infinitely accurate result. ++ ++ // 2 RP (round up, or toward +infinity): Round a result to the ++ // next representable value up. ++ ++ // 3 RN (round down, or toward −infinity): Round a result to ++ // the next representable value down. ++ switch (FCSR_ & kFPURoundingModeMask) { ++ case kRoundToNearest: ++ *rounded = std::floor(toRound + 0.5); ++ *rounded_int = static_cast(*rounded); ++ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ *rounded_int -= 1; ++ *rounded -= 1.f; ++ } ++ break; ++ case kRoundToZero: ++ *rounded = trunc(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToPlusInf: ++ *rounded = std::ceil(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ case kRoundToMinusInf: ++ *rounded = std::floor(toRound); ++ *rounded_int = static_cast(*rounded); ++ break; ++ } ++} ++ ++// Raw access to the PC register. ++void Simulator::set_pc(int64_t value) { ++ pc_modified_ = true; ++ registers_[pc] = value; ++} ++ ++bool Simulator::has_bad_pc() const { ++ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc)); ++} ++ ++// Raw access to the PC register without the special adjustment when reading. ++int64_t Simulator::get_pc() const { return registers_[pc]; } ++ ++// TODO(plind): refactor this messy debug code when we do unaligned access. ++void Simulator::DieOrDebug() { ++ if ((1)) { // Flag for this was removed. 
++ Loong64Debugger dbg(this); ++ dbg.Debug(); ++ } else { ++ base::OS::Abort(); ++ } ++} ++ ++void Simulator::TraceRegWr(int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64 ++ " uint64:%" PRIu64, ++ value, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e", ++ v.fmt_int64, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e", ++ v.fmt_int64, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e", ++ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ case WORD_DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32 ++ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64, ++ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0], ++ v.fmt_int64, v.fmt_int64); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++// TODO(plind): consider making icount_ printing a flag option. ++void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ union { ++ int64_t fmt_int64; ++ int32_t fmt_int32[2]; ++ float fmt_float[2]; ++ double fmt_double; ++ } v; ++ v.fmt_int64 = value; ++ ++ switch (t) { ++ case WORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int32:%" PRId32 " uint32:%" PRIu32, ++ v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") int64:%" PRId64 " uint64:%" PRIu64, ++ value, addr, icount_, value, value); ++ break; ++ case FLOAT: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0]); ++ break; ++ case DOUBLE: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_double); ++ break; ++ case FLOAT_DOUBLE: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64 ++ ") flt:%e dbl:%e", ++ v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (t) { ++ case BYTE: ++ SNPrintF(trace_buf_, ++ " %02" PRIx8 " --> [%016" PRIx64 "] (%" PRId64 ++ ")", ++ static_cast(value), addr, icount_); ++ break; ++ case HALF: ++ SNPrintF(trace_buf_, ++ " %04" PRIx16 " --> [%016" PRIx64 "] (%" PRId64 ++ ")", ++ static_cast(value), addr, icount_); ++ break; ++ case WORD: ++ SNPrintF(trace_buf_, ++ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64 ")", ++ static_cast(value), addr, icount_); ++ break; ++ case DWORD: ++ SNPrintF(trace_buf_, ++ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )", ++ value, addr, icount_); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++ } ++} ++ ++template ++void Simulator::TraceMemRd(int64_t addr, T value) { ++ if (::v8::internal::FLAG_trace_sim) { ++ switch (sizeof(T)) { ++ 
++      case 1:
++        SNPrintF(trace_buf_,
++                 "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64
++                 ") int8:%" PRId8 " uint8:%" PRIu8,
++                 static_cast<uint8_t>(value), addr, icount_,
++                 static_cast<int8_t>(value), static_cast<uint8_t>(value));
++        break;
++      case 2:
++        SNPrintF(trace_buf_,
++                 "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64
++                 ") int16:%" PRId16 " uint16:%" PRIu16,
++                 static_cast<uint16_t>(value), addr, icount_,
++                 static_cast<int16_t>(value), static_cast<uint16_t>(value));
++        break;
++      case 4:
++        SNPrintF(trace_buf_,
++                 "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64
++                 ") int32:%" PRId32 " uint32:%" PRIu32,
++                 static_cast<uint32_t>(value), addr, icount_,
++                 static_cast<int32_t>(value), static_cast<uint32_t>(value));
++        break;
++      case 8:
++        SNPrintF(trace_buf_,
++                 "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64
++                 ") int64:%" PRId64 " uint64:%" PRIu64,
++                 static_cast<uint64_t>(value), addr, icount_,
++                 static_cast<int64_t>(value), static_cast<uint64_t>(value));
++        break;
++      default:
++        UNREACHABLE();
++    }
++  }
++}
++
++template <typename T>
++void Simulator::TraceMemWr(int64_t addr, T value) {
++  if (::v8::internal::FLAG_trace_sim) {
++    switch (sizeof(T)) {
++      case 1:
++        SNPrintF(trace_buf_,
++                 " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64 ")",
++                 static_cast<uint8_t>(value), addr, icount_);
++        break;
++      case 2:
++        SNPrintF(trace_buf_,
++                 " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")",
++                 static_cast<uint16_t>(value), addr, icount_);
++        break;
++      case 4:
++        SNPrintF(trace_buf_,
++                 "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")",
++                 static_cast<uint32_t>(value), addr, icount_);
++        break;
++      case 8:
++        SNPrintF(trace_buf_,
++                 "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")",
++                 static_cast<uint64_t>(value), addr, icount_);
++        break;
++      default:
++        UNREACHABLE();
++    }
++  }
++}
++
++// TODO(plind): sign-extend and zero-extend not implemented properly
++// on all the ReadXX functions, I don't think re-interpret cast does it.
++int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
++  if (addr >= 0 && addr < 0x400) {
++    // This has to be a nullptr-dereference, drop into debugger.
++    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
++           " \n",
++           addr, reinterpret_cast<intptr_t>(instr));
++    DieOrDebug();
++  }
++  /* if ((addr & 0x3) == 0)*/ {
++    local_monitor_.NotifyLoad();
++    int32_t* ptr = reinterpret_cast<int32_t*>(addr);
++    TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
++    return *ptr;
++  }
++  // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
++  //        addr,
++  //        reinterpret_cast<intptr_t>(instr));
++  // DieOrDebug();
++  // return 0;
++}
++
++uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
++  if (addr >= 0 && addr < 0x400) {
++    // This has to be a nullptr-dereference, drop into debugger.
++    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
++           " \n",
++           addr, reinterpret_cast<intptr_t>(instr));
++    DieOrDebug();
++  }
++  // if ((addr & 0x3) == 0) {
++  local_monitor_.NotifyLoad();
++  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
++  TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
++  return *ptr;
++  // }
++  // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
++  //        reinterpret_cast<intptr_t>(instr));
++  // DieOrDebug();
++  // return 0;
++}
++
++void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
++  if (addr >= 0 && addr < 0x400) {
++    // This has to be a nullptr-dereference, drop into debugger.
++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /*if ((addr & 0x3) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, WORD); ++ int* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteConditionalW(int64_t addr, int32_t value, ++ Instruction* instr, int32_t rk_reg) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & 0x3) == 0) { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) && ++ GlobalMonitor::Get()->NotifyStoreConditional_Locked( ++ addr, &global_monitor_thread_)) { ++ local_monitor_.NotifyStore(); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, WORD); ++ int* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ set_register(rk_reg, 1); ++ } else { ++ set_register(rk_reg, 0); ++ } ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++int64_t Simulator::Read2W(int64_t addr, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ " \n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /* if ((addr & kPointerAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyLoad(); ++ int64_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, *ptr); ++ return *ptr; ++ } ++ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. ++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ /*if ((addr & kPointerAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, DWORD); ++ int64_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, ++ // reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteConditional2W(int64_t addr, int64_t value, ++ Instruction* instr, int32_t rk_reg) { ++ if (addr >= 0 && addr < 0x400) { ++ // This has to be a nullptr-dereference, drop into debugger. 
++ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR ++ "\n", ++ addr, reinterpret_cast(instr)); ++ DieOrDebug(); ++ } ++ if ((addr & kPointerAlignmentMask) == 0) { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ if (local_monitor_.NotifyStoreConditional(addr, ++ TransactionSize::DoubleWord) && ++ GlobalMonitor::Get()->NotifyStoreConditional_Locked( ++ addr, &global_monitor_thread_)) { ++ local_monitor_.NotifyStore(); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, DWORD); ++ int64_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ set_register(rk_reg, 1); ++ } else { ++ set_register(rk_reg, 0); ++ } ++ return; ++ } ++ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr, ++ reinterpret_cast(instr)); ++ DieOrDebug(); ++} ++ ++double Simulator::ReadD(int64_t addr, Instruction* instr) { ++ /*if ((addr & kDoubleAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyLoad(); ++ double* ptr = reinterpret_cast(addr); ++ return *ptr; ++ } ++ // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // base::OS::Abort(); ++ // return 0; ++} ++ ++void Simulator::WriteD(int64_t addr, double value, Instruction* instr) { ++ /*if ((addr & kDoubleAlignmentMask) == 0)*/ { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ double* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ } ++ // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyLoad(); ++ uint16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ // } ++ // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++int16_t Simulator::ReadH(int64_t addr, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyLoad(); ++ int16_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++ // } ++ // PrintF("Unaligned signed halfword read at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++ // return 0; ++} ++ ++void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, HALF); ++ uint16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ // } ++ // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64 ++ // " , pc=0x%08" V8PRIxPTR "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) { ++ // if ((addr & 1) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, HALF); ++ int16_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ return; ++ // } ++ // PrintF("Unaligned halfword write at 0x%08" 
PRIx64 " , pc=0x%08" V8PRIxPTR ++ // "\n", ++ // addr, reinterpret_cast(instr)); ++ // DieOrDebug(); ++} ++ ++uint32_t Simulator::ReadBU(int64_t addr) { ++ local_monitor_.NotifyLoad(); ++ uint8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr & 0xFF; ++} ++ ++int32_t Simulator::ReadB(int64_t addr) { ++ local_monitor_.NotifyLoad(); ++ int8_t* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, static_cast(*ptr)); ++ return *ptr; ++} ++ ++void Simulator::WriteB(int64_t addr, uint8_t value) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, BYTE); ++ uint8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++void Simulator::WriteB(int64_t addr, int8_t value) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ TraceMemWr(addr, value, BYTE); ++ int8_t* ptr = reinterpret_cast(addr); ++ *ptr = value; ++} ++ ++template ++T Simulator::ReadMem(int64_t addr, Instruction* instr) { ++ int alignment_mask = (1 << sizeof(T)) - 1; ++ if ((addr & alignment_mask) == 0) { ++ local_monitor_.NotifyLoad(); ++ T* ptr = reinterpret_cast(addr); ++ TraceMemRd(addr, *ptr); ++ return *ptr; ++ } ++ PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR ++ "\n", ++ sizeof(T), addr, reinterpret_cast(instr)); ++ base::OS::Abort(); ++ return 0; ++} ++ ++template ++void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) { ++ int alignment_mask = (1 << sizeof(T)) - 1; ++ if ((addr & alignment_mask) == 0) { ++ local_monitor_.NotifyStore(); ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_); ++ T* ptr = reinterpret_cast(addr); ++ *ptr = value; ++ TraceMemWr(addr, value); ++ return; ++ } ++ PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR ++ "\n", ++ sizeof(T), addr, reinterpret_cast(instr)); ++ base::OS::Abort(); ++} ++ ++// Returns the limit of the stack area to enable checking for stack overflows. ++uintptr_t Simulator::StackLimit(uintptr_t c_limit) const { ++ // The simulator uses a separate JS stack. If we have exhausted the C stack, ++ // we also drop down the JS limit to reflect the exhaustion on the JS stack. ++ if (GetCurrentStackPosition() < c_limit) { ++ return reinterpret_cast(get_sp()); ++ } ++ ++ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes ++ // to prevent overrunning the stack when pushing values. ++ return reinterpret_cast(stack_) + 1024; ++} ++ ++// Unsupported instructions use Format to print an error and stop execution. ++void Simulator::Format(Instruction* instr, const char* format) { ++ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n", ++ reinterpret_cast(instr), format); ++ UNIMPLEMENTED(); ++} ++ ++// Calls into the V8 runtime are based on this very simple interface. ++// Note: To be able to return two values from some calls the code in runtime.cc ++// uses the ObjectPair which is essentially two 32-bit values stuffed into a ++// 64-bit value. With the code below we assume that all runtime calls return ++// 64 bits of result. If they don't, the v1 result register contains a bogus ++// value, which is fine because it is caller-saved. 
++
++using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
++                                            int64_t arg2, int64_t arg3,
++                                            int64_t arg4, int64_t arg5,
++                                            int64_t arg6, int64_t arg7,
++                                            int64_t arg8, int64_t arg9);
++
++// These prototypes handle the four types of FP calls.
++using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
++using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
++using SimulatorRuntimeFPCall = double (*)(double darg0);
++using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
++
++// This signature supports direct call in to API function native callback
++// (refer to InvocationCallback in v8.h).
++using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
++using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
++
++// This signature supports direct call to accessor getter callback.
++using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
++using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
++                                                     void* arg2);
++
++// Software interrupt instructions are used by the simulator to call into the
++// C-based V8 runtime. They are also used for debugging with the simulator.
++void Simulator::SoftwareInterrupt() {
++  int32_t opcode_hi15 = instr_.Bits(31, 17);
++  CHECK_EQ(opcode_hi15, 0x15);
++  uint32_t code = instr_.Bits(14, 0);
++  // We first check if we met a call_rt_redirected.
++  if (instr_.InstructionBits() == rtCallRedirInstr) {
++    Redirection* redirection = Redirection::FromInstruction(instr_.instr());
++
++    int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
++
++    int64_t arg0 = get_register(a0);
++    int64_t arg1 = get_register(a1);
++    int64_t arg2 = get_register(a2);
++    int64_t arg3 = get_register(a3);
++    int64_t arg4 = get_register(a4);
++    int64_t arg5 = get_register(a5);
++    int64_t arg6 = get_register(a6);
++    int64_t arg7 = get_register(a7);
++    int64_t arg8 = stack_pointer[0];
++    int64_t arg9 = stack_pointer[1];
++    STATIC_ASSERT(kMaxCParameters == 10);
++
++    bool fp_call =
++        (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
++        (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
++
++    {
++      // With the hard floating point calling convention, double
++      // arguments are passed in FPU registers. Fetch the arguments
++      // from there and call the builtin using soft floating point
++      // convention.
++      switch (redirection->type()) {
++        case ExternalReference::BUILTIN_FP_FP_CALL:
++        case ExternalReference::BUILTIN_COMPARE_CALL:
++          arg0 = get_fpu_register(f0);
++          arg1 = get_fpu_register(f1);
++          arg2 = get_fpu_register(f2);
++          arg3 = get_fpu_register(f3);
++          break;
++        case ExternalReference::BUILTIN_FP_CALL:
++          arg0 = get_fpu_register(f0);
++          arg1 = get_fpu_register(f1);
++          break;
++        case ExternalReference::BUILTIN_FP_INT_CALL:
++          arg0 = get_fpu_register(f0);
++          arg1 = get_fpu_register(f1);
++          arg2 = get_register(a2);
++          break;
++        default:
++          break;
++      }
++    }
++
++    // This is dodgy but it works because the C entry stubs are never moved.
++    // See comment in codegen-arm.cc and bug 1242173.
++    int64_t saved_ra = get_register(ra);
++
++    intptr_t external =
++        reinterpret_cast<intptr_t>(redirection->external_function());
++
++    // Based on CpuFeatures::IsSupported(FPU), Loong64 will use either hardware
++    // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
++    // simulator. Soft-float has additional abstraction of ExternalReference,
++    // to support serialization.
++    if (fp_call) {
++      double dval0, dval1;  // one or two double parameters
++      int32_t ival;         // zero or one integer parameters
++      int64_t iresult = 0;  // integer return value
++      double dresult = 0;   // double return value
++      GetFpArgs(&dval0, &dval1, &ival);
++      SimulatorRuntimeCall generic_target =
++          reinterpret_cast<SimulatorRuntimeCall>(external);
++      if (::v8::internal::FLAG_trace_sim) {
++        switch (redirection->type()) {
++          case ExternalReference::BUILTIN_FP_FP_CALL:
++          case ExternalReference::BUILTIN_COMPARE_CALL:
++            PrintF("Call to host function at %p with args %f, %f",
++                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
++                   dval0, dval1);
++            break;
++          case ExternalReference::BUILTIN_FP_CALL:
++            PrintF("Call to host function at %p with arg %f",
++                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
++                   dval0);
++            break;
++          case ExternalReference::BUILTIN_FP_INT_CALL:
++            PrintF("Call to host function at %p with args %f, %d",
++                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
++                   dval0, ival);
++            break;
++          default:
++            UNREACHABLE();
++            break;
++        }
++      }
++      switch (redirection->type()) {
++        case ExternalReference::BUILTIN_COMPARE_CALL: {
++          SimulatorRuntimeCompareCall target =
++              reinterpret_cast<SimulatorRuntimeCompareCall>(external);
++          iresult = target(dval0, dval1);
++          set_register(v0, static_cast<int64_t>(iresult));
++          // set_register(v1, static_cast<int64_t>(iresult >> 32));
++          break;
++        }
++        case ExternalReference::BUILTIN_FP_FP_CALL: {
++          SimulatorRuntimeFPFPCall target =
++              reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
++          dresult = target(dval0, dval1);
++          SetFpResult(dresult);
++          break;
++        }
++        case ExternalReference::BUILTIN_FP_CALL: {
++          SimulatorRuntimeFPCall target =
++              reinterpret_cast<SimulatorRuntimeFPCall>(external);
++          dresult = target(dval0);
++          SetFpResult(dresult);
++          break;
++        }
++        case ExternalReference::BUILTIN_FP_INT_CALL: {
++          SimulatorRuntimeFPIntCall target =
++              reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
++          dresult = target(dval0, ival);
++          SetFpResult(dresult);
++          break;
++        }
++        default:
++          UNREACHABLE();
++          break;
++      }
++      if (::v8::internal::FLAG_trace_sim) {
++        switch (redirection->type()) {
++          case ExternalReference::BUILTIN_COMPARE_CALL:
++            PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
++            break;
++          case ExternalReference::BUILTIN_FP_FP_CALL:
++          case ExternalReference::BUILTIN_FP_CALL:
++          case ExternalReference::BUILTIN_FP_INT_CALL:
++            PrintF("Returned %f\n", dresult);
++            break;
++          default:
++            UNREACHABLE();
++            break;
++        }
++      }
++    } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
++      if (::v8::internal::FLAG_trace_sim) {
++        PrintF("Call to host function at %p args %08" PRIx64 " \n",
++               reinterpret_cast<void*>(external), arg0);
++      }
++      SimulatorRuntimeDirectApiCall target =
++          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
++      target(arg0);
++    } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
++      if (::v8::internal::FLAG_trace_sim) {
++        PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
++               " \n",
++               reinterpret_cast<void*>(external), arg0, arg1);
++      }
++      SimulatorRuntimeProfilingApiCall target =
++          reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
++      target(arg0, Redirection::ReverseRedirection(arg1));
++    } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
++      if (::v8::internal::FLAG_trace_sim) {
++        PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
++               " \n",
++               reinterpret_cast<void*>(external), arg0, arg1);
++      }
++      SimulatorRuntimeDirectGetterCall target =
++          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
++      target(arg0, arg1);
++    } else if (redirection->type() ==
++               ExternalReference::PROFILING_GETTER_CALL) {
++      if (::v8::internal::FLAG_trace_sim) {
++        PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
++               " %08" PRIx64 " \n",
++               reinterpret_cast<void*>(external), arg0, arg1, arg2);
++      }
++      SimulatorRuntimeProfilingGetterCall target =
++          reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
++      target(arg0, arg1, Redirection::ReverseRedirection(arg2));
++    } else {
++      DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
++             redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
++      SimulatorRuntimeCall target =
++          reinterpret_cast<SimulatorRuntimeCall>(external);
++      if (::v8::internal::FLAG_trace_sim) {
++        PrintF(
++            "Call to host function at %p "
++            "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
++            " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
++            " , %08" PRIx64 " , %08" PRIx64 " \n",
++            reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
++            arg3, arg4, arg5, arg6, arg7, arg8, arg9);
++      }
++      ObjectPair result =
++          target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
++      set_register(v0, (int64_t)(result.x));
++      set_register(v1, (int64_t)(result.y));
++    }
++    if (::v8::internal::FLAG_trace_sim) {
++      PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
++             get_register(v0));
++    }
++    set_register(ra, saved_ra);
++    set_pc(get_register(ra));
++
++  } else if (code <= kMaxStopCode) {
++    if (IsWatchpoint(code)) {
++      PrintWatchpoint(code);
++    } else {
++      IncreaseStopCounter(code);
++      HandleStop(code, instr_.instr());
++    }
++  } else {
++    // All remaining break_ codes, and all traps are handled here.
++    Loong64Debugger dbg(this);
++    dbg.Debug();
++  }
++}
++
++// Stop helper functions.
++bool Simulator::IsWatchpoint(uint64_t code) {
++  return (code <= kMaxWatchpointCode);
++}
++
++void Simulator::PrintWatchpoint(uint64_t code) {
++  Loong64Debugger dbg(this);
++  ++break_count_;
++  PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64
++         " ) ----------"
++         "----------------------------------",
++         code, break_count_, icount_);
++  dbg.PrintAllRegs();  // Print registers and continue running.
++}
++
++void Simulator::HandleStop(uint64_t code, Instruction* instr) {
++  // Stop if it is enabled, otherwise go on jumping over the stop
++  // and the message address.
++  if (IsEnabledStop(code)) {
++    Loong64Debugger dbg(this);
++    dbg.Stop(instr);
++  }
++}
++
++bool Simulator::IsStopInstruction(Instruction* instr) {
++  int32_t opcode_hi15 = instr->Bits(31, 17);
++  uint32_t code = static_cast<uint32_t>(instr->Bits(14, 0));
++  return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
++         code <= kMaxStopCode;
++}
++
++bool Simulator::IsEnabledStop(uint64_t code) {
++  DCHECK_LE(code, kMaxStopCode);
++  DCHECK_GT(code, kMaxWatchpointCode);
++  return !(watched_stops_[code].count & kStopDisabledBit);
++}
++
++void Simulator::EnableStop(uint64_t code) {
++  if (!IsEnabledStop(code)) {
++    watched_stops_[code].count &= ~kStopDisabledBit;
++  }
++}
++
++void Simulator::DisableStop(uint64_t code) {
++  if (IsEnabledStop(code)) {
++    watched_stops_[code].count |= kStopDisabledBit;
++  }
++}
++
++void Simulator::IncreaseStopCounter(uint64_t code) {
++  DCHECK_LE(code, kMaxStopCode);
++  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
++    PrintF("Stop counter for code %" PRId64
++           " has overflowed.\n"
++           "Enabling this code and resetting the counter to 0.\n",
++           code);
++    watched_stops_[code].count = 0;
++    EnableStop(code);
++  } else {
++    watched_stops_[code].count++;
++  }
++}
++
++// Print a stop status.
++void Simulator::PrintStopInfo(uint64_t code) {
++  if (code <= kMaxWatchpointCode) {
++    PrintF("That is a watchpoint, not a stop.\n");
++    return;
++  } else if (code > kMaxStopCode) {
++    PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
++    return;
++  }
++  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
++  int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
++  // Don't print the state of unused breakpoints.
++  if (count != 0) {
++    if (watched_stops_[code].desc) {
++      PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
++             code, code, state, count, watched_stops_[code].desc);
++    } else {
++      PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
++             code, state, count);
++    }
++  }
++}
++
++void Simulator::SignalException(Exception e) {
++  FATAL("Error: Exception %i raised.", static_cast<int>(e));
++}
++
++template <typename T>
++static T FPAbs(T a);
++
++template <>
++double FPAbs<double>(double a) {
++  return fabs(a);
++}
++
++template <>
++float FPAbs<float>(float a) {
++  return fabsf(a);
++}
++
++template <typename T>
++static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
++  if (std::isnan(a) && std::isnan(b)) {
++    *result = a;
++  } else if (std::isnan(a)) {
++    *result = b;
++  } else if (std::isnan(b)) {
++    *result = a;
++  } else if (b == a) {
++    // Handle -0.0 == 0.0 case.
++    // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
++    // negates the result.
++    *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
++  } else {
++    return false;
++  }
++  return true;
++}
++
++template <typename T>
++static T FPUMin(T a, T b) {
++  T result;
++  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
++    return result;
++  } else {
++    return b < a ? b : a;
++  }
++}
++
++template <typename T>
++static T FPUMax(T a, T b) {
++  T result;
++  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
++    return result;
++  } else {
++    return b > a ? b : a;
++  }
++}
++
++template <typename T>
++static T FPUMinA(T a, T b) {
++  T result;
++  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
++    if (FPAbs(a) < FPAbs(b)) {
++      result = a;
++    } else if (FPAbs(b) < FPAbs(a)) {
++      result = b;
++    } else {
++      result = a < b ? a : b;
++    }
++  }
++  return result;
++}
++
++template <typename T>
++static T FPUMaxA(T a, T b) {
++  T result;
++  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
++    if (FPAbs(a) > FPAbs(b)) {
++      result = a;
++    } else if (FPAbs(b) > FPAbs(a)) {
++      result = b;
++    } else {
++      result = a > b ? a : b;
++    }
++  }
++  return result;
++}
++
++enum class KeepSign : bool { no = false, yes };
++
++template <typename T,
++          typename std::enable_if<std::is_floating_point<T>::value,
++                                  int>::type = 0>
++T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
++  DCHECK(std::isnan(arg));
++  T qNaN = std::numeric_limits<T>::quiet_NaN();
++  if (keepSign == KeepSign::yes) {
++    return std::copysign(qNaN, result);
++  }
++  return qNaN;
++}
++
++template <typename T>
++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
++  if (std::isnan(first)) {
++    return FPUCanonalizeNaNArg(result, first, keepSign);
++  }
++  return result;
++}
++
++template <typename T, typename... Args>
++T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
++  if (std::isnan(first)) {
++    return FPUCanonalizeNaNArg(result, first, keepSign);
++  }
++  return FPUCanonalizeNaNArgs(result, keepSign, args...);
++}
++
++template <typename Func, typename T, typename... Args>
++T FPUCanonalizeOperation(Func f, T first, Args... args) {
++  return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
++}
++
++template <typename Func, typename T, typename... Args>
++T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
++  T result = f(first, args...);
++  if (std::isnan(result)) {
++    result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
++  }
++  return result;
++}
++
++// Handle execution based on instruction types.
++void Simulator::DecodeTypeOp6() {
++  int64_t alu_out;
++  // Next pc.
++  int64_t next_pc = bad_ra;
++
++  // Branch instructions common part.
++  auto BranchAndLinkHelper = [this, &next_pc]() {
++    int64_t current_pc = get_pc();
++    set_register(ra, current_pc + kInstrSize);
++    int32_t offs26_low16 =
++        static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
++    int32_t offs26 = offs26_low16 | offs26_high10;
++    next_pc = current_pc + (offs26 << 2);
++    printf_instr("Offs26: %08x\n", offs26);
++    set_pc(next_pc);
++  };
++
++  auto BranchOff16Helper = [this, &next_pc](bool do_branch) {
++    int64_t current_pc = get_pc();
++    int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    printf_instr("Offs16: %08x\n", offs16);
++    int32_t offs = do_branch ? (offs16 << 2) : kInstrSize;
++    next_pc = current_pc + offs;
++    set_pc(next_pc);
++  };
++
++  auto BranchOff21Helper = [this, &next_pc](bool do_branch) {
++    int64_t current_pc = get_pc();
++    int32_t offs21_low16 =
++        static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    int32_t offs21_high5 = static_cast<int32_t>(instr_.Bits(4, 0) << 27) >> 11;
++    int32_t offs = offs21_low16 | offs21_high5;
++    printf_instr("Offs21: %08x\n", offs);
++    offs = do_branch ? (offs << 2) : kInstrSize;
++    next_pc = current_pc + offs;
++    set_pc(next_pc);
++  };
++
++  auto BranchOff26Helper = [this, &next_pc]() {
++    int64_t current_pc = get_pc();
++    int32_t offs26_low16 =
++        static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
++    int32_t offs26 = offs26_low16 | offs26_high10;
++    next_pc = current_pc + (offs26 << 2);
++    printf_instr("Offs26: %08x\n", offs26);
++    set_pc(next_pc);
++  };
++
++  auto JumpOff16Helper = [this, &next_pc]() {
++    int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
++    printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n",
++                 Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                 rj(), offs16);
++    set_register(rd_reg(), get_pc() + kInstrSize);
++    next_pc = rj() + (offs16 << 2);
++    set_pc(next_pc);
++  };
++
++  switch (instr_.Bits(31, 26) << 26) {
++    case ADDU16I_D: {
++      printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si16());
++      int32_t si16_upper = static_cast<int32_t>(si16()) << 16;
++      alu_out = static_cast<int64_t>(si16_upper) + rj();
++      SetResult(rd_reg(), alu_out);
++      break;
++    }
++    case BEQZ:
++      printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
++      BranchOff21Helper(rj() == 0);
++      break;
++    case BNEZ:
++      printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
++      BranchOff21Helper(rj() != 0);
++      break;
++    case BCZ: {
++      if (instr_.Bits(9, 8) == 0b00) {
++        // BCEQZ
++        printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
++        BranchOff21Helper(cj() == false);
++      } else if (instr_.Bits(9, 8) == 0b01) {
++        // BCNEZ
"True" : "False"); ++ BranchOff21Helper(cj() == true); ++ } else { ++ UNREACHABLE(); ++ } ++ break; ++ } ++ case JIRL: ++ JumpOff16Helper(); ++ break; ++ case B: ++ printf_instr("B\t "); ++ BranchOff26Helper(); ++ break; ++ case BL: ++ printf_instr("BL\t "); ++ BranchAndLinkHelper(); ++ break; ++ case BEQ: ++ printf_instr("BEQ\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() == rd()); ++ break; ++ case BNE: ++ printf_instr("BNE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() != rd()); ++ break; ++ case BLT: ++ printf_instr("BLT\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() < rd()); ++ break; ++ case BGE: ++ printf_instr("BGE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj() >= rd()); ++ break; ++ case BLTU: ++ printf_instr("BLTU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj_u() < rd_u()); ++ break; ++ case BGEU: ++ printf_instr("BGEU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()), ++ rj(), Registers::Name(rd_reg()), rd()); ++ BranchOff16Helper(rj_u() >= rd_u()); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp7() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 25) << 25) { ++ case LU12I_W: { ++ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_upper = static_cast(si20() << 12); ++ SetResult(rd_reg(), static_cast(si20_upper)); ++ break; ++ } ++ case LU32I_D: { ++ printf_instr("LU32I_D\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12) >> 12; ++ int64_t lower_32bit_mask = 0xFFFFFFFF; ++ alu_out = (static_cast(si20_signExtend) << 32) | ++ (rd() & lower_32bit_mask); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCADDI: { ++ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()), ++ rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12) >> 10; ++ int64_t current_pc = get_pc(); ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCALAU12I: { ++ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12); ++ int64_t current_pc = get_pc(); ++ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000; ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out & clear_lower12bit_mask); ++ break; ++ } ++ case PCADDU12I: { ++ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int32_t si20_signExtend = static_cast(si20() << 12); ++ int64_t current_pc = get_pc(); ++ alu_out = static_cast(si20_signExtend) + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case PCADDU18I: { ++ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n", ++ Registers::Name(rd_reg()), rd(), si20()); ++ int64_t si20_signExtend = (static_cast(si20()) << 44) >> 26; ++ int64_t current_pc = get_pc(); ++ alu_out = si20_signExtend + current_pc; ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp8() { ++ int64_t addr = 0x0; ++ int64_t si14_se = 
++  int64_t si14_se = (static_cast<int64_t>(si14()) << 50) >> 48;
++
++  switch (instr_.Bits(31, 24) << 24) {
++    case LDPTR_W:
++      printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr()));
++      break;
++    case STPTR_W:
++      printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      WriteW(rj() + si14_se, static_cast<int32_t>(rd()), instr_.instr());
++      break;
++    case LDPTR_D:
++      printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr()));
++      break;
++    case STPTR_D:
++      printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      Write2W(rj() + si14_se, rd(), instr_.instr());
++      break;
++    case LL_W: {
++      printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
++      addr = si14_se + rj();
++      set_register(rd_reg(), ReadW(addr, instr_.instr()));
++      local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
++      GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
++                                                    &global_monitor_thread_);
++      break;
++    }
++    case SC_W: {
++      printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      addr = si14_se + rj();
++      WriteConditionalW(addr, static_cast<int32_t>(rd()), instr_.instr(),
++                        rd_reg());
++      break;
++    }
++    case LL_D: {
++      printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
++      addr = si14_se + rj();
++      set_register(rd_reg(), Read2W(addr, instr_.instr()));
++      local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
++      GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
++                                                    &global_monitor_thread_);
++      break;
++    }
++    case SC_D: {
++      printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si14_se);
++      addr = si14_se + rj();
++      WriteConditional2W(addr, rd(), instr_.instr(), rd_reg());
++      break;
++    }
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Simulator::DecodeTypeOp10() {
++  int64_t alu_out = 0x0;
++  int64_t si12_se = (static_cast<int64_t>(si12()) << 52) >> 52;
++  uint64_t si12_ze = (static_cast<uint64_t>(ui12()) << 52) >> 52;
++
++  switch (instr_.Bits(31, 22) << 22) {
++    case BSTR_W: {
++      CHECK_EQ(instr_.Bit(21), 1);
++      uint8_t lsbw_ = lsbw();
++      uint8_t msbw_ = msbw();
++      CHECK_LE(lsbw_, msbw_);
++      uint8_t size = msbw_ - lsbw_ + 1;
++      uint64_t mask = (1ULL << size) - 1;
++      if (instr_.Bit(15) == 0) {
++        // BSTRINS_W
++        printf_instr(
++            "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
++            Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
++            msbw_, lsbw_);
++        alu_out = static_cast<int32_t>((rd_u() & ~(mask << lsbw_)) |
++                                       ((rj_u() & mask) << lsbw_));
++      } else {
++        // BSTRPICK_W
++        printf_instr(
++            "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
++            Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
++            msbw_, lsbw_);
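++        // Extract bits [msbw_:lsbw_] of rj into the low bits of the result.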
++        alu_out = static_cast<int32_t>((rj_u() & (mask << lsbw_)) >> lsbw_);
++      }
++      SetResult(rd_reg(), alu_out);
++      break;
++    }
++    case BSTRINS_D: {
++      uint8_t lsbd_ = lsbd();
++      uint8_t msbd_ = msbd();
++      CHECK_LE(lsbd_, msbd_);
++      printf_instr(
++          "BSTRINS_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
++          Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
++          msbd_, lsbd_);
++      uint8_t size = msbd_ - lsbd_ + 1;
++      if (size < 64) {
++        uint64_t mask = (1ULL << size) - 1;
++        alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_);
++        SetResult(rd_reg(), alu_out);
++      } else if (size == 64) {
++        SetResult(rd_reg(), rj());
++      }
++      break;
++    }
++    case BSTRPICK_D: {
++      uint8_t lsbd_ = lsbd();
++      uint8_t msbd_ = msbd();
++      CHECK_LE(lsbd_, msbd_);
++      printf_instr(
++          "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
++          Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
++          msbd_, lsbd_);
++      uint8_t size = msbd_ - lsbd_ + 1;
++      if (size < 64) {
++        uint64_t mask = (1ULL << size) - 1;
++        alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_;
++        SetResult(rd_reg(), alu_out);
++      } else if (size == 64) {
++        SetResult(rd_reg(), rj());
++      }
++      break;
++    }
++    case SLTI:
++      printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_se);
++      SetResult(rd_reg(), rj() < si12_se ? 1 : 0);
++      break;
++    case SLTUI:
++      printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_se);
++      SetResult(rd_reg(), rj_u() < static_cast<uint64_t>(si12_se) ? 1 : 0);
++      break;
++    case ADDI_W: {
++      printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_se);
++      int32_t alu32_out =
++          static_cast<int32_t>(rj()) + static_cast<int32_t>(si12_se);
++      SetResult(rd_reg(), alu32_out);
++      break;
++    }
++    case ADDI_D:
++      printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_se);
++      SetResult(rd_reg(), rj() + si12_se);
++      break;
++    case LU52I_D: {
++      printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_se);
++      int64_t si12_se = static_cast<int64_t>(si12()) << 52;
++      uint64_t mask = (1ULL << 52) - 1;
++      alu_out = si12_se + (rj() & mask);
++      SetResult(rd_reg(), alu_out);
++      break;
++    }
++    case ANDI:
++      printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_ze);
++      SetResult(rd_reg(), rj() & si12_ze);
++      break;
++    case ORI:
++      printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_ze);
++      SetResult(rd_reg(), rj_u() | si12_ze);
++      break;
++    case XORI:
++      printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_ze);
++      SetResult(rd_reg(), rj_u() ^ si12_ze);
++      break;
++    case LD_B:
++      printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_ze);
++      set_register(rd_reg(), ReadB(rj() + si12_se));
++      break;
++    case LD_H:
++      printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), si12_ze);
++      set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr()));
++      break;
++    case LD_W:
printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr())); ++ break; ++ case LD_D: ++ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr())); ++ break; ++ case ST_B: ++ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteB(rj() + si12_se, static_cast(rd())); ++ break; ++ case ST_H: ++ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteH(rj() + si12_se, static_cast(rd()), instr_.instr()); ++ break; ++ case ST_W: ++ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ WriteW(rj() + si12_se, static_cast(rd()), instr_.instr()); ++ break; ++ case ST_D: ++ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ Write2W(rj() + si12_se, rd(), instr_.instr()); ++ break; ++ case LD_BU: ++ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadBU(rj() + si12_se)); ++ break; ++ case LD_HU: ++ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr())); ++ break; ++ case LD_WU: ++ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), si12_ze); ++ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr())); ++ break; ++ case FLD_S: { ++ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), si12_ze); ++ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits. 
++      set_fpu_register_word(
++          fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE));
++      break;
++    }
++    case FST_S: {
++      printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   Registers::Name(rj_reg()), rj(), si12_ze);
++      int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(fd_reg()));
++      WriteW(rj() + si12_se, alu_out_32, instr_.instr());
++      break;
++    }
++    case FLD_D: {
++      printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   Registers::Name(rj_reg()), rj(), si12_ze);
++      set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr()));
++      TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE);
++      break;
++    }
++    case FST_D: {
++      printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   Registers::Name(rj_reg()), rj(), si12_ze);
++      WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr());
++      TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD);
++      break;
++    }
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Simulator::DecodeTypeOp12() {
++  switch (instr_.Bits(31, 20) << 20) {
++    case FMADD_S:
++      printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   FPURegisters::Name(fk_reg()), fk_float(),
++                   FPURegisters::Name(fa_reg()), fa_float(),
++                   FPURegisters::Name(fj_reg()), fj_float());
++      SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float()));
++      break;
++    case FMADD_D:
++      printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fk_reg()), fk_double(),
++                   FPURegisters::Name(fa_reg()), fa_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      SetFPUDoubleResult(fd_reg(),
++                         std::fma(fj_double(), fk_double(), fa_double()));
++      break;
++    case FMSUB_S:
++      printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   FPURegisters::Name(fk_reg()), fk_float(),
++                   FPURegisters::Name(fa_reg()), fa_float(),
++                   FPURegisters::Name(fj_reg()), fj_float());
++      SetFPUFloatResult(fd_reg(),
++                        std::fma(fj_float(), fk_float(), -fa_float()));
++      break;
++    case FMSUB_D:
++      printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fk_reg()), fk_double(),
++                   FPURegisters::Name(fa_reg()), fa_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      SetFPUDoubleResult(fd_reg(),
++                         std::fma(fj_double(), fk_double(), -fa_double()));
++      break;
++    case FNMADD_S:
++      printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   FPURegisters::Name(fk_reg()), fk_float(),
++                   FPURegisters::Name(fa_reg()), fa_float(),
++                   FPURegisters::Name(fj_reg()), fj_float());
++      SetFPUFloatResult(fd_reg(),
++                        std::fma(-fj_float(), fk_float(), -fa_float()));
++      break;
++    case FNMADD_D:
++      printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fk_reg()), fk_double(),
++                   FPURegisters::Name(fa_reg()), fa_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      SetFPUDoubleResult(fd_reg(),
++                         std::fma(-fj_double(), fk_double(), -fa_double()));
++      break;
++    case FNMSUB_S:
++      printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   FPURegisters::Name(fk_reg()), fk_float(),
++                   FPURegisters::Name(fa_reg()), fa_float(),
++                   FPURegisters::Name(fj_reg()), fj_float());
++      SetFPUFloatResult(fd_reg(),
++                        std::fma(-fj_float(), fk_float(), fa_float()));
++      break;
++    case FNMSUB_D:
++      printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fk_reg()), fk_double(),
++                   FPURegisters::Name(fa_reg()), fa_double(),
++                   FPURegisters::Name(fj_reg()), fj_double());
++      SetFPUDoubleResult(fd_reg(),
++                         std::fma(-fj_double(), fk_double(), fa_double()));
++      break;
++    case FCMP_COND_S: {
++      CHECK_EQ(instr_.Bits(4, 3), 0);
++      float fj = fj_float();
++      float fk = fk_float();
++      switch (cond()) {
++        case CAF: {
++          printf_instr("FCMP_CAF_S fcc%d\n", cd_reg());
++          set_cf_register(cd_reg(), false);
++          break;
++        }
++        case CUN: {
++          printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CEQ: {
++          printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj == fk);
++          break;
++        }
++        case CUEQ: {
++          printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj == fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CLT: {
++          printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj < fk);
++          break;
++        }
++        case CULT: {
++          printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj < fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CLE: {
++          printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj <= fk);
++          break;
++        }
++        case CULE: {
++          printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj <= fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CNE: {
++          printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
++          break;
++        }
++        case COR: {
++          printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
++          break;
++        }
++        case CUNE: {
++          printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj != fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case SAF:
++        case SUN:
++        case SEQ:
++        case SUEQ:
++        case SLT:
++        case SULT:
++        case SLE:
++        case SULE:
++        case SNE:
++        case SOR:
++        case SUNE:
++          UNIMPLEMENTED();
++          break;
++        default:
++          UNREACHABLE();
++      }
++      break;
++    }
++    case FCMP_COND_D: {
++      CHECK_EQ(instr_.Bits(4, 3), 0);
++      double fj = fj_double();
++      double fk = fk_double();
++      switch (cond()) {
++        case CAF: {
++          printf_instr("FCMP_CAF_D fcc%d\n", cd_reg());
++          set_cf_register(cd_reg(), false);
++          break;
++        }
++        case CUN: {
++          printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CEQ: {
++          printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj == fk);
++          break;
++        }
++        case CUEQ: {
++          printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj == fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CLT: {
++          printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj < fk);
++          break;
++        }
++        case CULT: {
++          printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj < fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CLE: {
++          printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), fj <= fk);
++          break;
++        }
++        case CULE: {
++          printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj <= fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case CNE: {
++          printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
++          break;
++        }
++        case COR: {
++          printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
++          break;
++        }
++        case CUNE: {
++          printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
++                       FPURegisters::Name(fj_reg()), fj,
++                       FPURegisters::Name(fk_reg()), fk);
++          set_cf_register(cd_reg(),
++                          (fj != fk) || std::isnan(fj) || std::isnan(fk));
++          break;
++        }
++        case SAF:
++        case SUN:
++        case SEQ:
++        case SUEQ:
++        case SLT:
++        case SULT:
++        case SLE:
++        case SULE:
++        case SNE:
++        case SOR:
++        case SUNE:
++          UNIMPLEMENTED();
++          break;
++        default:
++          UNREACHABLE();
++      }
++      break;
++    }
++    case FSEL: {
++      CHECK_EQ(instr_.Bits(19, 18), 0);
++      printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(),
++                   FPURegisters::Name(fd_reg()), fd_double(),
++                   FPURegisters::Name(fj_reg()), fj_double(),
++                   FPURegisters::Name(fk_reg()), fk_double());
++      if (ca() == 0) {
++        SetFPUDoubleResult(fd_reg(), fj_double());
++      } else {
++        SetFPUDoubleResult(fd_reg(), fk_double());
++      }
++      break;
++    }
++    default:
++      UNREACHABLE();
++  }
++}
++
++void Simulator::DecodeTypeOp14() {
++  int64_t alu_out = 0x0;
++  int32_t alu32_out = 0x0;
++
++  switch (instr_.Bits(31, 18) << 18) {
++    case ALSL: {
++      uint8_t sa = sa2() + 1;
++      alu32_out =
++          (static_cast<int32_t>(rj()) << sa) + static_cast<int32_t>(rk());
++      if (instr_.Bit(17) == 0) {
++        // ALSL_W
%016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ SetResult(rd_reg(), alu32_out); ++ } else { ++ // ALSL_WU ++ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } ++ break; ++ } ++ case BYTEPICK_W: { ++ CHECK_EQ(instr_.Bit(17), 0); ++ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ uint8_t sa = sa2() * 8; ++ if (sa == 0) { ++ alu32_out = static_cast(rk()); ++ } else { ++ int32_t mask = (1 << 31) >> (sa - 1); ++ int32_t rk_hi = (static_cast(rk()) & (~mask)) << sa; ++ int32_t rj_lo = (static_cast(rj()) & mask) >> (32 - sa); ++ alu32_out = rk_hi | rj_lo; ++ } ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case BYTEPICK_D: { ++ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa3()); ++ uint8_t sa = sa3() * 8; ++ if (sa == 0) { ++ alu_out = rk(); ++ } else { ++ int64_t mask = (1ULL << 63) >> (sa - 1); ++ int64_t rk_hi = (rk() & (~mask)) << sa; ++ int64_t rj_lo = (rj() & mask) >> (64 - sa); ++ alu_out = rk_hi | rj_lo; ++ } ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case ALSL_D: { ++ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk(), sa2()); ++ CHECK_EQ(instr_.Bit(17), 0); ++ uint8_t sa = sa2() + 1; ++ alu_out = (rj() << sa) + rk(); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SLLI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SLLI_W ++ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast(rj()) << ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if ((instr_.Bits(17, 16) == 0b01)) { ++ // SLLI_D ++ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj() << ui6()); ++ } ++ break; ++ } ++ case SRLI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SRLI_W ++ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast(rj()) >> ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // SRLI_D ++ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj_u() >> ui6()); ++ } ++ break; ++ } ++ case SRAI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // SRAI_W ++ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast(rj()) >> ui5(); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // SRAI_D ++ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ 
Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ SetResult(rd_reg(), rj() >> ui6()); ++ } ++ break; ++ } ++ case ROTRI: { ++ DCHECK_EQ(instr_.Bit(17), 0); ++ if (instr_.Bits(17, 15) == 0b001) { ++ // ROTRI_W ++ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui5()); ++ alu32_out = static_cast( ++ base::bits::RotateRight32(static_cast(rj_u()), ++ static_cast(ui5()))); ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ } else if (instr_.Bits(17, 16) == 0b01) { ++ // ROTRI_D ++ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), ui6()); ++ alu_out = ++ static_cast(base::bits::RotateRight64(rj_u(), ui6())); ++ SetResult(rd_reg(), alu_out); ++ printf_instr("ROTRI, %s, %s, %d\n", Registers::Name(rd_reg()), ++ Registers::Name(rj_reg()), ui6()); ++ } ++ break; ++ } ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp17() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 15) << 15) { ++ case ADD_W: { ++ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t alu32_out = static_cast(rj() + rk()); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case ADD_D: ++ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() + rk()); ++ break; ++ case SUB_W: { ++ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t alu32_out = static_cast(rj() - rk()); ++ // Sign-extend result of 32bit operation into 64bit register. ++ SetResult(rd_reg(), static_cast(alu32_out)); ++ break; ++ } ++ case SUB_D: ++ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() - rk()); ++ break; ++ case SLT: ++ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() < rk() ? 1 : 0); ++ break; ++ case SLTU: ++ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0); ++ break; ++ case MASKEQZ: ++ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rk() == 0 ? rj() : 0); ++ break; ++ case MASKNEZ: ++ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rk() != 0 ? 
rj() : 0); ++ break; ++ case NOR: ++ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), ~(rj() | rk())); ++ break; ++ case AND: ++ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() & rk()); ++ break; ++ case OR: ++ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() | rk()); ++ break; ++ case XOR: ++ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() ^ rk()); ++ break; ++ case ORN: ++ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() | (~rk())); ++ break; ++ case ANDN: ++ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() & (~rk())); ++ break; ++ case SLL_W: ++ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), (int32_t)rj() << (rk_u() % 32)); ++ break; ++ case SRL_W: { ++ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast((uint32_t)rj_u() >> (rk_u() % 32)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SRA_W: ++ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), (int32_t)rj() >> (rk_u() % 32)); ++ break; ++ case SLL_D: ++ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() << (rk_u() % 64)); ++ break; ++ case SRL_D: { ++ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast(rj_u() >> (rk_u() % 64)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case SRA_D: ++ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() >> (rk_u() % 64)); ++ break; ++ case ROTR_W: { ++ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast( ++ base::bits::RotateRight32(static_cast(rj_u()), ++ static_cast(rk_u() % 32))); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case ROTR_D: { ++ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast( ++ 
base::bits::RotateRight64((rj_u()), (rk_u() % 64))); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case MUL_W: { ++ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ alu_out = static_cast(rj()) * static_cast(rk()); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case MULH_W: { ++ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_lo = static_cast(rj()); ++ int32_t rk_lo = static_cast(rk()); ++ alu_out = static_cast(rj_lo) * static_cast(rk_lo); ++ SetResult(rd_reg(), alu_out >> 32); ++ break; ++ } ++ case MULH_WU: { ++ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_lo = static_cast(rj_u()); ++ uint32_t rk_lo = static_cast(rk_u()); ++ alu_out = static_cast(rj_lo) * static_cast(rk_lo); ++ SetResult(rd_reg(), alu_out >> 32); ++ break; ++ } ++ case MUL_D: ++ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), rj() * rk()); ++ break; ++ case MULH_D: ++ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk())); ++ break; ++ case MULH_DU: ++ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u())); ++ break; ++ case MULW_D_W: { ++ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int64_t rj_i32 = static_cast(rj()); ++ int64_t rk_i32 = static_cast(rk()); ++ SetResult(rd_reg(), rj_i32 * rk_i32); ++ break; ++ } ++ case MULW_D_WU: { ++ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint64_t rj_u32 = static_cast(rj_u()); ++ uint64_t rk_u32 = static_cast(rk_u()); ++ SetResult(rd_reg(), rj_u32 * rk_u32); ++ break; ++ } ++ case DIV_W: { ++ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_i32 = static_cast(rj()); ++ int32_t rk_i32 = static_cast(rk()); ++ if (rj_i32 == INT_MIN && rk_i32 == -1) { ++ SetResult(rd_reg(), INT_MIN); ++ } else if (rk_i32 != 0) { ++ SetResult(rd_reg(), rj_i32 / rk_i32); ++ } ++ break; ++ } ++ case MOD_W: { ++ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ int32_t rj_i32 = static_cast(rj()); ++ int32_t rk_i32 = static_cast(rk()); ++ if (rj_i32 == INT_MIN && rk_i32 == -1) { ++ SetResult(rd_reg(), 0); ++ } else if (rk_i32 != 0) { ++ SetResult(rd_reg(), rj_i32 % rk_i32); ++ } ++ break; ++ } ++ case DIV_WU: { ++ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), 
++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_u32 = static_cast(rj()); ++ uint32_t rk_u32 = static_cast(rk()); ++ if (rk_u32 != 0) { ++ SetResult(rd_reg(), static_cast(rj_u32 / rk_u32)); ++ } ++ break; ++ } ++ case MOD_WU: { ++ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ uint32_t rj_u32 = static_cast(rj()); ++ uint32_t rk_u32 = static_cast(rk()); ++ if (rk_u32 != 0) { ++ SetResult(rd_reg(), static_cast(rj_u32 % rk_u32)); ++ } ++ break; ++ } ++ case DIV_D: { ++ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rj() == LONG_MIN && rk() == -1) { ++ SetResult(rd_reg(), LONG_MIN); ++ } else if (rk() != 0) { ++ SetResult(rd_reg(), rj() / rk()); ++ } ++ break; ++ } ++ case MOD_D: { ++ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rj() == LONG_MIN && rk() == -1) { ++ SetResult(rd_reg(), 0); ++ } else if (rk() != 0) { ++ SetResult(rd_reg(), rj() % rk()); ++ } ++ break; ++ } ++ case DIV_DU: { ++ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rk_u() != 0) { ++ SetResult(rd_reg(), static_cast(rj_u() / rk_u())); ++ } ++ break; ++ } ++ case MOD_DU: { ++ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj(), Registers::Name(rk_reg()), rk()); ++ if (rk_u() != 0) { ++ SetResult(rd_reg(), static_cast(rj_u() % rk_u())); ++ } ++ break; ++ } ++ case BREAK: ++ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0)); ++ SoftwareInterrupt(); ++ break; ++ case FADD_S: { ++ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FADD_D: { ++ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs + rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FSUB_S: { ++ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FSUB_D: { ++ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs - rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FMUL_S: { ++ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), 
fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FMUL_D: { ++ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs * rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FDIV_S: { ++ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult( ++ fd_reg(), ++ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; }, ++ fj_float(), fk_float())); ++ break; ++ } ++ case FDIV_D: { ++ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), ++ FPUCanonalizeOperation( ++ [](double lhs, double rhs) { return lhs / rhs; }, ++ fj_double(), fk_double())); ++ break; ++ } ++ case FMAX_S: ++ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float())); ++ break; ++ case FMAX_D: ++ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double())); ++ break; ++ case FMIN_S: ++ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float())); ++ break; ++ case FMIN_D: ++ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double())); ++ break; ++ case FMAXA_S: ++ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float())); ++ break; ++ case FMAXA_D: ++ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double(), ++ FPURegisters::Name(fk_reg()), fk_double()); ++ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double())); ++ break; ++ case FMINA_S: ++ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float(), ++ FPURegisters::Name(fk_reg()), fk_float()); ++ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float())); ++ break; ++ case FMINA_D: ++ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ 
FPURegisters::Name(fj_reg()), fj_double(),
++                   FPURegisters::Name(fk_reg()), fk_double());
++      SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double()));
++      break;
++    case LDX_B:
++      printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadB(rj() + rk()));
++      break;
++    case LDX_H:
++      printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr()));
++      break;
++    case LDX_W:
++      printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr()));
++      break;
++    case LDX_D:
++      printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr()));
++      break;
++    case STX_B:
++      printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      WriteB(rj() + rk(), static_cast<int8_t>(rd()));
++      break;
++    case STX_H:
++      printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      WriteH(rj() + rk(), static_cast<int16_t>(rd()), instr_.instr());
++      break;
++    case STX_W:
++      printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      WriteW(rj() + rk(), static_cast<int32_t>(rd()), instr_.instr());
++      break;
++    case STX_D:
++      printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      Write2W(rj() + rk(), rd(), instr_.instr());
++      break;
++    case LDX_BU:
++      printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadBU(rj() + rk()));
++      break;
++    case LDX_HU:
++      printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr()));
++      break;
++    case LDX_WU:
++      printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
++                   Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
++                   rj(), Registers::Name(rk_reg()), rk());
++      set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr()));
++      break;
++    case FLDX_S:
++      printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
++                   FPURegisters::Name(fd_reg()), fd_float(),
++                   Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
++                   rk());
++      set_fpu_register(fd_reg(), kFPUInvalidResult);  // Trash upper 32 bits.
++ set_fpu_register_word(fd_reg(), ++ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE)); ++ break; ++ case FLDX_D: ++ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr())); ++ break; ++ case FSTX_S: ++ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ WriteW(rj() + rk(), static_cast(get_fpu_register(fd_reg())), ++ instr_.instr()); ++ break; ++ case FSTX_D: ++ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()), ++ rk()); ++ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr()); ++ break; ++ case AMSWAP_W: ++ printf("Sim UNIMPLEMENTED: AMSWAP_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMSWAP_D: ++ printf("Sim UNIMPLEMENTED: AMSWAP_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMADD_W: ++ printf("Sim UNIMPLEMENTED: AMADD_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMADD_D: ++ printf("Sim UNIMPLEMENTED: AMADD_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMAND_W: ++ printf("Sim UNIMPLEMENTED: AMAND_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMAND_D: ++ printf("Sim UNIMPLEMENTED: AMAND_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMOR_W: ++ printf("Sim UNIMPLEMENTED: AMOR_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMOR_D: ++ printf("Sim UNIMPLEMENTED: AMOR_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMXOR_W: ++ printf("Sim UNIMPLEMENTED: AMXOR_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMXOR_D: ++ printf("Sim UNIMPLEMENTED: AMXOR_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_W: ++ printf("Sim UNIMPLEMENTED: AMMAX_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_D: ++ printf("Sim UNIMPLEMENTED: AMMAX_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_W: ++ printf("Sim UNIMPLEMENTED: AMMIN_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_D: ++ printf("Sim UNIMPLEMENTED: AMMIN_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_WU: ++ printf("Sim UNIMPLEMENTED: AMMAX_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_WU: ++ printf("Sim UNIMPLEMENTED: AMMIN_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMSWAP_DB_W: { ++ printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), static_cast(rk()), instr_.instr(), ++ rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMSWAP_DB_D: { ++ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard 
lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMADD_DB_W: { ++ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) + ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMADD_DB_D: { ++ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMAND_DB_W: { ++ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) & ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMAND_DB_D: { ++ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMOR_DB_W: { ++ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do 
{ ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) | ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMOR_DB_D: { ++ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMXOR_DB_W: { ++ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int32_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), ReadW(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditionalW(rj(), ++ static_cast(static_cast(rk()) ^ ++ static_cast(rd())), ++ instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMXOR_DB_D: { ++ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()), ++ rk(), Registers::Name(rj_reg()), rj()); ++ int64_t rdvalue; ++ do { ++ { ++ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex); ++ set_register(rd_reg(), Read2W(rj(), instr_.instr())); ++ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord); ++ GlobalMonitor::Get()->NotifyLoadLinked_Locked( ++ rj(), &global_monitor_thread_); ++ } ++ rdvalue = get_register(rd_reg()); ++ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg()); ++ } while (!get_register(rd_reg())); ++ set_register(rd_reg(), rdvalue); ++ } break; ++ case AMMAX_DB_W: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_D: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_W: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_D: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_WU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMAX_DB_DU: ++ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_WU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case AMMIN_DB_DU: ++ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n"); ++ UNIMPLEMENTED(); ++ break; ++ case DBAR: ++ printf_instr("DBAR\n"); ++ break; ++ case IBAR: ++ printf("Sim UNIMPLEMENTED: IBAR\n"); ++ 
UNIMPLEMENTED(); ++ break; ++ case FSCALEB_S: ++ printf("Sim UNIMPLEMENTED: FSCALEB_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FSCALEB_D: ++ printf("Sim UNIMPLEMENTED: FSCALEB_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCOPYSIGN_S: ++ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n"); ++ UNIMPLEMENTED(); ++ break; ++ case FCOPYSIGN_D: ++ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n"); ++ UNIMPLEMENTED(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void Simulator::DecodeTypeOp22() { ++ int64_t alu_out; ++ ++ switch (instr_.Bits(31, 10) << 10) { ++ case CLZ_W: { ++ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountLeadingZeros32(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CTZ_W: { ++ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountTrailingZeros32(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CLZ_D: { ++ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountLeadingZeros64(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case CTZ_D: { ++ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = base::bits::CountTrailingZeros64(static_cast(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_2H: { ++ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint64_t output = 0; ++ ++ uint32_t mask = 0xFF000000; ++ for (int i = 0; i < 4; i++) { ++ uint32_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 8; ++ } else { ++ tmp = tmp << 8; ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_4H: { ++ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF00000000000000; ++ for (int i = 0; i < 8; i++) { ++ uint64_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 8; ++ } else { ++ tmp = tmp << 8; ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_2W: { ++ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF000000FF000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 1) { ++ tmp = tmp >> (24 - i * 16); ++ } else { ++ tmp = tmp << (i * 16 - 24); ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVB_D: { ++ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFF00000000000000; ++ for (int i = 0; i < 8; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 3) { ++ tmp = tmp >> (56 - i * 16); ++ } else { ++ tmp = tmp << (i 
* 16 - 56); ++ } ++ output = output | tmp; ++ mask = mask >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVH_2W: { ++ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFFFF000000000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i % 2 == 0) { ++ tmp = tmp >> 16; ++ } else { ++ tmp = tmp << 16; ++ } ++ output = output | tmp; ++ mask = mask >> 16; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case REVH_D: { ++ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ ++ uint64_t mask = 0xFFFF000000000000; ++ for (int i = 0; i < 4; i++) { ++ uint64_t tmp = mask & input; ++ if (i <= 1) { ++ tmp = tmp >> (48 - i * 32); ++ } else { ++ tmp = tmp << (i * 32 - 48); ++ } ++ output = output | tmp; ++ mask = mask >> 16; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_4B: { ++ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint32_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 4; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | (static_cast(o_byte << 24)); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_8B: { ++ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint64_t input = rj_u(); ++ uint64_t output = 0; ++ uint8_t i_byte, o_byte; ++ ++ // Reverse the bit in byte for each individual byte ++ for (int i = 0; i < 8; i++) { ++ output = output >> 8; ++ i_byte = input & 0xFF; ++ ++ // Fast way to reverse bits in byte ++ // Devised by Sean Anderson, July 13, 2001 ++ o_byte = static_cast(((i_byte * 0x0802LU & 0x22110LU) | ++ (i_byte * 0x8020LU & 0x88440LU)) * ++ 0x10101LU >> ++ 16); ++ ++ output = output | (static_cast(o_byte) << 56); ++ input = input >> 8; ++ } ++ ++ alu_out = static_cast(output); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_W: { ++ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint32_t input = static_cast(rj()); ++ uint32_t output = 0; ++ output = base::bits::ReverseBits(input); ++ alu_out = static_cast(static_cast(output)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case BITREV_D: { ++ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ alu_out = static_cast(base::bits::ReverseBits(rj_u())); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case EXT_W_B: { ++ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint8_t input = static_cast(rj()); ++ alu_out = 
static_cast(static_cast(input)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case EXT_W_H: { ++ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n", ++ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), ++ rj()); ++ uint16_t input = static_cast(rj()); ++ alu_out = static_cast(static_cast(input)); ++ SetResult(rd_reg(), alu_out); ++ break; ++ } ++ case FABS_S: ++ printf_instr("FABS_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), std::abs(fj_float())); ++ break; ++ case FABS_D: ++ printf_instr("FABS_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), std::abs(fj_double())); ++ break; ++ case FNEG_S: ++ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), -fj_float()); ++ break; ++ case FNEG_D: ++ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUDoubleResult(fd_reg(), -fj_double()); ++ break; ++ case FSQRT_S: { ++ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ if (fj_float() >= 0) { ++ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float())); ++ } else { ++ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ } ++ break; ++ } ++ case FSQRT_D: { ++ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ if (fj_double() >= 0) { ++ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double())); ++ } else { ++ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan ++ set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ++ } ++ break; ++ } ++ case FMOV_S: ++ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUFloatResult(fd_reg(), fj_float()); ++ break; ++ case FMOV_D: ++ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_float(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUDoubleResult(fd_reg(), fj_double()); ++ break; ++ case MOVGR2FR_W: { ++ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ set_fpu_register_word(fd_reg(), static_cast(rj())); ++ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE); ++ break; ++ } ++ case MOVGR2FR_D: ++ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ SetFPUResult2(fd_reg(), rj()); ++ break; ++ case MOVGR2FRH_W: { ++ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ Registers::Name(rj_reg()), rj()); ++ set_fpu_register_hi_word(fd_reg(), static_cast(rj())); ++ TraceRegWr(get_fpu_register(fd_reg()), DOUBLE); ++ break; ++ } ++ case MOVFR2GR_S: { ++ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ set_register(rd_reg(), ++ static_cast(get_fpu_register_word(fj_reg()))); ++ TraceRegWr(get_register(rd_reg()), WORD_DWORD); ++ break; ++ } ++ case MOVFR2GR_D: ++ printf_instr("MOVFR2GR_D\t 
%s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetResult(rd_reg(), get_fpu_register(fj_reg())); ++ break; ++ case MOVFRH2GR_S: ++ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n", ++ Registers::Name(rd_reg()), rd(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg())); ++ break; ++ case MOVGR2FCSR: { ++ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_, ++ Registers::Name(rj_reg()), rj()); ++ // fcsr could be 0-3 ++ CHECK_LT(rd_reg(), 4); ++ FCSR_ = static_cast(rj()); ++ TraceRegWr(FCSR_); ++ break; ++ } ++ case MOVFCSR2GR: { ++ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n", ++ Registers::Name(rd_reg()), rd(), FCSR_); ++ // fcsr could be 0-3 ++ CHECK_LT(rj_reg(), 4); ++ SetResult(rd_reg(), FCSR_); ++ break; ++ } ++ case FCVT_S_D: ++ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ SetFPUFloatResult(fd_reg(), static_cast(fj_double())); ++ break; ++ case FCVT_D_S: ++ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ SetFPUDoubleResult(fd_reg(), static_cast(fj_float())); ++ break; ++ case FTINTRM_W_S: { ++ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_W_D: { ++ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_L_S: { ++ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRM_L_D: { ++ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_W_S: { ++ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::ceil(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_W_D: { ++ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n", ++ 
FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::ceil(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_L_S: { ++ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::ceil(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRP_L_D: { ++ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::ceil(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_W_S: { ++ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::trunc(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_W_D: { ++ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::trunc(fj); ++ int32_t result = static_cast(rounded); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_L_S: { ++ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::trunc(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRZ_L_D: { ++ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::trunc(fj); ++ int64_t result = static_cast(rounded); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRNE_W_S: { ++ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj + 0.5); ++ int32_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fj == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. 
++ result--; ++ } ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRNE_W_D: { ++ printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj + 0.5); ++ int32_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fj == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ result--; ++ } ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRNE_L_S: { ++ printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded = std::floor(fj + 0.5); ++ int64_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fj == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ result--; ++ } ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINTRNE_L_D: { ++ printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded = std::floor(fj + 0.5); ++ int64_t result = static_cast(rounded); ++ if ((result & 1) != 0 && result - fj == 0.5) { ++ // If the number is halfway between two integers, ++ // round to the even one. ++ result--; ++ } ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_W_S: { ++ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded; ++ int32_t result; ++ round_according_to_fcsr(fj, &rounded, &result); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_W_D: { ++ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded; ++ int32_t result; ++ round_according_to_fcsr(fj, &rounded, &result); ++ SetFPUWordResult(fd_reg(), result); ++ if (set_fcsr_round_error(fj, rounded)) { ++ set_fpu_register_word_invalid_result(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_L_S: { ++ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float rounded; ++ int64_t result; ++ round64_according_to_fcsr(fj, &rounded, &result); ++ SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FTINT_L_D: { ++ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double rounded; ++ int64_t result; ++ round64_according_to_fcsr(fj, &rounded, &result); ++ 
SetFPUResult(fd_reg(), result); ++ if (set_fcsr_round64_error(fj, rounded)) { ++ set_fpu_register_invalid_result64(fj, rounded); ++ } ++ break; ++ } ++ case FFINT_S_W: { ++ alu_out = get_fpu_register_signed_word(fj_reg()); ++ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), (int)alu_out); ++ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_S_L: { ++ alu_out = get_fpu_register(fj_reg()); ++ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), alu_out); ++ SetFPUFloatResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_D_W: { ++ alu_out = get_fpu_register_signed_word(fj_reg()); ++ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), (int)alu_out); ++ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FFINT_D_L: { ++ alu_out = get_fpu_register(fj_reg()); ++ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), alu_out); ++ SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); ++ break; ++ } ++ case FRINT_S: { ++ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_float()); ++ float fj = fj_float(); ++ float result, temp_result; ++ double temp; ++ float upper = std::ceil(fj); ++ float lower = std::floor(fj); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ printf_instr(" kRoundToNearest\n"); ++ if (upper - fj < fj - lower) { ++ result = upper; ++ } else if (upper - fj > fj - lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ float reminder = std::modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ printf_instr(" kRoundToZero\n"); ++ result = (fj > 0 ? lower : upper); ++ break; ++ case kRoundToPlusInf: ++ printf_instr(" kRoundToPlusInf\n"); ++ result = upper; ++ break; ++ case kRoundToMinusInf: ++ printf_instr(" kRoundToMinusInf\n"); ++ result = lower; ++ break; ++ } ++ SetFPUFloatResult(fd_reg(), result); ++ if (result != fj) { ++ set_fcsr_bit(kFCSRInexactFlagBit, true); ++ } ++ break; ++ } ++ case FRINT_D: { ++ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ", ++ FPURegisters::Name(fd_reg()), fd_double(), ++ FPURegisters::Name(fj_reg()), fj_double()); ++ double fj = fj_double(); ++ double result, temp, temp_result; ++ double upper = std::ceil(fj); ++ double lower = std::floor(fj); ++ switch (get_fcsr_rounding_mode()) { ++ case kRoundToNearest: ++ printf_instr(" kRoundToNearest\n"); ++ if (upper - fj < fj - lower) { ++ result = upper; ++ } else if (upper - fj > fj - lower) { ++ result = lower; ++ } else { ++ temp_result = upper / 2; ++ double reminder = std::modf(temp_result, &temp); ++ if (reminder == 0) { ++ result = upper; ++ } else { ++ result = lower; ++ } ++ } ++ break; ++ case kRoundToZero: ++ printf_instr(" kRoundToZero\n"); ++ result = (fj > 0 ? 
lower : upper);
++          break;
++        case kRoundToPlusInf:
++          printf_instr(" kRoundToPlusInf\n");
++          result = upper;
++          break;
++        case kRoundToMinusInf:
++          printf_instr(" kRoundToMinusInf\n");
++          result = lower;
++          break;
++      }
++      SetFPUDoubleResult(fd_reg(), result);
++      if (result != fj) {
++        set_fcsr_bit(kFCSRInexactFlagBit, true);
++      }
++      break;
++    }
++    case MOVFR2CF:
++      printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
++      UNIMPLEMENTED();
++      break;
++    case MOVCF2FR:
++      printf("Sim UNIMPLEMENTED: MOVCF2FR\n");
++      UNIMPLEMENTED();
++      break;
++    case MOVGR2CF:
++      printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(),
++                   Registers::Name(rj_reg()), rj());
++      set_cf_register(cd_reg(), rj() & 1);
++      break;
++    case MOVCF2GR:
++      printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()),
++                   rd(), cj_reg());
++      SetResult(rd_reg(), cj());
++      break;
++    case FRECIP_S:
++      printf("Sim UNIMPLEMENTED: FRECIP_S\n");
++      UNIMPLEMENTED();
++      break;
++    case FRECIP_D:
++      printf("Sim UNIMPLEMENTED: FRECIP_D\n");
++      UNIMPLEMENTED();
++      break;
++    case FRSQRT_S:
++      printf("Sim UNIMPLEMENTED: FRSQRT_S\n");
++      UNIMPLEMENTED();
++      break;
++    case FRSQRT_D:
++      printf("Sim UNIMPLEMENTED: FRSQRT_D\n");
++      UNIMPLEMENTED();
++      break;
++    case FCLASS_S:
++      printf("Sim UNIMPLEMENTED: FCLASS_S\n");
++      UNIMPLEMENTED();
++      break;
++    case FCLASS_D:
++      printf("Sim UNIMPLEMENTED: FCLASS_D\n");
++      UNIMPLEMENTED();
++      break;
++    case FLOGB_S:
++      printf("Sim UNIMPLEMENTED: FLOGB_S\n");
++      UNIMPLEMENTED();
++      break;
++    case FLOGB_D:
++      printf("Sim UNIMPLEMENTED: FLOGB_D\n");
++      UNIMPLEMENTED();
++      break;
++    case CLO_W:
++      printf("Sim UNIMPLEMENTED: CLO_W\n");
++      UNIMPLEMENTED();
++      break;
++    case CTO_W:
++      printf("Sim UNIMPLEMENTED: CTO_W\n");
++      UNIMPLEMENTED();
++      break;
++    case CLO_D:
++      printf("Sim UNIMPLEMENTED: CLO_D\n");
++      UNIMPLEMENTED();
++      break;
++    case CTO_D:
++      printf("Sim UNIMPLEMENTED: CTO_D\n");
++      UNIMPLEMENTED();
++      break;
++    // Unimplemented opcodes raised an error in the configuration step before,
++    // so we can use the default here to set the destination register in common
++    // cases.
++    default:
++      UNREACHABLE();
++  }
++}
++
++// Executes the current instruction.
++void Simulator::InstructionDecode(Instruction* instr) {
++  if (v8::internal::FLAG_check_icache) {
++    CheckICache(i_cache(), instr);
++  }
++  pc_modified_ = false;
++
++  v8::internal::EmbeddedVector<char, 256> buffer;
++
++  if (::v8::internal::FLAG_trace_sim) {
++    SNPrintF(trace_buf_, " ");
++    disasm::NameConverter converter;
++    disasm::Disassembler dasm(converter);
++    // Use a reasonably large buffer.
++    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
++  }
++
++  static int instr_count = 0;
++  USE(instr_count);
++  instr_ = instr;
++  printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++,
++               instr_.Bits(31, 0), get_pc());
++  switch (instr_.InstructionType()) {
++    case Instruction::kOp6Type:
++      DecodeTypeOp6();
++      break;
++    case Instruction::kOp7Type:
++      DecodeTypeOp7();
++      break;
++    case Instruction::kOp8Type:
++      DecodeTypeOp8();
++      break;
++    case Instruction::kOp10Type:
++      DecodeTypeOp10();
++      break;
++    case Instruction::kOp12Type:
++      DecodeTypeOp12();
++      break;
++    case Instruction::kOp14Type:
++      DecodeTypeOp14();
++      break;
++    case Instruction::kOp17Type:
++      DecodeTypeOp17();
++      break;
++    case Instruction::kOp22Type:
++      DecodeTypeOp22();
++      break;
++    default: {
++      printf("instr_: %x\n", instr_.Bits(31, 0));
++      UNREACHABLE();
++    }
++  }
++
++  if (::v8::internal::FLAG_trace_sim) {
++    PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
++           reinterpret_cast<intptr_t>(instr), buffer.begin(),
++           trace_buf_.begin());
++  }
++
++  if (!pc_modified_) {
++    set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
++  }
++}
++
++void Simulator::Execute() {
++  // Get the PC to simulate. Cannot use the accessor here as we need the
++  // raw PC value and not the one used as input to arithmetic instructions.
++  int64_t program_counter = get_pc();
++  if (::v8::internal::FLAG_stop_sim_at == 0) {
++    // Fast version of the dispatch loop without checking whether the simulator
++    // should be stopping at a particular executed instruction.
++    while (program_counter != end_sim_pc) {
++      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
++      icount_++;
++      InstructionDecode(instr);
++      program_counter = get_pc();
++    }
++  } else {
++    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
++    // we reach the particular instruction count.
++    while (program_counter != end_sim_pc) {
++      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
++      icount_++;
++      if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
++        Loong64Debugger dbg(this);
++        dbg.Debug();
++      } else {
++        InstructionDecode(instr);
++      }
++      program_counter = get_pc();
++    }
++  }
++}
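++
++// [Editor's note, illustrative only -- not part of the upstream patch] The
++// second, slower loop above exists so a developer can break into the built-in
++// debugger at a precise instruction count. Assuming V8's standard flag
++// machinery, a typical repro session might look like:
++//   out/Debug/d8 --trace_sim --stop_sim_at=123456 test.js
++// which traces every simulated instruction and drops into Loong64Debugger
++// when icount_ reaches 123456.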
++
++void Simulator::CallInternal(Address entry) {
++  // Adjust JS-based stack limit to C-based stack limit.
++  isolate_->stack_guard()->AdjustStackLimitForSimulator();
++
++  // Prepare to execute the code at entry.
++  set_register(pc, static_cast<int64_t>(entry));
++  // Put down marker for end of simulation. The simulator will stop simulation
++  // when the PC reaches this value. By saving the "end simulation" value into
++  // the LR the simulation stops when returning to this call point.
++  set_register(ra, end_sim_pc);
++
++  // Remember the values of callee-saved registers.
++  int64_t s0_val = get_register(s0);
++  int64_t s1_val = get_register(s1);
++  int64_t s2_val = get_register(s2);
++  int64_t s3_val = get_register(s3);
++  int64_t s4_val = get_register(s4);
++  int64_t s5_val = get_register(s5);
++  int64_t s6_val = get_register(s6);
++  int64_t s7_val = get_register(s7);
++  int64_t s8_val = get_register(s8);
++  int64_t gp_val = get_register(gp);
++  int64_t sp_val = get_register(sp);
++  int64_t tp_val = get_register(tp);
++  int64_t fp_val = get_register(fp);
++
++  // Set up the callee-saved registers with a known value. To be able to check
++  // that they are preserved properly across JS execution.
++  int64_t callee_saved_value = icount_;
++  set_register(s0, callee_saved_value);
++  set_register(s1, callee_saved_value);
++  set_register(s2, callee_saved_value);
++  set_register(s3, callee_saved_value);
++  set_register(s4, callee_saved_value);
++  set_register(s5, callee_saved_value);
++  set_register(s6, callee_saved_value);
++  set_register(s7, callee_saved_value);
++  set_register(s8, callee_saved_value);
++  set_register(gp, callee_saved_value);
++  set_register(tp, callee_saved_value);
++  set_register(fp, callee_saved_value);
++
++  // Start the simulation.
++  Execute();
++
++  // Check that the callee-saved registers have been preserved.
++  CHECK_EQ(callee_saved_value, get_register(s0));
++  CHECK_EQ(callee_saved_value, get_register(s1));
++  CHECK_EQ(callee_saved_value, get_register(s2));
++  CHECK_EQ(callee_saved_value, get_register(s3));
++  CHECK_EQ(callee_saved_value, get_register(s4));
++  CHECK_EQ(callee_saved_value, get_register(s5));
++  CHECK_EQ(callee_saved_value, get_register(s6));
++  CHECK_EQ(callee_saved_value, get_register(s7));
++  CHECK_EQ(callee_saved_value, get_register(s8));
++  CHECK_EQ(callee_saved_value, get_register(gp));
++  CHECK_EQ(callee_saved_value, get_register(tp));
++  CHECK_EQ(callee_saved_value, get_register(fp));
++
++  // Restore callee-saved registers with the original value.
++  set_register(s0, s0_val);
++  set_register(s1, s1_val);
++  set_register(s2, s2_val);
++  set_register(s3, s3_val);
++  set_register(s4, s4_val);
++  set_register(s5, s5_val);
++  set_register(s6, s6_val);
++  set_register(s7, s7_val);
++  set_register(s8, s8_val);
++  set_register(gp, gp_val);
++  set_register(sp, sp_val);
++  set_register(tp, tp_val);
++  set_register(fp, fp_val);
++}
++
++intptr_t Simulator::CallImpl(Address entry, int argument_count,
++                             const intptr_t* arguments) {
++  constexpr int kRegisterPassedArguments = 8;
++  // Set up arguments.
++
++  int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
++  if (reg_arg_count > 0) set_register(a0, arguments[0]);
++  if (reg_arg_count > 1) set_register(a1, arguments[1]);
++  if (reg_arg_count > 2) set_register(a2, arguments[2]);
++  if (reg_arg_count > 3) set_register(a3, arguments[3]);
++  if (reg_arg_count > 4) set_register(a4, arguments[4]);
++  if (reg_arg_count > 5) set_register(a5, arguments[5]);
++  if (reg_arg_count > 6) set_register(a6, arguments[6]);
++  if (reg_arg_count > 7) set_register(a7, arguments[7]);
++
++  // Remaining arguments passed on stack.
++  int64_t original_stack = get_register(sp);
++  // Compute position of stack on entry to generated code.
++  int stack_args_count = argument_count - reg_arg_count;
++  int stack_args_size = stack_args_count * sizeof(*arguments) + kCArgsSlotsSize;
++  int64_t entry_stack = original_stack - stack_args_size;
++
++  if (base::OS::ActivationFrameAlignment() != 0) {
++    entry_stack &= -base::OS::ActivationFrameAlignment();
++  }
++  // Store remaining arguments on stack, from low to high memory.
++  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
++  memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
++         stack_args_count * sizeof(*arguments));
++  set_register(sp, entry_stack);
++
++  CallInternal(entry);
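++
++  // [Editor's sketch, not part of the upstream patch] The register/stack
++  // split set up above follows the LP64 convention modelled here: the first
++  // eight integer arguments travel in a0..a7 and the rest are spilled below
++  // an aligned sp. For a 10-argument call, and assuming kCArgsSlotsSize ==
++  // kCArgSlotCount * 8, the layout is:
++  //   a0..a7                              <- arguments[0..7]
++  //   entry_stack = align(original_stack - 2 * 8 - kCArgsSlotsSize)
++  //   [entry_stack + kCArgsSlotsSize + 0] <- arguments[8]
++  //   [entry_stack + kCArgsSlotsSize + 8] <- arguments[9]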
++
++  // Pop stack passed arguments.
++  CHECK_EQ(entry_stack, get_register(sp));
++  set_register(sp, original_stack);
++
++  return get_register(v0);
++}
++
++double Simulator::CallFP(Address entry, double d0, double d1) {
++  const FPURegister fparg2 = f1;
++  set_fpu_register_double(f0, d0);
++  set_fpu_register_double(fparg2, d1);
++  CallInternal(entry);
++  return get_fpu_register_double(f0);
++}
++
++uintptr_t Simulator::PushAddress(uintptr_t address) {
++  int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
++  *stack_slot = address;
++  set_register(sp, new_sp);
++  return new_sp;
++}
++
++uintptr_t Simulator::PopAddress() {
++  int64_t current_sp = get_register(sp);
++  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
++  uintptr_t address = *stack_slot;
++  set_register(sp, current_sp + sizeof(uintptr_t));
++  return address;
++}
++
++Simulator::LocalMonitor::LocalMonitor()
++    : access_state_(MonitorAccess::Open),
++      tagged_addr_(0),
++      size_(TransactionSize::None) {}
++
++void Simulator::LocalMonitor::Clear() {
++  access_state_ = MonitorAccess::Open;
++  tagged_addr_ = 0;
++  size_ = TransactionSize::None;
++}
++
++void Simulator::LocalMonitor::NotifyLoad() {
++  if (access_state_ == MonitorAccess::RMW) {
++    // A non linked load could clear the local monitor. As a result, it's
++    // most strict to unconditionally clear the local monitor on load.
++    Clear();
++  }
++}
++
++void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
++                                               TransactionSize size) {
++  access_state_ = MonitorAccess::RMW;
++  tagged_addr_ = addr;
++  size_ = size;
++}
++
++void Simulator::LocalMonitor::NotifyStore() {
++  if (access_state_ == MonitorAccess::RMW) {
++    // A non exclusive store could clear the local monitor. As a result, it's
++    // most strict to unconditionally clear the local monitor on store.
++    Clear();
++  }
++}
++
++bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
++                                                     TransactionSize size) {
++  if (access_state_ == MonitorAccess::RMW) {
++    if (addr == tagged_addr_ && size_ == size) {
++      Clear();
++      return true;
++    } else {
++      return false;
++    }
++  } else {
++    DCHECK(access_state_ == MonitorAccess::Open);
++    return false;
++  }
++}
++
++Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
++    : access_state_(MonitorAccess::Open),
++      tagged_addr_(0),
++      next_(nullptr),
++      prev_(nullptr),
++      failure_counter_(0) {}
++
++void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
++  access_state_ = MonitorAccess::Open;
++  tagged_addr_ = 0;
++}
++
++void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
++    uintptr_t addr) {
++  access_state_ = MonitorAccess::RMW;
++  tagged_addr_ = addr;
++}
++
++void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
++  if (access_state_ == MonitorAccess::RMW) {
++    // A non exclusive store could clear the global monitor. As a result, it's
++    // most strict to unconditionally clear global monitors on store.
++    Clear_Locked();
++  }
++}
++
++bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
++    uintptr_t addr, bool is_requesting_thread) {
++  if (access_state_ == MonitorAccess::RMW) {
++    if (is_requesting_thread) {
++      if (addr == tagged_addr_) {
++        Clear_Locked();
++        // Introduce occasional sc/scd failures. This is to simulate the
++        // behavior of hardware, which can randomly fail due to background
++        // cache evictions.
++        if (failure_counter_++ >= kMaxFailureCounter) {
++          failure_counter_ = 0;
++          return false;
++        } else {
++          return true;
++        }
++      }
++    } else if ((addr & kExclusiveTaggedAddrMask) ==
++               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
++      // Check the masked addresses when responding to a successful lock by
++      // another thread so the implementation is more conservative (i.e. the
++      // granularity of locking is as large as possible.)
++      Clear_Locked();
++      return false;
++    }
++  }
++  return false;
++}
++
++void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
++    uintptr_t addr, LinkedAddress* linked_address) {
++  linked_address->NotifyLoadLinked_Locked(addr);
++  PrependProcessor_Locked(linked_address);
++}
++
++void Simulator::GlobalMonitor::NotifyStore_Locked(
++    LinkedAddress* linked_address) {
++  // Notify each thread of the store operation.
++  for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
++    iter->NotifyStore_Locked();
++  }
++}
++
++bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
++    uintptr_t addr, LinkedAddress* linked_address) {
++  DCHECK(IsProcessorInLinkedList_Locked(linked_address));
++  if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
++    // Notify the other processors that this StoreConditional succeeded.
++    for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
++      if (iter != linked_address) {
++        iter->NotifyStoreConditional_Locked(addr, false);
++      }
++    }
++    return true;
++  } else {
++    return false;
++  }
++}
++
++bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
++    LinkedAddress* linked_address) const {
++  return head_ == linked_address || linked_address->next_ ||
++         linked_address->prev_;
++}
++
++void Simulator::GlobalMonitor::PrependProcessor_Locked(
++    LinkedAddress* linked_address) {
++  if (IsProcessorInLinkedList_Locked(linked_address)) {
++    return;
++  }
++
++  if (head_) {
++    head_->prev_ = linked_address;
++  }
++  linked_address->prev_ = nullptr;
++  linked_address->next_ = head_;
++  head_ = linked_address;
++}
++
++void Simulator::GlobalMonitor::RemoveLinkedAddress(
++    LinkedAddress* linked_address) {
++  base::MutexGuard lock_guard(&mutex);
++  if (!IsProcessorInLinkedList_Locked(linked_address)) {
++    return;
++  }
++
++  if (linked_address->prev_) {
++    linked_address->prev_->next_ = linked_address->next_;
++  } else {
++    head_ = linked_address->next_;
++  }
++  if (linked_address->next_) {
++    linked_address->next_->prev_ = linked_address->prev_;
++  }
++  linked_address->prev_ = nullptr;
++  linked_address->next_ = nullptr;
++}
++
++#undef SScanF
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // USE_SIMULATOR
+diff --git a/deps/v8/src/execution/loong64/simulator-loong64.h b/deps/v8/src/execution/loong64/simulator-loong64.h
+new file mode 100644
+index 00000000..8b53d67b
+--- /dev/null
++++ b/deps/v8/src/execution/loong64/simulator-loong64.h
+@@ -0,0 +1,646 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Declares a Simulator for loongisa instructions if we are not generating a
++// native loongisa binary. This Simulator allows us to run and debug loongisa
++// code generation on regular desktop machines. V8 calls into generated code via
++// the GeneratedCode wrapper, which will start execution in the Simulator or
++// forwards to the real entry on a loongisa HW platform.
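++
++// [Editor's illustration, not part of the upstream patch] In a simulator
++// build, calls into generated code are routed through the Simulator instead
++// of being jumped to directly. Roughly (names per this header; the exact
++// GeneratedCode plumbing lives in src/execution/simulator.h):
++//   auto* sim = Simulator::current(isolate);
++//   int64_t result = sim->Call<int64_t>(code_entry, arg0, arg1);
++// On real LoongArch hardware the same wrapper compiles down to a plain
++// indirect call to code_entry.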
++
++#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
++#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
++
++// globals.h defines USE_SIMULATOR.
++#include "src/common/globals.h"
++
++template <class T>
++int Compare(const T& a, const T& b) {
++  if (a == b)
++    return 0;
++  else if (a < b)
++    return -1;
++  else
++    return 1;
++}
++
++// Returns the negative absolute value of its argument.
++template <typename T,
++          typename = typename std::enable_if<std::is_signed<T>::value>::type>
++T Nabs(T a) {
++  return a < 0 ? a : -a;
++}
++
++#if defined(USE_SIMULATOR)
++// Running with a simulator.
++
++#include "src/base/hashmap.h"
++#include "src/codegen/assembler.h"
++#include "src/codegen/loong64/constants-loong64.h"
++#include "src/execution/simulator-base.h"
++#include "src/utils/allocation.h"
++
++namespace v8 {
++namespace internal {
++
++// -----------------------------------------------------------------------------
++// Utility functions
++
++class CachePage {
++ public:
++  static const int LINE_VALID = 0;
++  static const int LINE_INVALID = 1;
++
++  static const int kPageShift = 12;
++  static const int kPageSize = 1 << kPageShift;
++  static const int kPageMask = kPageSize - 1;
++  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
++  static const int kLineLength = 1 << kLineShift;
++  static const int kLineMask = kLineLength - 1;
++
++  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
++
++  char* ValidityByte(int offset) {
++    return &validity_map_[offset >> kLineShift];
++  }
++
++  char* CachedData(int offset) { return &data_[offset]; }
++
++ private:
++  char data_[kPageSize];  // The cached data.
++  static const int kValidityMapSize = kPageSize >> kLineShift;
++  char validity_map_[kValidityMapSize];  // One byte per line.
++};
++
++class SimInstructionBase : public InstructionBase {
++ public:
++  Type InstructionType() const { return type_; }
++  inline Instruction* instr() const { return instr_; }
++  inline int32_t operand() const { return operand_; }
++
++ protected:
++  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
++  explicit SimInstructionBase(Instruction* instr) {}
++
++  int32_t operand_;
++  Instruction* instr_;
++  Type type_;
++
++ private:
++  DISALLOW_ASSIGN(SimInstructionBase);
++};
++
++class SimInstruction : public InstructionGetters<SimInstructionBase> {
++ public:
++  SimInstruction() {}
++
++  explicit SimInstruction(Instruction* instr) { *this = instr; }
++
++  SimInstruction& operator=(Instruction* instr) {
++    operand_ = *reinterpret_cast<const int32_t*>(instr);
++    instr_ = instr;
++    type_ = InstructionBase::InstructionType();
++    DCHECK(reinterpret_cast<void*>(&operand_) == this);
++    return *this;
++  }
++};
++
++class Simulator : public SimulatorBase {
++ public:
++  friend class Loong64Debugger;
++
++  // Registers are declared in order.
++  enum Register {
++    no_reg = -1,
++    zero_reg = 0,
++    ra,
++    gp,
++    sp,
++    a0,
++    a1,
++    a2,
++    a3,
++    a4,
++    a5,
++    a6,
++    a7,
++    t0,
++    t1,
++    t2,
++    t3,
++    t4,
++    t5,
++    t6,
++    t7,
++    t8,
++    tp,
++    fp,
++    s0,
++    s1,
++    s2,
++    s3,
++    s4,
++    s5,
++    s6,
++    s7,
++    s8,
++    pc,  // pc must be the last register.
++    kNumSimuRegisters,
++    // aliases
++    v0 = a0,
++    v1 = a1
++  };
++
++  // Condition flag registers.
++  enum CFRegister {
++    fcc0,
++    fcc1,
++    fcc2,
++    fcc3,
++    fcc4,
++    fcc5,
++    fcc6,
++    fcc7,
++    kNumCFRegisters
++  };
++
++  // Floating point registers.
++  enum FPURegister {
++    f0,
++    f1,
++    f2,
++    f3,
++    f4,
++    f5,
++    f6,
++    f7,
++    f8,
++    f9,
++    f10,
++    f11,
++    f12,
++    f13,
++    f14,
++    f15,
++    f16,
++    f17,
++    f18,
++    f19,
++    f20,
++    f21,
++    f22,
++    f23,
++    f24,
++    f25,
++    f26,
++    f27,
++    f28,
++    f29,
++    f30,
++    f31,
++    kNumFPURegisters
++  };
++
++  explicit Simulator(Isolate* isolate);
++  ~Simulator();
++
++  // The currently executing Simulator instance. Potentially there can be one
++  // for each native thread.
++  V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
++
++  // Accessors for register state. Reading the pc value adheres to the LOONG64
++  // architecture specification and is off by 8 from the currently executing
++  // instruction.
++  void set_register(int reg, int64_t value);
++  void set_register_word(int reg, int32_t value);
++  void set_dw_register(int dreg, const int* dbl);
++  int64_t get_register(int reg) const;
++  double get_double_from_register_pair(int reg);
++  // Same for FPURegisters.
++  void set_fpu_register(int fpureg, int64_t value);
++  void set_fpu_register_word(int fpureg, int32_t value);
++  void set_fpu_register_hi_word(int fpureg, int32_t value);
++  void set_fpu_register_float(int fpureg, float value);
++  void set_fpu_register_double(int fpureg, double value);
++  void set_fpu_register_invalid_result64(float original, float rounded);
++  void set_fpu_register_invalid_result(float original, float rounded);
++  void set_fpu_register_word_invalid_result(float original, float rounded);
++  void set_fpu_register_invalid_result64(double original, double rounded);
++  void set_fpu_register_invalid_result(double original, double rounded);
++  void set_fpu_register_word_invalid_result(double original, double rounded);
++  int64_t get_fpu_register(int fpureg) const;
++  int32_t get_fpu_register_word(int fpureg) const;
++  int32_t get_fpu_register_signed_word(int fpureg) const;
++  int32_t get_fpu_register_hi_word(int fpureg) const;
++  float get_fpu_register_float(int fpureg) const;
++  double get_fpu_register_double(int fpureg) const;
++  void set_cf_register(int cfreg, bool value);
++  bool get_cf_register(int cfreg) const;
++  void set_fcsr_rounding_mode(FPURoundingMode mode);
++  unsigned int get_fcsr_rounding_mode();
++  void set_fcsr_bit(uint32_t cc, bool value);
++  bool test_fcsr_bit(uint32_t cc);
++  bool set_fcsr_round_error(double original, double rounded);
++  bool set_fcsr_round64_error(double original, double rounded);
++  bool set_fcsr_round_error(float original, float rounded);
++  bool set_fcsr_round64_error(float original, float rounded);
++  void round_according_to_fcsr(double toRound, double* rounded,
++                               int32_t* rounded_int);
++  void round64_according_to_fcsr(double toRound, double* rounded,
++                                 int64_t* rounded_int);
++  void round_according_to_fcsr(float toRound, float* rounded,
++                               int32_t* rounded_int);
++  void round64_according_to_fcsr(float toRound, float* rounded,
++                                 int64_t* rounded_int);
++  // Special case of set_register and get_register to access the raw PC value.
++  void set_pc(int64_t value);
++  int64_t get_pc() const;
++
++  Address get_sp() const { return static_cast<Address>(get_register(sp)); }
++
++  // Accessor to the internal simulator stack area.
++  uintptr_t StackLimit(uintptr_t c_limit) const;
++
++  // Executes LOONG64 instructions until the PC reaches end_sim_pc.
++  void Execute();
++
++  template <typename Return, typename... Args>
++  Return Call(Address entry, Args... args) {
++    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
++  }
++
++  // Alternative: call a 2-argument double function.
++  double CallFP(Address entry, double d0, double d1);
++
++  // Push an address onto the JS stack.
++  uintptr_t PushAddress(uintptr_t address);
++
++  // Pop an address from the JS stack.
++  uintptr_t PopAddress();
++
++  // Debugger input.
++  void set_last_debugger_input(char* input);
++  char* last_debugger_input() { return last_debugger_input_; }
++
++  // Redirection support.
++  static void SetRedirectInstruction(Instruction* instruction);
++
++  // ICache checking.
++  static bool ICacheMatch(void* one, void* two);
++  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
++                          size_t size);
++
++  // Returns true if pc register contains one of the 'special_values' defined
++  // below (bad_ra, end_sim_pc).
++  bool has_bad_pc() const;
++
++ private:
++  enum special_values {
++    // Known bad pc value to ensure that the simulator does not execute
++    // without being properly setup.
++    bad_ra = -1,
++    // A pc value used to signal the simulator to stop execution. Generally
++    // the ra is set to this value on transition from native C code to
++    // simulated execution, so that the simulator can "return" to the native
++    // C code.
++    end_sim_pc = -2,
++    // Unpredictable value.
++    Unpredictable = 0xbadbeaf
++  };
++
++  V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
++                                      const intptr_t* arguments);
++
++  // Unsupported instructions use Format to print an error and stop execution.
++  void Format(Instruction* instr, const char* format);
++
++  // Helpers for data value tracing.
++  enum TraceType {
++    BYTE,
++    HALF,
++    WORD,
++    DWORD,
++    FLOAT,
++    DOUBLE,
++    FLOAT_DOUBLE,
++    WORD_DWORD
++  };
++
++  // Read and write memory.
++  inline uint32_t ReadBU(int64_t addr);
++  inline int32_t ReadB(int64_t addr);
++  inline void WriteB(int64_t addr, uint8_t value);
++  inline void WriteB(int64_t addr, int8_t value);
++
++  inline uint16_t ReadHU(int64_t addr, Instruction* instr);
++  inline int16_t ReadH(int64_t addr, Instruction* instr);
++  // Note: Overloaded on the sign of the value.
++  inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
++  inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
++
++  inline uint32_t ReadWU(int64_t addr, Instruction* instr);
++  inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
++  inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
++  void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
++                         int32_t rt_reg);
++  inline int64_t Read2W(int64_t addr, Instruction* instr);
++  inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
++  inline void WriteConditional2W(int64_t addr, int64_t value,
++                                 Instruction* instr, int32_t rt_reg);
++
++  inline double ReadD(int64_t addr, Instruction* instr);
++  inline void WriteD(int64_t addr, double value, Instruction* instr);
++
++  template <typename T>
++  T ReadMem(int64_t addr, Instruction* instr);
++  template <typename T>
++  void WriteMem(int64_t addr, T value, Instruction* instr);
++
++  // Helper for debugging memory access.
++  inline void DieOrDebug();
++
++  void TraceRegWr(int64_t value, TraceType t = DWORD);
++  void TraceMemWr(int64_t addr, int64_t value, TraceType t);
++  void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
++  template <typename T>
++  void TraceMemRd(int64_t addr, T value);
++  template <typename T>
++  void TraceMemWr(int64_t addr, T value);
++
++  SimInstruction instr_;
++
++  // Executing is handled based on the instruction type.
++  void DecodeTypeOp6();
++  void DecodeTypeOp7();
++  void DecodeTypeOp8();
++  void DecodeTypeOp10();
++  void DecodeTypeOp12();
++  void DecodeTypeOp14();
++  void DecodeTypeOp17();
++  void DecodeTypeOp22();
++
++  inline int32_t rj_reg() const { return instr_.RjValue(); }
++  inline int64_t rj() const { return get_register(rj_reg()); }
++  inline uint64_t rj_u() const {
++    return static_cast<uint64_t>(get_register(rj_reg()));
++  }
++  inline int32_t rk_reg() const { return instr_.RkValue(); }
++  inline int64_t rk() const { return get_register(rk_reg()); }
++  inline uint64_t rk_u() const {
++    return static_cast<uint64_t>(get_register(rk_reg()));
++  }
++  inline int32_t rd_reg() const { return instr_.RdValue(); }
++  inline int64_t rd() const { return get_register(rd_reg()); }
++  inline uint64_t rd_u() const {
++    return static_cast<uint64_t>(get_register(rd_reg()));
++  }
++  inline int32_t fa_reg() const { return instr_.FaValue(); }
++  inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
++  inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
++  inline int32_t fj_reg() const { return instr_.FjValue(); }
++  inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
++  inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
++  inline int32_t fk_reg() const { return instr_.FkValue(); }
++  inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
++  inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
++  inline int32_t fd_reg() const { return instr_.FdValue(); }
++  inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
++  inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
++  inline int32_t cj_reg() const { return instr_.CjValue(); }
++  inline bool cj() const { return get_cf_register(cj_reg()); }
++  inline int32_t cd_reg() const { return instr_.CdValue(); }
++  inline bool cd() const { return get_cf_register(cd_reg()); }
++  inline int32_t ca_reg() const { return instr_.CaValue(); }
++  inline bool ca() const { return get_cf_register(ca_reg()); }
++  inline uint32_t sa2() const { return instr_.Sa2Value(); }
++  inline uint32_t sa3() const { return instr_.Sa3Value(); }
++  inline uint32_t ui5() const { return instr_.Ui5Value(); }
++  inline uint32_t ui6() const { return instr_.Ui6Value(); }
++  inline uint32_t lsbw() const { return instr_.LsbwValue(); }
++  inline uint32_t msbw() const { return instr_.MsbwValue(); }
++  inline uint32_t lsbd() const { return instr_.LsbdValue(); }
++  inline uint32_t msbd() const { return instr_.MsbdValue(); }
++  inline uint32_t cond() const { return instr_.CondValue(); }
++  inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
++  inline uint32_t ui12() const { return instr_.Ui12Value(); }
++  inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
++  inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
++  inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
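++
++  // [Editor's note, illustrative only -- not part of the upstream patch] The
++  // shift pairs above sign-extend a narrow immediate field that arrives
++  // zero-extended in an int32_t: for a 12-bit field, (x << 20) >> 20 pushes
++  // bit 11 up to bit 31 and the arithmetic right shift smears it back down.
++  //   si12 raw 0x800 (-2048): 0x800 << 20 = 0x80000000 -> >> 20 = 0xFFFFF800
++  //   si12 raw 0x7FF (+2047): 0x7FF << 20 = 0x7FF00000 -> >> 20 = 0x000007FF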
++
++  inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
++    set_register(rd_reg, alu_out);
++    TraceRegWr(alu_out);
++  }
++
++  inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
++    set_fpu_register_word(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg), WORD);
++  }
++
++  inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
++    set_fpu_register_word(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg));
++  }
++
++  inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
++    set_fpu_register(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg));
++  }
++
++  inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
++    set_fpu_register(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
++  }
++
++  inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
++    set_fpu_register_float(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg), FLOAT);
++  }
++
++  inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
++    set_fpu_register_double(fd_reg, alu_out);
++    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
++  }
++
++  // Used for breakpoints.
++  void SoftwareInterrupt();
++
++  // Stop helper functions.
++  bool IsWatchpoint(uint64_t code);
++  void PrintWatchpoint(uint64_t code);
++  void HandleStop(uint64_t code, Instruction* instr);
++  bool IsStopInstruction(Instruction* instr);
++  bool IsEnabledStop(uint64_t code);
++  void EnableStop(uint64_t code);
++  void DisableStop(uint64_t code);
++  void IncreaseStopCounter(uint64_t code);
++  void PrintStopInfo(uint64_t code);
++
++  // Executes one instruction.
++  void InstructionDecode(Instruction* instr);
++  // Execute one instruction placed in a branch delay slot.
++
++  // ICache.
++  static void CheckICache(base::CustomMatcherHashMap* i_cache,
++                          Instruction* instr);
++  static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
++                           size_t size);
++  static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
++                                 void* page);
++
++  enum Exception {
++    none,
++    kIntegerOverflow,
++    kIntegerUnderflow,
++    kDivideByZero,
++    kNumExceptions
++  };
++
++  // Exceptions.
++  void SignalException(Exception e);
++
++  // Handle arguments and return value for runtime FP functions.
++  void GetFpArgs(double* x, double* y, int32_t* z);
++  void SetFpResult(const double& result);
++
++  void CallInternal(Address entry);
++
++  // Architecture state.
++  // Registers.
++  int64_t registers_[kNumSimuRegisters];
++  // Floating point Registers.
++  int64_t FPUregisters_[kNumFPURegisters];
++  // Condition flags Registers.
++  bool CFregisters_[kNumCFRegisters];
++  // FPU control register.
++  uint32_t FCSR_;
++
++  // Simulator support.
++  // Allocate 1MB for stack.
++  size_t stack_size_;
++  char* stack_;
++  bool pc_modified_;
++  int64_t icount_;
++  int break_count_;
++  EmbeddedVector<char, 128> trace_buf_;
++
++  // Debugger input.
++  char* last_debugger_input_;
++
++  v8::internal::Isolate* isolate_;
++
++  // Registered breakpoints.
++  Instruction* break_pc_;
++  Instr break_instr_;
++
++  // Stop is disabled if bit 31 is set.
++  static const uint32_t kStopDisabledBit = 1 << 31;
++
++  // A stop is enabled, meaning the simulator will stop when meeting the
++  // instruction, if bit 31 of watched_stops_[code].count is unset.
++  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
++  // the breakpoint was hit or gone through.
++  struct StopCountAndDesc {
++    uint32_t count;
++    char* desc;
++  };
++  StopCountAndDesc watched_stops_[kMaxStopCode + 1];
++
++  // Synchronization primitives.
++  enum class MonitorAccess {
++    Open,
++    RMW,
++  };
++
++  enum class TransactionSize {
++    None = 0,
++    Word = 4,
++    DoubleWord = 8,
++  };
++
++  // The least-significant bits of the address are ignored. The number of bits
++  // is implementation-defined, between 3 and minimum page size.
++  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
++
++  class LocalMonitor {
++   public:
++    LocalMonitor();
++
++    // These functions manage the state machine for the local monitor, but do
++    // not actually perform loads and stores. NotifyStoreConditional only
++    // returns true if the store conditional is allowed; the global monitor will
++    // still have to be checked to see whether the memory should be updated.
++    void NotifyLoad();
++    void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
++    void NotifyStore();
++    bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
++
++   private:
++    void Clear();
++
++    MonitorAccess access_state_;
++    uintptr_t tagged_addr_;
++    TransactionSize size_;
++  };
++
++  class GlobalMonitor {
++   public:
++    class LinkedAddress {
++     public:
++      LinkedAddress();
++
++     private:
++      friend class GlobalMonitor;
++      // These functions manage the state machine for the global monitor, but do
++      // not actually perform loads and stores.
++      void Clear_Locked();
++      void NotifyLoadLinked_Locked(uintptr_t addr);
++      void NotifyStore_Locked();
++      bool NotifyStoreConditional_Locked(uintptr_t addr,
++                                         bool is_requesting_thread);
++
++      MonitorAccess access_state_;
++      uintptr_t tagged_addr_;
++      LinkedAddress* next_;
++      LinkedAddress* prev_;
++      // A scd can fail due to background cache evictions. Rather than
++      // simulating this, we'll just occasionally introduce cases where a
++      // store conditional fails. This will happen once after every
++      // kMaxFailureCounter exclusive stores.
++      static const int kMaxFailureCounter = 5;
++      int failure_counter_;
++    };
++
++    // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
++    base::Mutex mutex;
++
++    void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
++    void NotifyStore_Locked(LinkedAddress* linked_address);
++    bool NotifyStoreConditional_Locked(uintptr_t addr,
++                                       LinkedAddress* linked_address);
++
++    // Called when the simulator is destroyed.
++    void RemoveLinkedAddress(LinkedAddress* linked_address);
++
++    static GlobalMonitor* Get();
++
++   private:
++    // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
++    GlobalMonitor() = default;
++    friend class base::LeakyObject<GlobalMonitor>;
++
++    bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
++    void PrependProcessor_Locked(LinkedAddress* linked_address);
++
++    LinkedAddress* head_ = nullptr;
++  };
++
++  LocalMonitor local_monitor_;
++  GlobalMonitor::LinkedAddress global_monitor_thread_;
++};
++
++}  // namespace internal
++}  // namespace v8
++
++#endif  // defined(USE_SIMULATOR)
++#endif  // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
+index 72f28363..98c50263 100644
+--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
++++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
+@@ -28,6 +28,8 @@ namespace internal {
+ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+                                 Simulator::GlobalMonitor::Get)
+ 
++// #define PRINT_SIM_LOG
++
+ // Util functions.
+ inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
+ 
+@@ -57,6 +59,17 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+   return u1 * v1 + w2 + (w1 >> 32);
+ }
+ 
++#ifdef PRINT_SIM_LOG
++inline void printf_instr(const char* _Format, ...) {
++  va_list varList;
++  va_start(varList, _Format);
++  vprintf(_Format, varList);
++  va_end(varList);
++}
++#else
++#define printf_instr(...)
++#endif
++
+ // This macro provides a platform independent use of sscanf. The reason for
+ // SScanF not being implemented in a platform independent way through
+ // ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
+@@ -2195,6 +2208,7 @@ void Simulator::SoftwareInterrupt() {
+   uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
+   // We first check if we met a call_rt_redirected.
+   if (instr_.InstructionBits() == rtCallRedirInstr) {
++    printf_instr("Simulator::SoftwareInterrupt: BREAK 0xFFFFF\n");
+     Redirection* redirection = Redirection::FromInstruction(instr_.instr());
+ 
+     int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+@@ -2723,6 +2737,9 @@ void Simulator::DecodeTypeRegisterSRsType() {
+                                                 KeepSign::yes, fs));
+       break;
+     case SQRT_S:
++      printf_instr("sqrt_s\t %s: %016f, %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd,
++                   FPURegisters::Name(fs_reg()), fs);
+       SetFPUFloatResult(
+           fd_reg(),
+           FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
+@@ -3115,6 +3132,10 @@ void Simulator::DecodeTypeRegisterDRsType() {
+                         [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
+       break;
+     case SUB_D:
++      printf_instr("sub_d\t %s: %016f, %s: %016f, %s: %016f\n",
++                   FPURegisters::Name(fd_reg()), fd,
++                   FPURegisters::Name(fs_reg()), fs,
++                   FPURegisters::Name(ft_reg()), ft);
+       SetFPUDoubleResult(
+           fd_reg(),
+           FPUCanonalizeOperation(
+@@ -3381,6 +3402,10 @@ void Simulator::DecodeTypeRegisterWRsType() {
+   int64_t alu_out = 0x12345678;
+   switch (instr_.FunctionFieldRaw()) {
+     case CVT_S_W:  // Convert word to float (single).
++      printf_instr(
++          "CVT_S_W \t %s: %016f, %s: %016x\n", FPURegisters::Name(fd_reg()),
++          get_fpu_register_float(fd_reg()), FPURegisters::Name(fs_reg()),
++          get_fpu_register_signed_word(fs_reg()));
+       alu_out = get_fpu_register_signed_word(fs_reg());
+       SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+       break;
+@@ -3476,6 +3501,10 @@ void Simulator::DecodeTypeRegisterLRsType() {
+       SetFPUDoubleResult(fd_reg(), static_cast<double>(i64));
+       break;
+     case CVT_S_L:
++      printf_instr("CVT_S_L \t %s: %016f, %s: %016x\n",
++                   FPURegisters::Name(fd_reg()),
++                   get_fpu_register_float(fd_reg()),
++                   FPURegisters::Name(fs_reg()), get_fpu_register(fs_reg()));
+       i64 = get_fpu_register(fs_reg());
+       SetFPUFloatResult(fd_reg(), static_cast<float>(i64));
+       break;
+@@ -3569,11 +3598,17 @@ void Simulator::DecodeTypeRegisterCOP1() {
+       SetResult(rt_reg(), FCSR_);
+       break;
+     case MFC1:
++      printf_instr("MFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()),
++                   rt(), FPURegisters::Name(fs_reg()),
++                   get_fpu_register_float(fs_reg()));
+       set_register(rt_reg(),
+                    static_cast<int64_t>(get_fpu_register_word(fs_reg())));
+       TraceRegWr(get_register(rt_reg()), WORD_DWORD);
+       break;
+     case DMFC1:
++      printf_instr(
++          "DMFC1 \t %s: %016lx, %s: %016f\n", Registers::Name(rt_reg()), rt(),
++          FPURegisters::Name(fs_reg()), get_fpu_register_double(fs_reg()));
+       SetResult(rt_reg(), get_fpu_register(fs_reg()));
+       break;
+     case MFHC1:
+@@ -3593,12 +3628,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
+       break;
+     }
+     case MTC1:
++      printf_instr(
++          "MTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()),
++          get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt());
+       // Hardware writes upper 32-bits to zero on mtc1.
+       set_fpu_register_hi_word(fs_reg(), 0);
+       set_fpu_register_word(fs_reg(), static_cast<int32_t>(rt()));
+       TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE);
+       break;
+     case DMTC1:
++      printf_instr(
++          "DMTC1 \t %s: %016f, %s: %016lx\n", FPURegisters::Name(fs_reg()),
++          get_fpu_register_float(fs_reg()), Registers::Name(rt_reg()), rt());
+       SetFPUResult2(fs_reg(), rt());
+       break;
+     case MTHC1:
+@@ -3683,6 +3724,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+     case JR: {
+       int64_t next_pc = rs();
+       int64_t current_pc = get_pc();
++      printf_instr("JALR\t %s: %016lx\n", Registers::Name(rs_reg()), rs());
+       Instruction* branch_delay_instr =
+           reinterpret_cast<Instruction*>(current_pc + kInstrSize);
+       BranchDelayInstructionDecode(branch_delay_instr);
+@@ -3694,6 +3736,8 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       int64_t next_pc = rs();
+       int64_t current_pc = get_pc();
+       int32_t return_addr_reg = rd_reg();
++      printf_instr("JALR\t %s: %016lx, %s: %016lx\n", Registers::Name(rd_reg()),
++                   get_register(rd_reg()), Registers::Name(rs_reg()), rs());
+       Instruction* branch_delay_instr =
+           reinterpret_cast<Instruction*>(current_pc + kInstrSize);
+       BranchDelayInstructionDecode(branch_delay_instr);
+@@ -3703,21 +3747,36 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     }
+     case SLL:
++      printf_instr("SLL\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), static_cast<int32_t>(rt()) << sa());
+       break;
+     case DSLL:
++      printf_instr("DSLL\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), rt() << sa());
+       break;
+     case DSLL32:
++      printf_instr("DSLL32\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), rt() << sa() << 32);
+       break;
+     case SRL:
+       if (rs_reg() == 0) {
++        printf_instr("SRL\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Regular logical right shift of a word by a fixed number of
+         // bits instruction. RS field is always equal to 0.
+         // Sign-extend the 32-bit result.
+         alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u()) >> sa());
+       } else if (rs_reg() == 1) {
++        printf_instr("ROTR\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Logical right-rotate of a word by a fixed number of bits. This
+         // is special case of SRL instruction, added in MIPS32 Release 2.
+         // RS field is equal to 00001.
+@@ -3731,11 +3790,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     case DSRL:
+       if (rs_reg() == 0) {
++        printf_instr("DSRL\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Regular logical right shift of a word by a fixed number of
+         // bits instruction. RS field is always equal to 0.
+         // Sign-extend the 64-bit result.
+         alu_out = static_cast<int64_t>(rt_u() >> sa());
+       } else if (rs_reg() == 1) {
++        printf_instr("DROTR\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Logical right-rotate of a word by a fixed number of bits. This
+         // is special case of SRL instruction, added in MIPS32 Release 2.
+         // RS field is equal to 00001.
+@@ -3747,11 +3812,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     case DSRL32:
+       if (rs_reg() == 0) {
++        printf_instr("DSRL32\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Regular logical right shift of a word by a fixed number of
+         // bits instruction. RS field is always equal to 0.
+         // Sign-extend the 64-bit result.
+         alu_out = static_cast<int64_t>(rt_u() >> sa() >> 32);
+       } else if (rs_reg() == 1) {
++        printf_instr("DROTR32\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), sa());
+         // Logical right-rotate of a word by a fixed number of bits. This
+         // is special case of SRL instruction, added in MIPS32 Release 2.
+         // RS field is equal to 00001.
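+ // [Editor's sketch, not part of the upstream patch] For reference, the
+ // rotate-right that the ROTR/DROTR cases above model can be written as a
+ // standalone helper (rotr32 is hypothetical, shown for clarity only):
+ //   uint32_t rotr32(uint32_t x, unsigned n) {
+ //     n &= 31;
+ //     return n == 0 ? x : (x >> n) | (x << (32 - n));
+ //   }
+ // e.g. rotr32(0x80000001, 1) == 0xC0000000.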
+@@ -3763,26 +3834,51 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       SetResult(rd_reg(), alu_out);
+       break;
+     case SRA:
++      printf_instr("SRA\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), (int32_t)rt() >> sa());
+       break;
+     case DSRA:
++      printf_instr("DSRA\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), rt() >> sa());
+       break;
+     case DSRA32:
++      printf_instr("DSRA32\t %s: %016lx, %s: %016lx, sa: %02x\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), sa());
+       SetResult(rd_reg(), rt() >> sa() >> 32);
+       break;
+     case SLLV:
++      printf_instr("SLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                   rs());
+       SetResult(rd_reg(), (int32_t)rt() << rs());
+       break;
+     case DSLLV:
++      printf_instr("DSLLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                   rs());
+       SetResult(rd_reg(), rt() << rs());
+       break;
+     case SRLV:
+       if (sa() == 0) {
++        printf_instr("SRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                     rs());
+         // Regular logical right-shift of a word by a variable number of
+         // bits instruction. SA field is always equal to 0.
+         alu_out = static_cast<int64_t>((uint32_t)rt_u() >> rs());
+       } else {
++        printf_instr("ROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                     rs());
+         // Logical right-rotate of a word by a variable number of bits.
+         // This is special case of SRLV instruction, added in MIPS32
+         // Release 2. SA field is equal to 00001.
+@@ -3794,10 +3890,18 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     case DSRLV:
+       if (sa() == 0) {
++        printf_instr("SRLV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                     rs());
+         // Regular logical right-shift of a word by a variable number of
+         // bits instruction. SA field is always equal to 0.
+         alu_out = static_cast<int64_t>(rt_u() >> rs());
+       } else {
++        printf_instr("DROTRV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                     Registers::Name(rd_reg()), get_register(rd_reg()),
++                     Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                     rs());
+         // Logical right-rotate of a word by a variable number of bits.
+         // This is special case of SRLV instruction, added in MIPS32
+         // Release 2. SA field is equal to 00001.
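+ // [Editor's note, illustrative only -- not part of the upstream patch]
+ // Several 32-bit cases below (ADDU, SUBU, ADDIU, ...) compute a 32-bit result
+ // and then sign-extend it into the 64-bit register, mirroring MIPS64
+ // semantics for 32-bit operations:
+ //   int32_t r32 = static_cast<int32_t>(x + y);
+ //   int64_t reg = static_cast<int64_t>(r32);
+ // e.g. r32 = 0x80000000 widens to reg = 0xFFFFFFFF80000000.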
+@@ -3807,9 +3911,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       SetResult(rd_reg(), alu_out);
+       break;
+     case SRAV:
++      printf_instr("SRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                   rs());
+       SetResult(rd_reg(), (int32_t)rt() >> rs());
+       break;
+     case DSRAV:
++      printf_instr("DSRAV\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rt_reg()), rt(), Registers::Name(rs_reg()),
++                   rs());
+       SetResult(rd_reg(), rt() >> rs());
+       break;
+     case LSA: {
+@@ -4018,6 +4130,10 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     case ADD:
+     case DADD:
++      printf_instr("DADD\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       if (HaveSameSign(rs(), rt())) {
+         if (rs() > 0) {
+           if (rs() > (Registers::kMaxValue - rt())) {
+@@ -4032,16 +4148,28 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       SetResult(rd_reg(), rs() + rt());
+       break;
+     case ADDU: {
++      printf_instr("ADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       int32_t alu32_out = static_cast<int32_t>(rs() + rt());
+       // Sign-extend result of 32bit operation into 64bit register.
+       SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+       break;
+     }
+     case DADDU:
++      printf_instr("DADDU\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() + rt());
+       break;
+     case SUB:
+     case DSUB:
++      printf_instr("DSUB\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       if (!HaveSameSign(rs(), rt())) {
+         if (rs() > 0) {
+           if (rs() > (Registers::kMaxValue + rt())) {
+@@ -4056,30 +4184,62 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       SetResult(rd_reg(), rs() - rt());
+       break;
+     case SUBU: {
++      printf_instr("SUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       int32_t alu32_out = static_cast<int32_t>(rs() - rt());
+       // Sign-extend result of 32bit operation into 64bit register.
+       SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+       break;
+     }
+     case DSUBU:
++      printf_instr("DSUBU\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() - rt());
+       break;
+     case AND:
++      printf_instr("AND\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() & rt());
+       break;
+     case OR:
++      printf_instr("OR\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() | rt());
+       break;
+     case XOR:
++      printf_instr("XOR\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() ^ rt());
+       break;
+     case NOR:
++      printf_instr("NOR\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), ~(rs() | rt()));
+       break;
+     case SLT:
++      printf_instr("SLT\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs() < rt() ? 1 : 0);
+       break;
+     case SLTU:
++      printf_instr("SLTU\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0);
+       break;
+     // Break and trap instructions.
+@@ -4106,9 +4266,14 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
+       break;
+     case SYNC:
+       // TODO(palfia): Ignore sync instruction for now.
++      printf_instr("sync\n");
+       break;
+     // Conditional moves.
+     case MOVN:
++      printf_instr("MOVN\t %s: %016lx, %s: %016lx, %s: %016lx\n",
++                   Registers::Name(rd_reg()), get_register(rd_reg()),
++                   Registers::Name(rs_reg()), rs(), Registers::Name(rt_reg()),
++                   rt());
+       if (rt()) {
+         SetResult(rd_reg(), rs());
+       }
+@@ -4173,6 +4338,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
+       // Interpret sa field as 5-bit lsb of extract.
+       uint16_t lsb = sa();
+       uint16_t size = msbd + 1;
++      printf_instr("EXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n",
++                   Registers::Name(rt_reg()), get_register(rt_reg()),
++                   Registers::Name(rs_reg()), rs(), lsb, size);
+       uint64_t mask = (1ULL << size) - 1;
+       alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+       SetResult(rt_reg(), alu_out);
+@@ -4184,6 +4352,9 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
+       // Interpret sa field as 5-bit lsb of extract.
+       uint16_t lsb = sa();
+       uint16_t size = msbd + 1;
++      printf_instr("DEXT\t %s: %016lx, %s: %016lx, pos: %d, size: %d\n",
++                   Registers::Name(rt_reg()), get_register(rt_reg()),
++                   Registers::Name(rs_reg()), rs(), lsb, size);
+       uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
+       alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+       SetResult(rt_reg(), alu_out);
+@@ -6553,6 +6724,7 @@ void Simulator::DecodeTypeImmediate() {
+       [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
+         execute_branch_delay_instruction = true;
+         int64_t current_pc = get_pc();
++        printf_instr("Offs16: %04x\n", instr_.Imm16Value());
+         set_register(31, current_pc + 2 * kInstrSize);
+         if (do_branch) {
+           int16_t imm16 = instr_.Imm16Value();
+@@ -6565,6 +6737,7 @@ void Simulator::DecodeTypeImmediate() {
+   auto BranchHelper = [this, &next_pc,
+                        &execute_branch_delay_instruction](bool do_branch) {
+     execute_branch_delay_instruction = true;
++    printf_instr("Offs16: %04x\n", instr_.Imm16Value());
+     int64_t current_pc = get_pc();
+     if (do_branch) {
+       int16_t imm16 = instr_.Imm16Value();
+@@ -6601,6 +6774,7 @@ void Simulator::DecodeTypeImmediate() {
+   auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) {
+     int64_t current_pc = get_pc();
+     CheckForbiddenSlot(current_pc);
++    printf_instr("Offs: %08x\n", instr_.ImmValue(bits));
+     if (do_branch) {
+       int32_t imm = instr_.ImmValue(bits);
+       imm <<= 32 - bits;
+@@ -6613,6 +6787,7 @@ void Simulator::DecodeTypeImmediate() {
+   auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) {
+     int64_t current_pc = get_pc();
+     CheckForbiddenSlot(current_pc);
++    printf_instr("Offs: %08x\n", instr_.ImmValue(bits));
+     if (do_branch) {
+       int32_t imm = instr_.ImmValue(bits);
+       imm <<= 32 - bits;
+@@ -6707,15 +6882,19 @@ void Simulator::DecodeTypeImmediate() {
+     case REGIMM:
+       switch (instr_.RtFieldRaw()) {
+         case BLTZ:
++          printf_instr("BLTZ\t %s: %016lx, ", Registers::Name(rs_reg), rs);
+           BranchHelper(rs < 0);
+           break;
+         case BGEZ:
++          printf_instr("BGEZ\t %s: %016lx, ", Registers::Name(rs_reg), rs);
+           BranchHelper(rs >= 0);
+           break;
+         case BLTZAL:
++          printf_instr("BLTZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs);
+           BranchAndLinkHelper(rs < 0);
+           break;
+         case BGEZAL:
++          printf_instr("BGEZAL\t %s: %016lx, ", Registers::Name(rs_reg), rs);
+           BranchAndLinkHelper(rs >= 0);
+           break;
+         case DAHI:
+@@ -6732,9 +6911,13 @@ void Simulator::DecodeTypeImmediate() {
+     // When comparing to zero, the encoding of rt field is always 0, so we don't
+     // need to replace rt with zero.
+     case BEQ:
++      printf_instr("BEQ\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg),
++                   rs, Registers::Name(rt_reg), rt);
+       BranchHelper(rs == rt);
+       break;
+     case BNE:
++      printf_instr("BNE\t %s: %016lx, %s: %016lx, ", Registers::Name(rs_reg),
++                   rs, Registers::Name(rt_reg), rt);
+       BranchHelper(rs != rt);
+       break;
+     case POP06:  // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6)
+@@ -6754,6 +6937,7 @@ void Simulator::DecodeTypeImmediate() {
+             BranchHelper(rs <= 0);
+           }
+         } else {  // BLEZ
++          printf_instr("BLEZ\t %s: %016lx", Registers::Name(rs_reg), rs);
+           BranchHelper(rs <= 0);
+         }
+         break;
+@@ -6774,6 +6958,7 @@ void Simulator::DecodeTypeImmediate() {
+             BranchHelper(rs > 0);
+           }
+         } else {  // BGTZ
++          printf_instr("BGTZ\t %s: %016lx", Registers::Name(rs_reg), rs);
+           BranchHelper(rs > 0);
+         }
+         break;
+@@ -6791,6 +6976,7 @@ void Simulator::DecodeTypeImmediate() {
+           }
+         }
+       } else {  // BLEZL
++        printf_instr("BLEZL\t %s: %016lx", Registers::Name(rs_reg), rs);
+         BranchAndLinkHelper(rs <= 0);
+       }
+       break;
+@@ -6808,6 +6994,7 @@ void Simulator::DecodeTypeImmediate() {
+           }
+         }
+       } else {  // BGTZL
++        printf_instr("BGTZL\t %s: %016lx", Registers::Name(rs_reg), rs);
+         BranchAndLinkHelper(rs > 0);
+       }
+       break;
+@@ -6846,6 +7033,9 @@ void Simulator::DecodeTypeImmediate() {
+           }
+         }
+       } else {  // ADDI
++        printf_instr("ADDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                     Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                     se_imm16);
+         if (HaveSameSign(rs, se_imm16)) {
+           if (rs > 0) {
+             if (rs <= Registers::kMaxValue - se_imm16) {
+@@ -6876,27 +7066,48 @@ void Simulator::DecodeTypeImmediate() {
+       break;
+     // ------------- Arithmetic instructions.
+     case ADDIU: {
++      printf_instr("ADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   se_imm16);
+       int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
+       // Sign-extend result of 32bit operation into 64bit register.
+       SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+       break;
+     }
+     case DADDIU:
++      printf_instr("DADDIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   se_imm16);
+       SetResult(rt_reg, rs + se_imm16);
+       break;
+     case SLTI:
++      printf_instr("SLTI\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   se_imm16);
+       SetResult(rt_reg, rs < se_imm16 ? 1 : 0);
+       break;
+     case SLTIU:
++      printf_instr("SLTIU\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   se_imm16);
+       SetResult(rt_reg, rs_u < static_cast<uint64_t>(se_imm16) ? 1 : 0);
+       break;
+     case ANDI:
++      printf_instr("ANDI\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   oe_imm16);
+       SetResult(rt_reg, rs & oe_imm16);
+       break;
+     case ORI:
++      printf_instr("ORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   oe_imm16);
+       SetResult(rt_reg, rs | oe_imm16);
+       break;
+     case XORI:
++      printf_instr("XORI\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++                   Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++                   oe_imm16);
+       SetResult(rt_reg, rs ^ oe_imm16);
+       break;
+     case LUI:
+@@ -6907,6 +7118,8 @@ void Simulator::DecodeTypeImmediate() {
+         SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+       } else {
+         // LUI instruction.
++ printf_instr("LUI\t %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, se_imm16); + int32_t alu32_out = static_cast(oe_imm16 << 16); + // Sign-extend result of 32bit operation into 64bit register. + SetResult(rt_reg, static_cast(alu32_out)); +@@ -6919,12 +7132,21 @@ void Simulator::DecodeTypeImmediate() { + break; + // ------------- Memory instructions. + case LB: ++ printf_instr("LB\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadB(rs + se_imm16)); + break; + case LH: ++ printf_instr("LH\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr())); + break; + case LWL: { ++ printf_instr("LWL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + local_monitor_.NotifyLoad(); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; +@@ -6938,21 +7160,39 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LW: ++ printf_instr("LW\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr())); + break; + case LWU: ++ printf_instr("LWU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadWU(rs + se_imm16, instr_.instr())); + break; + case LD: ++ printf_instr("LD\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, Read2W(rs + se_imm16, instr_.instr())); + break; + case LBU: ++ printf_instr("LBU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadBU(rs + se_imm16)); + break; + case LHU: ++ printf_instr("LHU\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr())); + break; + case LWR: { ++ printf_instr("LWR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; +@@ -6965,6 +7205,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LDL: { ++ printf_instr("LDL\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; +@@ -6977,6 +7220,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case LDR: { ++ printf_instr("LDR\t %s: %016lx, %s: %016lx, imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + // al_offset is offset of the effective address within an aligned word. 
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; +@@ -6989,12 +7235,21 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SB: ++ printf_instr("SB\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteB(rs + se_imm16, static_cast(rt)); + break; + case SH: ++ printf_instr("SH\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteH(rs + se_imm16, static_cast(rt), instr_.instr()); + break; + case SWL: { ++ printf_instr("SWL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint8_t byte_shift = kInt32AlignmentMask - al_offset; + uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0; +@@ -7005,12 +7260,21 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SW: ++ printf_instr("SW\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + WriteW(rs + se_imm16, static_cast(rt), instr_.instr()); + break; + case SD: ++ printf_instr("SD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + Write2W(rs + se_imm16, rt, instr_.instr()); + break; + case SWR: { ++ printf_instr("SWR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask; + uint32_t mask = (1 << al_offset * 8) - 1; + addr = rs + se_imm16 - al_offset; +@@ -7020,6 +7284,9 @@ void Simulator::DecodeTypeImmediate() { + break; + } + case SDL: { ++ printf_instr("SDL\t %s: %016lx, (%s: %016lx), imm16: %04lx\n", ++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs, ++ se_imm16); + uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask; + uint8_t byte_shift = kInt64AlignmentMask - al_offset; + uint64_t mask = byte_shift ? 
(~0UL << (al_offset + 1) * 8) : 0;
+@@ -7030,6 +7297,9 @@ void Simulator::DecodeTypeImmediate() {
+ break;
+ }
+ case SDR: {
++ printf_instr("SDR\t %s: %016lx, (%s: %016lx), imm16: %04lx\n",
++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++ se_imm16);
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+ uint64_t mask = (1UL << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+@@ -7055,6 +7325,9 @@ void Simulator::DecodeTypeImmediate() {
+ break;
+ }
+ case LLD: {
++ printf_instr("LLD\t %s: %016lx, %s: %016lx, imm16: %04lx\n",
++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++ se_imm16);
+ DCHECK(kArchVariant != kMips64r6);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = rs + se_imm16;
+@@ -7065,6 +7338,9 @@ void Simulator::DecodeTypeImmediate() {
+ break;
+ }
+ case SCD: {
++ printf_instr("SCD\t %s: %016lx, (%s: %016lx), imm16: %04lx\n",
++ Registers::Name(rt_reg), rt, Registers::Name(rs_reg), rs,
++ se_imm16);
+ DCHECK(kArchVariant != kMips64r6);
+ addr = rs + se_imm16;
+ WriteConditional2W(addr, rt, instr_.instr(), rt_reg);
+@@ -7080,11 +7356,17 @@ void Simulator::DecodeTypeImmediate() {
+ TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE);
+ break;
+ case SWC1: {
++ printf_instr("SWC1\t %s: %016f, %s: %016lx, imm16: %04lx\n",
++ FPURegisters::Name(ft_reg), get_fpu_register_float(ft_reg),
++ Registers::Name(rs_reg), rs, se_imm16);
+ int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
+ WriteW(rs + se_imm16, alu_out_32, instr_.instr());
+ break;
+ }
+ case SDC1:
++ printf_instr("SDC1\t %s: %016f, %s: %016lx, imm16: %04lx\n",
++ FPURegisters::Name(ft_reg), get_fpu_register_double(ft_reg),
++ Registers::Name(rs_reg), rs, se_imm16);
+ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
+ TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD);
+ break;
+@@ -7257,6 +7539,8 @@ void Simulator::DecodeTypeJump() {
+ int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
+ // Next pc.
+ int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
++ printf_instr("%s\t", simInstr.IsLinkingInstruction() ? "JAL" : "J");
++ printf_instr("offs26: %x\n", instr_.Bits(25, 0));
+
+ // Execute branch delay slot.
+ // We don't check for end_sim_pc. First it should not be met as the current pc
+@@ -7291,7 +7575,11 @@ void Simulator::InstructionDecode(Instruction* instr) {
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ }
+
++ static int instr_count = 0;
++ USE(instr_count);
+ instr_ = instr;
++ printf_instr("\nInstr%3d: %08x, PC: %lx\t", instr_count++, instr_.Bits(31, 0),
++ get_pc());
+ switch (instr_.InstructionType()) {
+ case Instruction::kRegisterType:
+ DecodeTypeRegister();
+diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
+index 58aa753a..abcc10d9 100644
+--- a/deps/v8/src/execution/simulator-base.h
++++ b/deps/v8/src/execution/simulator-base.h
+@@ -87,7 +87,7 @@ class SimulatorBase {
+ static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
+-#if V8_TARGET_ARCH_MIPS64
++#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
+ // The MIPS64 calling convention is to sign extend all values, even unsigned
+ // ones.
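For context on that last hunk: the MIPS64 and LoongArch64 n64 ABIs keep 32-bit integer arguments sign-extended in 64-bit registers, even for unsigned types, so the simulator widens every integral argument through its signed counterpart. A standalone sketch of the conversion with a worked example:

    #include <cstdint>
    #include <type_traits>

    // Sign-extend an integral argument into a register-width value, as the
    // MIPS64/LOONG64 calling convention expects even for unsigned 32-bit types.
    template <typename T>
    intptr_t ConvertArg(T arg) {
      static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
      using signed_t = typename std::make_signed<T>::type;
      return static_cast<intptr_t>(static_cast<signed_t>(arg));
    }

    // Example: ConvertArg<uint32_t>(0x80000000u) yields 0xFFFFFFFF80000000 on
    // a 64-bit host, matching what the hardware register would hold.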
+ using signed_t = typename std::make_signed::type; +diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h +index a4e07b23..6d02114f 100644 +--- a/deps/v8/src/execution/simulator.h ++++ b/deps/v8/src/execution/simulator.h +@@ -24,6 +24,8 @@ + #include "src/execution/mips/simulator-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/execution/mips64/simulator-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/execution/loong64/simulator-loong64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/execution/s390/simulator-s390.h" + #else +diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h +index 30d5f091..618b31f3 100644 +--- a/deps/v8/src/flags/flag-definitions.h ++++ b/deps/v8/src/flags/flag-definitions.h +@@ -1270,7 +1270,7 @@ DEFINE_BOOL(check_icache, false, + "Check icache flushes in ARM and MIPS simulator") + DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions") + #if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \ +- defined(V8_TARGET_ARCH_PPC64) ++ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_LOONG64) + DEFINE_INT(sim_stack_alignment, 16, + "Stack alignment in bytes in simulator. This must be a power of two " + "and it must be at least 16. 16 is default.") +diff --git a/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc +new file mode 100644 +index 00000000..c9e6f5d2 +--- /dev/null ++++ b/deps/v8/src/heap/cppgc/asm/loong64/push_registers_asm.cc +@@ -0,0 +1,48 @@ ++// Copyright 2020 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++// Push all callee-saved registers to get them on the stack for conservative ++// stack scanning. ++// ++// See asm/x64/push_registers_clang.cc for why the function is not generated ++// using clang. ++// ++// Do not depend on V8_TARGET_OS_* defines as some embedders may override the ++// GN toolchain (e.g. ChromeOS) and not provide them. ++asm(".text \n" ++ ".global PushAllRegistersAndIterateStack \n" ++ ".type PushAllRegistersAndIterateStack, %function \n" ++ ".hidden PushAllRegistersAndIterateStack \n" ++ "PushAllRegistersAndIterateStack: \n" ++ // Push all callee-saved registers and save return address. ++ " addi.d $sp, $sp, -96 \n" ++ " st.d $ra, $sp, 88 \n" ++ " st.d $s8, $sp, 80 \n" ++ " st.d $sp, $sp, 72 \n" ++ " st.d $fp, $sp, 64 \n" ++ " st.d $s7, $sp, 56 \n" ++ " st.d $s6, $sp, 48 \n" ++ " st.d $s5, $sp, 40 \n" ++ " st.d $s4, $sp, 32 \n" ++ " st.d $s3, $sp, 24 \n" ++ " st.d $s2, $sp, 16 \n" ++ " st.d $s1, $sp, 8 \n" ++ " st.d $s0, $sp, 0 \n" ++ // Maintain frame pointer. ++ " addi.d $s8, $sp, 0 \n" ++ // Pass 1st parameter (a0) unchanged (Stack*). ++ // Pass 2nd parameter (a1) unchanged (StackVisitor*). ++ // Save 3rd parameter (a2; IterateStackCallback). ++ " addi.d $a3, $a2, 0 \n" ++ // Call the callback. ++ // Pass 3rd parameter as sp (stack pointer). ++ " addi.d $a2, $sp, 0 \n" ++ " jirl $ra, $a3, 0 \n" ++ // Load return address. ++ " ld.d $ra, $sp, 88 \n" ++ // Restore frame pointer. ++ " ld.d $s8, $sp, 80 \n" ++ // Discard all callee-saved registers. 
++ " addi.d $sp, $sp, 96 \n" ++ " jirl $zero, $ra, 0 \n"); +diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc +index 49adee5b..313ab3f7 100644 +--- a/deps/v8/src/interpreter/interpreter-assembler.cc ++++ b/deps/v8/src/interpreter/interpreter-assembler.cc +@@ -1346,7 +1346,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode target_bytecode) { + + // static + bool InterpreterAssembler::TargetSupportsUnalignedAccess() { +-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 + return false; + #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ +diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc +index 0443657d..430e4693 100644 +--- a/deps/v8/src/libsampler/sampler.cc ++++ b/deps/v8/src/libsampler/sampler.cc +@@ -415,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { + state->pc = reinterpret_cast(mcontext.pc); + state->sp = reinterpret_cast(mcontext.gregs[29]); + state->fp = reinterpret_cast(mcontext.gregs[30]); ++#elif V8_HOST_ARCH_LOONG64 ++ state->pc = reinterpret_cast(mcontext.__pc); ++ state->sp = reinterpret_cast(mcontext.__gregs[3]); ++ state->fp = reinterpret_cast(mcontext.__gregs[22]); + #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 + #if V8_LIBC_GLIBC + state->pc = reinterpret_cast(ucontext->uc_mcontext.regs->nip); +diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc +index 00edcc8c..6a3133d8 100644 +--- a/deps/v8/src/logging/log.cc ++++ b/deps/v8/src/logging/log.cc +@@ -587,6 +587,8 @@ void LowLevelLogger::LogCodeInfo() { + const char arch[] = "ppc64"; + #elif V8_TARGET_ARCH_MIPS + const char arch[] = "mips"; ++#elif V8_TARGET_ARCH_LOONG64 ++ const char arch[] = "loong64"; + #elif V8_TARGET_ARCH_ARM64 + const char arch[] = "arm64"; + #elif V8_TARGET_ARCH_S390 +diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc +index bd9f39b7..46d31cf6 100644 +--- a/deps/v8/src/objects/backing-store.cc ++++ b/deps/v8/src/objects/backing-store.cc +@@ -29,7 +29,7 @@ constexpr bool kUseGuardRegions = true; + constexpr bool kUseGuardRegions = false; + #endif + +-#if V8_TARGET_ARCH_MIPS64 ++#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 + // MIPS64 has a user space of 2^40 bytes on most processors, + // address space limits needs to be smaller. + constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB +diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h +index ea6f52cc..7ba2beff 100644 +--- a/deps/v8/src/objects/code.h ++++ b/deps/v8/src/objects/code.h +@@ -421,6 +421,8 @@ class Code : public HeapObject { + static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28; + #elif V8_TARGET_ARCH_MIPS64 + static constexpr int kHeaderPaddingSize = 28; ++#elif V8_TARGET_ARCH_LOONG64 ++ static constexpr int kHeaderPaddingSize = 28; + #elif V8_TARGET_ARCH_X64 + static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 
16 : 28; + #elif V8_TARGET_ARCH_ARM +diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc +index 00bff91c..a95f3a74 100644 +--- a/deps/v8/src/profiler/tick-sample.cc ++++ b/deps/v8/src/profiler/tick-sample.cc +@@ -104,7 +104,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, + state->sp = reinterpret_cast(simulator->sp()); + state->fp = reinterpret_cast(simulator->fp()); + state->lr = reinterpret_cast(simulator->lr()); +-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 + if (!simulator->has_bad_pc()) { + state->pc = reinterpret_cast(simulator->get_pc()); + } +diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc +new file mode 100644 +index 00000000..22b40fde +--- /dev/null ++++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc +@@ -0,0 +1,1266 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#if V8_TARGET_ARCH_LOONG64 ++ ++#include "src/regexp/loong64/regexp-macro-assembler-loong64.h" ++ ++#include "src/codegen/assembler-inl.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/logging/log.h" ++#include "src/objects/objects-inl.h" ++#include "src/regexp/regexp-macro-assembler.h" ++#include "src/regexp/regexp-stack.h" ++#include "src/snapshot/embedded/embedded-data.h" ++#include "src/strings/unicode.h" ++ ++namespace v8 { ++namespace internal { ++ ++/* clang-format off ++ * ++ * This assembler uses the following register assignment convention ++ * - t3 : Temporarily stores the index of capture start after a matching pass ++ * for a global regexp. ++ * - a5 : Pointer to current Code object including heap object tag. ++ * - a6 : Current position in input, as negative offset from end of string. ++ * Please notice that this is the byte offset, not the character offset! ++ * - a7 : Currently loaded character. Must be loaded using ++ * LoadCurrentCharacter before using any of the dispatch methods. ++ * - t0 : Points to tip of backtrack stack ++ * - t1 : Unused. ++ * - t2 : End of input (points to byte after last character in input). ++ * - fp : Frame pointer. Used to access arguments, local variables and ++ * RegExp registers. ++ * - sp : Points to tip of C stack. ++ * ++ * The remaining registers are free for computations. ++ * Each call to a public method should retain this convention. ++ * ++ * TODO(plind): O32 documented here with intent of having single 32/64 codebase ++ * in the future. ++ * ++ * The O32 stack will have the following structure: ++ * ++ * - fp[72] Isolate* isolate (address of the current isolate) ++ * - fp[68] direct_call (if 1, direct call from JavaScript code, ++ * if 0, call through the runtime system). ++ * - fp[64] stack_area_base (High end of the memory area to use as ++ * backtracking stack). ++ * - fp[60] capture array size (may fit multiple sets of matches) ++ * - fp[44..59] MIPS O32 four argument slots ++ * - fp[40] int* capture_array (int[num_saved_registers_], for output). ++ * --- sp when called --- ++ * - fp[36] return address (lr). ++ * - fp[32] old frame pointer (r11). ++ * - fp[0..31] backup of registers s0..s7. ++ * --- frame pointer ---- ++ * - fp[-4] end of input (address of end of string). ++ * - fp[-8] start of input (address of first character in string). 
++ * - fp[-12] start index (character index of start). ++ * - fp[-16] void* input_string (location of a handle containing the string). ++ * - fp[-20] success counter (only for global regexps to count matches). ++ * - fp[-24] Offset of location before start of input (effectively character ++ * string start - 1). Used to initialize capture registers to a ++ * non-position. ++ * - fp[-28] At start (if 1, we are starting at the start of the ++ * string, otherwise 0) ++ * - fp[-32] register 0 (Only positions must be stored in the first ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * ++ * The N64 stack will have the following structure: ++ * ++ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate ++ * kStackFrameHeader ++ * --- sp when called --- ++ * - fp[72] ra Return from RegExp code (ra). kReturnAddress ++ * - fp[64] s9, old-fp Old fp, callee saved(s9). ++ * - fp[0..63] s0..s7 Callee-saved registers s0..s7. ++ * --- frame pointer ---- ++ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall ++ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd ++ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters ++ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput ++ * - fp[-40] end of input (address of end of string). kInputEnd ++ * - fp[-48] start of input (address of first character in string). kInputStart ++ * - fp[-56] start index (character index of start). kStartIndex ++ * - fp[-64] void* input_string (location of a handle containing the string). kInputString ++ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures ++ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne ++ * position -1). Used to initialize capture registers to a ++ * non-position. ++ * --------- The following output registers are 32-bit values. --------- ++ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero ++ * - register 1 num_saved_registers_ registers) ++ * - ... ++ * - register num_registers-1 ++ * --- sp --- ++ * ++ * The first num_saved_registers_ registers are initialized to point to ++ * "character -1" in the string (i.e., char_size() bytes before the first ++ * character of the string). The remaining registers start out as garbage. ++ * ++ * The data up to the return address must be placed there by the calling ++ * code and the remaining arguments are passed in registers, e.g. by calling the ++ * code entry as cast to a function with the signature: ++ * int (*match)(String input_string, ++ * int start_index, ++ * Address start, ++ * Address end, ++ * int* capture_output_array, ++ * int num_capture_registers, ++ * byte* stack_area_base, ++ * bool direct_call = false, ++ * Isolate* isolate); ++ * The call is performed by NativeRegExpMacroAssembler::Execute() ++ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. 
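To make the entry signature quoted at the end of this comment block concrete, here is a hypothetical C-style rendering; V8 itself reaches the code through its GeneratedCode wrapper rather than a raw function pointer, and the exact parameter types used here are stand-ins:

    #include <cstdint>

    // Raw-pointer view of the generated RegExp entry point described above.
    // Only the parameter list mirrors the comment; all types are illustrative.
    using RegExpCodeEntry = int (*)(uintptr_t input_string,  // handle location
                                    int start_index,
                                    const uint8_t* start, const uint8_t* end,
                                    int* capture_output_array,
                                    int num_capture_registers,
                                    uint8_t* stack_area_base,
                                    int direct_call,  // 1 = direct from JS
                                    void* isolate);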
++ * ++ * clang-format on ++ */ ++ ++#define __ ACCESS_MASM(masm_) ++ ++const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize; ++ ++RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, ++ Mode mode, ++ int registers_to_save) ++ : NativeRegExpMacroAssembler(isolate, zone), ++ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes, ++ NewAssemblerBuffer(kRegExpCodeSize))), ++ mode_(mode), ++ num_registers_(registers_to_save), ++ num_saved_registers_(registers_to_save), ++ entry_label_(), ++ start_label_(), ++ success_label_(), ++ backtrack_label_(), ++ exit_label_(), ++ internal_failure_label_() { ++ masm_->set_root_array_available(false); ++ ++ DCHECK_EQ(0, registers_to_save % 2); ++ __ jmp(&entry_label_); // We'll write the entry code later. ++ // If the code gets too big or corrupted, an internal exception will be ++ // raised, and we will exit right away. ++ __ bind(&internal_failure_label_); ++ __ li(a0, Operand(FAILURE)); ++ __ Ret(); ++ __ bind(&start_label_); // And then continue from here. ++} ++ ++RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() { ++ delete masm_; ++ // Unuse labels in case we throw away the assembler without calling GetCode. ++ entry_label_.Unuse(); ++ start_label_.Unuse(); ++ success_label_.Unuse(); ++ backtrack_label_.Unuse(); ++ exit_label_.Unuse(); ++ check_preempt_label_.Unuse(); ++ stack_overflow_label_.Unuse(); ++ internal_failure_label_.Unuse(); ++} ++ ++int RegExpMacroAssemblerLOONG64::stack_limit_slack() { ++ return RegExpStack::kStackLimitSlack; ++} ++ ++void RegExpMacroAssemblerLOONG64::AdvanceCurrentPosition(int by) { ++ if (by != 0) { ++ __ Add_d(current_input_offset(), current_input_offset(), ++ Operand(by * char_size())); ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::AdvanceRegister(int reg, int by) { ++ DCHECK_LE(0, reg); ++ DCHECK_GT(num_registers_, reg); ++ if (by != 0) { ++ __ Ld_d(a0, register_location(reg)); ++ __ Add_d(a0, a0, Operand(by)); ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::Backtrack() { ++ CheckPreemption(); ++ if (has_backtrack_limit()) { ++ Label next; ++ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Add_d(a0, a0, Operand(1)); ++ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount)); ++ __ Branch(&next, ne, a0, Operand(backtrack_limit())); ++ ++ // Exceeded limits are treated as a failed match. ++ Fail(); ++ ++ __ bind(&next); ++ } ++ // Pop Code offset from backtrack stack, add Code and jump to location. 
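Before the pop-and-jump that completes Backtrack() below, a compact model of the backtrack accounting above: when a backtrack limit is configured, each backtrack increments a counter stored in the frame (kBacktrackCount), and reaching the limit is treated as a failed match rather than an error. Sketch with illustrative names:

    #include <cstdint>

    // One backtrack step: returns false when the configured limit is reached,
    // in which case the caller fails the match (see Fail() in the code above).
    bool TakeBacktrackStep(uint64_t* backtrack_count, uint64_t backtrack_limit) {
      if (backtrack_limit != 0 && ++*backtrack_count == backtrack_limit) {
        return false;  // exceeded limits are treated as a failed match
      }
      return true;  // proceed: pop the code offset and jump to it
    }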
++ Pop(a0); ++ __ Add_d(a0, a0, code_pointer()); ++ __ Jump(a0); ++} ++ ++void RegExpMacroAssemblerLOONG64::Bind(Label* label) { __ bind(label); } ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacter(uint32_t c, Label* on_equal) { ++ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacterGT(uc16 limit, Label* on_greater) { ++ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset, Label* on_at_start) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset, ++ Label* on_not_at_start) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(a0, current_input_offset(), ++ Operand(-char_size() + cp_offset * char_size())); ++ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacterLT(uc16 limit, Label* on_less) { ++ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) { ++ Label backtrack_non_equal; ++ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0)); ++ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), ++ Operand(kIntSize)); ++ __ bind(&backtrack_non_equal); ++ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase( ++ int start_reg, bool read_backward, Label* on_no_match) { ++ Label fallthrough; ++ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture. ++ __ Sub_d(a1, a1, a0); // Length of capture. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Add_d(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ if (mode_ == LATIN1) { ++ Label success; ++ Label fail; ++ Label loop_check; ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Sub_d(a2, a2, Operand(a1)); ++ } ++ __ Add_d(a1, a0, Operand(a1)); ++ ++ // a0 - Address of start of capture. ++ // a1 - Address of end of capture. ++ // a2 - Address of current input position. ++ ++ Label loop; ++ __ bind(&loop); ++ __ Ld_bu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_bu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ ++ __ Branch(&loop_check, eq, a4, Operand(a3)); ++ ++ // Mismatch, try case-insensitive match (converting letters to lower-case). 
++ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. ++ __ Or(a4, a4, Operand(0x20)); // Also convert input character. ++ __ Branch(&fail, ne, a4, Operand(a3)); ++ __ Sub_d(a3, a3, Operand('a')); ++ __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); ++ // Latin-1: Check for values in range [224,254] but not 247. ++ __ Sub_d(a3, a3, Operand(224 - 'a')); ++ // Weren't Latin-1 letters. ++ __ Branch(&fail, hi, a3, Operand(254 - 224)); ++ // Check for 247. ++ __ Branch(&fail, eq, a3, Operand(247 - 224)); ++ ++ __ bind(&loop_check); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ __ jmp(&success); ++ ++ __ bind(&fail); ++ GoTo(on_no_match); ++ ++ __ bind(&success); ++ // Compute new value of character position after the matched part. ++ __ Sub_d(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a2, ++ register_location(start_reg + 1)); // Index of end of capture. ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ } else { ++ DCHECK(mode_ == UC16); ++ // Put regexp engine registers on stack. ++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | ++ backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ ++ int argument_count = 4; ++ __ PrepareCallCFunction(argument_count, a2); ++ ++ // a0 - offset of start of capture. ++ // a1 - length of capture. ++ ++ // Put arguments into arguments registers. ++ // Parameters are ++ // a0: Address byte_offset1 - Address captured substring's start. ++ // a1: Address byte_offset2 - Address of current character position. ++ // a2: size_t byte_length - length of capture in bytes(!). ++ // a3: Isolate* isolate. ++ ++ // Address of start of capture. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ // Length of capture. ++ __ mov(a2, a1); ++ // Save length in callee-save register for use on return. ++ __ mov(s3, a1); ++ // Address of current input position. ++ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address())); ++ if (read_backward) { ++ __ Sub_d(a1, a1, Operand(s3)); ++ } ++ // Isolate. ++ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ++ { ++ AllowExternalCallThatCantCauseGC scope(masm_); ++ ExternalReference function = ++ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); ++ __ CallCFunction(function, argument_count); ++ } ++ ++ // Restore regexp engine registers. ++ __ MultiPop(regexp_registers_to_retain); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ ++ // Check if function returned non-zero for success or zero for failure. ++ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); ++ // On success, increment position by length of capture. ++ if (read_backward) { ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3)); ++ } else { ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3)); ++ } ++ } ++ ++ __ bind(&fallthrough); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg, ++ bool read_backward, ++ Label* on_no_match) { ++ Label fallthrough; ++ ++ // Find length of back-referenced capture. 
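The Latin-1 branch of CheckNotBackReferenceIgnoreCase above leans on a bit trick: OR-ing 0x20 folds ASCII upper case onto lower case, and the same bit folds the Latin-1 letters 0xC0..0xDE onto 0xE0..0xFE, except that 0xF7 (the division sign, whose counterpart 0xD7 is the multiplication sign) must be excluded. A C equivalent of the per-character test, before the function continues below:

    #include <cstdint>

    // True when two Latin-1 code units match case-insensitively under the
    // 0x20 folding used by the generated loop above.
    bool Latin1EqualIgnoreCase(uint8_t a, uint8_t b) {
      if (a == b) return true;
      a |= 0x20;  // fold both characters to "lower case"
      b |= 0x20;
      if (a != b) return false;
      if (a >= 'a' && a <= 'z') return true;    // ASCII letters
      return a >= 224 && a <= 254 && a != 247;  // Latin-1 letters, minus 0xF7
    }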
++ __ Ld_d(a0, register_location(start_reg)); ++ __ Ld_d(a1, register_location(start_reg + 1)); ++ __ Sub_d(a1, a1, a0); // Length to check. ++ ++ // At this point, the capture registers are either both set or both cleared. ++ // If the capture length is zero, then the capture is either empty or cleared. ++ // Fall through in both cases. ++ __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); ++ ++ if (read_backward) { ++ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(t1, t1, a1); ++ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1)); ++ } else { ++ __ Add_d(t1, a1, current_input_offset()); ++ // Check that there are enough characters left in the input. ++ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg)); ++ } ++ ++ // Compute pointers to match string and capture string. ++ __ Add_d(a0, a0, Operand(end_of_input_address())); ++ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset())); ++ if (read_backward) { ++ __ Sub_d(a2, a2, Operand(a1)); ++ } ++ __ Add_d(a1, a1, Operand(a0)); ++ ++ Label loop; ++ __ bind(&loop); ++ if (mode_ == LATIN1) { ++ __ Ld_bu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_bu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ } else { ++ DCHECK(mode_ == UC16); ++ __ Ld_hu(a3, MemOperand(a0, 0)); ++ __ addi_d(a0, a0, char_size()); ++ __ Ld_hu(a4, MemOperand(a2, 0)); ++ __ addi_d(a2, a2, char_size()); ++ } ++ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4)); ++ __ Branch(&loop, lt, a0, Operand(a1)); ++ ++ // Move current character position to position after match. ++ __ Sub_d(current_input_offset(), a2, end_of_input_address()); ++ if (read_backward) { ++ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture. ++ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture. ++ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1)); ++ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2)); ++ } ++ __ bind(&fallthrough); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotCharacter(uint32_t c, ++ Label* on_not_equal) { ++ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacterAfterAnd(uint32_t c, uint32_t mask, ++ Label* on_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_equal, eq, a0, rhs); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterAnd(uint32_t c, ++ uint32_t mask, ++ Label* on_not_equal) { ++ __ And(a0, current_character(), Operand(mask)); ++ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); ++ BranchOrBacktrack(on_not_equal, ne, a0, rhs); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterMinusAnd( ++ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) { ++ DCHECK_GT(String::kMaxUtf16CodeUnit, minus); ++ __ Sub_d(a0, current_character(), Operand(minus)); ++ __ And(a0, a0, Operand(mask)); ++ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacterInRange(uc16 from, uc16 to, ++ Label* on_in_range) { ++ __ Sub_d(a0, current_character(), Operand(from)); ++ // Unsigned lower-or-same condition. 
++ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange( ++ uc16 from, uc16 to, Label* on_not_in_range) { ++ __ Sub_d(a0, current_character(), Operand(from)); ++ // Unsigned higher condition. ++ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckBitInTable(Handle table, ++ Label* on_bit_set) { ++ __ li(a0, Operand(table)); ++ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) { ++ __ And(a1, current_character(), Operand(kTableSize - 1)); ++ __ Add_d(a0, a0, a1); ++ } else { ++ __ Add_d(a0, a0, current_character()); ++ } ++ ++ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize)); ++ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); ++} ++ ++bool RegExpMacroAssemblerLOONG64::CheckSpecialCharacterClass(uc16 type, ++ Label* on_no_match) { ++ // Range checks (c in min..max) are generally implemented by an unsigned ++ // (c - min) <= (max - min) check. ++ switch (type) { ++ case 's': ++ // Match space-characters. ++ if (mode_ == LATIN1) { ++ // One byte space characters are '\t'..'\r', ' ' and \u00a0. ++ Label success; ++ __ Branch(&success, eq, current_character(), Operand(' ')); ++ // Check range 0x09..0x0D. ++ __ Sub_d(a0, current_character(), Operand('\t')); ++ __ Branch(&success, ls, a0, Operand('\r' - '\t')); ++ // \u00a0 (NBSP). ++ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t')); ++ __ bind(&success); ++ return true; ++ } ++ return false; ++ case 'S': ++ // The emitted code for generic character classes is good enough. ++ return false; ++ case 'd': ++ // Match Latin1 digits ('0'..'9'). ++ __ Sub_d(a0, current_character(), Operand('0')); ++ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0')); ++ return true; ++ case 'D': ++ // Match non Latin1-digits. ++ __ Sub_d(a0, current_character(), Operand('0')); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0')); ++ return true; ++ case '.': { ++ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). ++ __ Xor(a0, current_character(), Operand(0x01)); ++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. ++ __ Sub_d(a0, a0, Operand(0x0B)); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B)); ++ if (mode_ == UC16) { ++ // Compare original value to 0x2028 and 0x2029, using the already ++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for ++ // 0x201D (0x2028 - 0x0B) or 0x201E. ++ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B)); ++ BranchOrBacktrack(on_no_match, ls, a0, Operand(1)); ++ } ++ return true; ++ } ++ case 'n': { ++ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). ++ __ Xor(a0, current_character(), Operand(0x01)); ++ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. ++ __ Sub_d(a0, a0, Operand(0x0B)); ++ if (mode_ == LATIN1) { ++ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B)); ++ } else { ++ Label done; ++ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B)); ++ // Compare original value to 0x2028 and 0x2029, using the already ++ // computed (current_char ^ 0x01 - 0x0B). I.e., check for ++ // 0x201D (0x2028 - 0x0B) or 0x201E. ++ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B)); ++ BranchOrBacktrack(on_no_match, hi, a0, Operand(1)); ++ __ bind(&done); ++ } ++ return true; ++ } ++ case 'w': { ++ if (mode_ != LATIN1) { ++ // Table is 256 entries, so all Latin1 characters can be tested. 
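The '.' and 'n' classes above share another trick worth spelling out: XOR-ing the character with 0x01 maps '\n' (0x0A) to 0x0B and '\r' (0x0D) to 0x0C, so one unsigned range check covers both, and the same shifted value is reused for U+2028/U+2029 in UC16 mode. In plain C++ (the 'w' case resumes below):

    #include <cstdint>

    // Newline test equivalent to the generated sequence above.
    bool IsNewline(uint16_t c, bool uc16) {
      uint16_t x = (c ^ 0x01) - 0x0B;  // '\n' -> 0, '\r' -> 1
      if (x <= 0x0C - 0x0B) return true;
      // UC16 also treats U+2028/U+2029 as newlines; reuse the shifted value.
      return uc16 && static_cast<uint16_t>(x - (0x2028 - 0x0B)) <= 1;
    }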
++ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z')); ++ } ++ ExternalReference map = ++ ExternalReference::re_word_character_map(isolate()); ++ __ li(a0, Operand(map)); ++ __ Add_d(a0, a0, current_character()); ++ __ Ld_bu(a0, MemOperand(a0, 0)); ++ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); ++ return true; ++ } ++ case 'W': { ++ Label done; ++ if (mode_ != LATIN1) { ++ // Table is 256 entries, so all Latin1 characters can be tested. ++ __ Branch(&done, hi, current_character(), Operand('z')); ++ } ++ ExternalReference map = ++ ExternalReference::re_word_character_map(isolate()); ++ __ li(a0, Operand(map)); ++ __ Add_d(a0, a0, current_character()); ++ __ Ld_bu(a0, MemOperand(a0, 0)); ++ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg)); ++ if (mode_ != LATIN1) { ++ __ bind(&done); ++ } ++ return true; ++ } ++ case '*': ++ // Match any character. ++ return true; ++ // No custom implementation (yet): s(UC16), S(UC16). ++ default: ++ return false; ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::Fail() { ++ __ li(a0, Operand(FAILURE)); ++ __ jmp(&exit_label_); ++} ++ ++Handle RegExpMacroAssemblerLOONG64::GetCode(Handle source) { ++ Label return_v0; ++ if (0 /* todo masm_->has_exception()*/) { ++ // If the code gets corrupted due to long regular expressions and lack of ++ // space on trampolines, an internal exception flag is set. If this case ++ // is detected, we will jump into exit sequence right away. ++ //__ bind_to(&entry_label_, internal_failure_label_.pos()); ++ } else { ++ // Finalize code - write the entry point code now we know how many ++ // registers we need. ++ ++ // Entry code: ++ __ bind(&entry_label_); ++ ++ // Tell the system that we have a stack frame. Because the type is MANUAL, ++ // no is generated. ++ FrameScope scope(masm_, StackFrame::MANUAL); ++ ++ // Actually emit code to start a new stack frame. ++ // Push arguments ++ // Save callee-save registers. ++ // Start new stack frame. ++ // Store link register in existing stack-cell. ++ // Order here should correspond to order of offset constants in header file. ++ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs ++ // or dont save. ++ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() | ++ s4.bit() | s5.bit() | s6.bit() | s7.bit(); ++ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit(); ++ ++ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit(); ++ ++ __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain); ++ // Set frame pointer in space for it if this is not a direct call ++ // from generated code. ++ // TODO(plind): this 8 is the # of argument regs, should have definition. ++ __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize)); ++ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize); ++ __ mov(a0, zero_reg); ++ __ push(a0); // Make room for success counter and initialize it to 0. ++ STATIC_ASSERT(kStringStartMinusOne == ++ kSuccessfulCaptures - kSystemPointerSize); ++ __ push(a0); // Make room for "string start - 1" constant. ++ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize); ++ __ push(a0); // The backtrack counter ++ ++ // Check if we have space on the stack for registers. 
++ Label stack_limit_hit; ++ Label stack_ok; ++ ++ ExternalReference stack_limit = ++ ExternalReference::address_of_jslimit(masm_->isolate()); ++ __ li(a0, Operand(stack_limit)); ++ __ Ld_d(a0, MemOperand(a0, 0)); ++ __ Sub_d(a0, sp, a0); ++ // Handle it if the stack pointer is already below the stack limit. ++ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); ++ // Check if there is room for the variable number of registers above ++ // the stack limit. ++ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); ++ // Exit with OutOfMemory exception. There is not enough space on the stack ++ // for our working registers. ++ __ li(a0, Operand(EXCEPTION)); ++ __ jmp(&return_v0); ++ ++ __ bind(&stack_limit_hit); ++ CallCheckStackGuardState(a0); ++ // If returned value is non-zero, we exit with the returned value as result. ++ __ Branch(&return_v0, ne, a0, Operand(zero_reg)); ++ ++ __ bind(&stack_ok); ++ // Allocate space on stack for registers. ++ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize)); ++ // Load string end. ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ // Load input start. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart)); ++ // Find negative length (offset of start relative to end). ++ __ Sub_d(current_input_offset(), a0, end_of_input_address()); ++ // Set a0 to address of char before start of the input string ++ // (effectively string position -1). ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex)); ++ __ Sub_d(a0, current_input_offset(), Operand(char_size())); ++ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0); ++ __ Sub_d(a0, a0, t1); ++ // Store this value in a local variable, for use when clearing ++ // position registers. ++ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ // Initialize code pointer register ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ ++ Label load_char_start_regexp, start_regexp; ++ // Load newline if index is at start, previous character otherwise. ++ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); ++ __ li(current_character(), Operand('\n')); ++ __ jmp(&start_regexp); ++ ++ // Global regexp restarts matching here. ++ __ bind(&load_char_start_regexp); ++ // Load previous char as initial value of current character register. ++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&start_regexp); ++ ++ // Initialize on-stack registers. ++ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. ++ // Fill saved registers with initial value = start offset - 1. ++ if (num_saved_registers_ > 8) { ++ // Address of register 0. ++ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero)); ++ __ li(a2, Operand(num_saved_registers_)); ++ Label init_loop; ++ __ bind(&init_loop); ++ __ St_d(a0, MemOperand(a1, 0)); ++ __ Add_d(a1, a1, Operand(-kPointerSize)); ++ __ Sub_d(a2, a2, Operand(1)); ++ __ Branch(&init_loop, ne, a2, Operand(zero_reg)); ++ } else { ++ for (int i = 0; i < num_saved_registers_; i++) { ++ __ St_d(a0, register_location(i)); ++ } ++ } ++ } ++ ++ // Initialize backtrack stack pointer. ++ __ Ld_d(backtrack_stackpointer(), ++ MemOperand(frame_pointer(), kStackHighEnd)); ++ ++ __ jmp(&start_label_); ++ ++ // Exit code: ++ if (success_label_.is_linked()) { ++ // Save captures when successful. ++ __ bind(&success_label_); ++ if (num_saved_registers_ > 0) { ++ // Copy captures to output. 
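The entry-code stack check near the top of this block reads: subtract the JS stack limit from sp, bail out to the stack guard if the result is not positive, and otherwise require enough headroom for the whole register area. The same three-way decision in plain C++ (types illustrative):

    #include <cstdint>

    // Mirrors the stack_limit_hit / stack_ok branches in the entry code above.
    enum class StackCheck { kHitLimit, kTooSmall, kOk };

    StackCheck CheckEntryStack(uintptr_t sp, uintptr_t js_stack_limit,
                               int num_registers, int pointer_size = 8) {
      if (sp <= js_stack_limit) return StackCheck::kHitLimit;  // call stack guard
      uintptr_t headroom = sp - js_stack_limit;
      if (headroom < static_cast<uintptr_t>(num_registers) * pointer_size)
        return StackCheck::kTooSmall;  // exit with EXCEPTION (out of memory)
      return StackCheck::kOk;          // safe to allocate the register area
    }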
++ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart)); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput)); ++ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex)); ++ __ Sub_d(a1, end_of_input_address(), a1); ++ // a1 is length of input in bytes. ++ if (mode_ == UC16) { ++ __ srli_d(a1, a1, 1); ++ } ++ // a1 is length of input in characters. ++ __ Add_d(a1, a1, Operand(a2)); ++ // a1 is length of string in characters. ++ ++ DCHECK_EQ(0, num_saved_registers_ % 2); ++ // Always an even number of capture registers. This allows us to ++ // unroll the loop once to add an operation between a load of a register ++ // and the following use of that register. ++ for (int i = 0; i < num_saved_registers_; i += 2) { ++ __ Ld_d(a2, register_location(i)); ++ __ Ld_d(a3, register_location(i + 1)); ++ if (i == 0 && global_with_zero_length_check()) { ++ // Keep capture start in a4 for the zero-length check later. ++ __ mov(t3, a2); ++ } ++ if (mode_ == UC16) { ++ __ srai_d(a2, a2, 1); ++ __ Add_d(a2, a2, a1); ++ __ srai_d(a3, a3, 1); ++ __ Add_d(a3, a3, a1); ++ } else { ++ __ Add_d(a2, a1, Operand(a2)); ++ __ Add_d(a3, a1, Operand(a3)); ++ } ++ // V8 expects the output to be an int32_t array. ++ __ St_w(a2, MemOperand(a0, 0)); ++ __ Add_d(a0, a0, kIntSize); ++ __ St_w(a3, MemOperand(a0, 0)); ++ __ Add_d(a0, a0, kIntSize); ++ } ++ } ++ ++ if (global()) { ++ // Restart matching if the regular expression is flagged as global. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ // Increment success counter. ++ __ Add_d(a0, a0, 1); ++ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ // Capture results have been stored, so the number of remaining global ++ // output registers is reduced by the number of stored captures. ++ __ Sub_d(a1, a1, num_saved_registers_); ++ // Check whether we have enough room for another set of capture results. ++ //__ mov(v0, a0); ++ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); ++ ++ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); ++ // Advance the location for output. ++ __ Add_d(a2, a2, num_saved_registers_ * kIntSize); ++ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput)); ++ ++ // Prepare a0 to initialize registers with its value in the next run. ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ ++ if (global_with_zero_length_check()) { ++ // Special case for zero-length matches. ++ // t3: capture start index ++ // Not a zero-length match, restart. ++ __ Branch(&load_char_start_regexp, ne, current_input_offset(), ++ Operand(t3)); ++ // Offset from the end is zero if we already reached the end. ++ __ Branch(&exit_label_, eq, current_input_offset(), ++ Operand(zero_reg)); ++ // Advance current position after a zero-length match. ++ Label advance; ++ __ bind(&advance); ++ __ Add_d(current_input_offset(), current_input_offset(), ++ Operand((mode_ == UC16) ? 2 : 1)); ++ if (global_unicode()) CheckNotInSurrogatePair(0, &advance); ++ } ++ ++ __ Branch(&load_char_start_regexp); ++ } else { ++ __ li(a0, Operand(SUCCESS)); ++ } ++ } ++ // Exit and return v0. ++ __ bind(&exit_label_); ++ if (global()) { ++ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); ++ } ++ ++ __ bind(&return_v0); ++ // Skip sp past regexp registers and local variables.. ++ __ mov(sp, frame_pointer()); ++ // Restore registers s0..s7 and return (restoring ra to pc). 
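In the success path above, each capture is stored as a non-positive byte offset from the end of the string; writing it out converts to a character index by an arithmetic halving in UC16 mode plus the string length in characters. A model of that conversion:

    #include <cstdint>

    // Convert a stored capture (byte offset <= 0, measured from the string
    // end) into the int32_t character index written to the output array.
    int32_t CaptureToIndex(int64_t capture_byte_offset, int64_t start_index,
                           int64_t input_length_bytes, bool uc16) {
      int64_t length_chars =
          (uc16 ? input_length_bytes >> 1 : input_length_bytes) + start_index;
      int64_t offset_chars = uc16 ? capture_byte_offset >> 1  // srai_d keeps sign
                                  : capture_byte_offset;
      return static_cast<int32_t>(length_chars + offset_chars);
    }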
++ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain); ++ __ Ret(); ++ ++ // Backtrack code (branch target for conditional backtracks). ++ if (backtrack_label_.is_linked()) { ++ __ bind(&backtrack_label_); ++ Backtrack(); ++ } ++ ++ Label exit_with_exception; ++ ++ // Preempt-code. ++ if (check_preempt_label_.is_linked()) { ++ SafeCallTarget(&check_preempt_label_); ++ // Put regexp engine registers on stack. ++ RegList regexp_registers_to_retain = current_input_offset().bit() | ++ current_character().bit() | ++ backtrack_stackpointer().bit(); ++ __ MultiPush(regexp_registers_to_retain); ++ CallCheckStackGuardState(a0); ++ __ MultiPop(regexp_registers_to_retain); ++ // If returning non-zero, we should end execution with the given ++ // result as return value. ++ __ Branch(&return_v0, ne, a0, Operand(zero_reg)); ++ ++ // String might have moved: Reload end of string from frame. ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ SafeReturn(); ++ } ++ ++ // Backtrack stack overflow code. ++ if (stack_overflow_label_.is_linked()) { ++ SafeCallTarget(&stack_overflow_label_); ++ // Reached if the backtrack-stack limit has been hit. ++ // Put regexp engine registers on stack first. ++ RegList regexp_registers = ++ current_input_offset().bit() | current_character().bit(); ++ __ MultiPush(regexp_registers); ++ ++ // Call GrowStack(backtrack_stackpointer(), &stack_base) ++ static const int num_arguments = 3; ++ __ PrepareCallCFunction(num_arguments, a0); ++ __ mov(a0, backtrack_stackpointer()); ++ __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd)); ++ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate()))); ++ ExternalReference grow_stack = ++ ExternalReference::re_grow_stack(masm_->isolate()); ++ __ CallCFunction(grow_stack, num_arguments); ++ // Restore regexp registers. ++ __ MultiPop(regexp_registers); ++ // If return nullptr, we have failed to grow the stack, and ++ // must exit with a stack-overflow exception. ++ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg)); ++ // Otherwise use return value as new stack pointer. ++ __ mov(backtrack_stackpointer(), a0); ++ // Restore saved registers and continue. ++ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); ++ SafeReturn(); ++ } ++ ++ if (exit_with_exception.is_linked()) { ++ // If any of the code above needed to exit with an exception. ++ __ bind(&exit_with_exception); ++ // Exit with Result EXCEPTION(-1) to signal thrown exception. 
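The stack-overflow path above follows a small protocol: call the runtime's GrowStack with the backtrack pointer, the address of the stack-base slot, and the isolate; a null result means the backtrack stack truly cannot grow, and anything else becomes the new backtrack stack pointer. A sketch (this GrowStackFn signature is an assumption patterned on the three arguments set up above):

    #include <cstdint>

    using GrowStackFn = uint8_t* (*)(uint8_t* backtrack_sp, uint8_t** stack_base,
                                     void* isolate);

    // Models the recovery above: returns the new backtrack stack pointer, or
    // nullptr to signal a real stack overflow (exit_with_exception).
    uint8_t* RecoverFromBacktrackOverflow(GrowStackFn grow_stack,
                                          uint8_t* backtrack_sp,
                                          uint8_t** stack_base, void* isolate) {
      return grow_stack(backtrack_sp, stack_base, isolate);
    }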
++ __ li(a0, Operand(EXCEPTION)); ++ __ jmp(&return_v0); ++ } ++ } ++ ++ CodeDesc code_desc; ++ masm_->GetCode(isolate(), &code_desc); ++ Handle code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP) ++ .set_self_reference(masm_->CodeObject()) ++ .Build(); ++ LOG(masm_->isolate(), ++ RegExpCodeCreateEvent(Handle::cast(code), source)); ++ return Handle::cast(code); ++} ++ ++void RegExpMacroAssemblerLOONG64::GoTo(Label* to) { ++ if (to == nullptr) { ++ Backtrack(); ++ return; ++ } ++ __ jmp(to); ++ return; ++} ++ ++void RegExpMacroAssemblerLOONG64::IfRegisterGE(int reg, int comparand, ++ Label* if_ge) { ++ __ Ld_d(a0, register_location(reg)); ++ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand)); ++} ++ ++void RegExpMacroAssemblerLOONG64::IfRegisterLT(int reg, int comparand, ++ Label* if_lt) { ++ __ Ld_d(a0, register_location(reg)); ++ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand)); ++} ++ ++void RegExpMacroAssemblerLOONG64::IfRegisterEqPos(int reg, Label* if_eq) { ++ __ Ld_d(a0, register_location(reg)); ++ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset())); ++} ++ ++RegExpMacroAssembler::IrregexpImplementation ++RegExpMacroAssemblerLOONG64::Implementation() { ++ return kLOONG64Implementation; ++} ++ ++void RegExpMacroAssemblerLOONG64::PopCurrentPosition() { ++ Pop(current_input_offset()); ++} ++ ++void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) { ++ Pop(a0); ++ __ St_d(a0, register_location(register_index)); ++} ++ ++void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) { ++ if (label->is_bound()) { ++ int target = label->pos(); ++ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag)); ++ } else { ++ // TODO: Optimize like arm64 without ld_wu? ++ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); ++ Label after_constant; ++ __ Branch(&after_constant); ++ int offset = masm_->pc_offset(); ++ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag; ++ //__ emit(0); ++ __ nop(); ++ masm_->label_at_put(label, offset); ++ __ bind(&after_constant); ++ if (is_int12(cp_offset)) { ++ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset)); ++ } else { ++ __ Add_d(a0, code_pointer(), cp_offset); ++ __ Ld_wu(a0, MemOperand(a0, 0)); ++ } ++ } ++ Push(a0); ++ CheckStackLimit(); ++} ++ ++void RegExpMacroAssemblerLOONG64::PushCurrentPosition() { ++ Push(current_input_offset()); ++} ++ ++void RegExpMacroAssemblerLOONG64::PushRegister(int register_index, ++ StackCheckFlag check_stack_limit) { ++ __ Ld_d(a0, register_location(register_index)); ++ Push(a0); ++ if (check_stack_limit) CheckStackLimit(); ++} ++ ++void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) { ++ __ Ld_d(current_input_offset(), register_location(reg)); ++} ++ ++void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) { ++ __ Ld_d(backtrack_stackpointer(), register_location(reg)); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLOONG64::SetCurrentPositionFromEnd(int by) { ++ Label after_position; ++ __ Branch(&after_position, ge, current_input_offset(), ++ Operand(-by * char_size())); ++ __ li(current_input_offset(), -by * char_size()); ++ // On RegExp code entry (where this operation is used), the character before ++ // the current position is expected to be already loaded. ++ // We have advanced the position, so it's safe to read backwards. 
++ LoadCurrentCharacterUnchecked(-1, 1); ++ __ bind(&after_position); ++} ++ ++void RegExpMacroAssemblerLOONG64::SetRegister(int register_index, int to) { ++ DCHECK(register_index >= num_saved_registers_); // Reserved for positions! ++ __ li(a0, Operand(to)); ++ __ St_d(a0, register_location(register_index)); ++} ++ ++bool RegExpMacroAssemblerLOONG64::Succeed() { ++ __ jmp(&success_label_); ++ return global(); ++} ++ ++void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(int reg, ++ int cp_offset) { ++ if (cp_offset == 0) { ++ __ St_d(current_input_offset(), register_location(reg)); ++ } else { ++ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size())); ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) { ++ DCHECK(reg_from <= reg_to); ++ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ for (int reg = reg_from; reg <= reg_to; reg++) { ++ __ St_d(a0, register_location(reg)); ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd)); ++ __ Sub_d(a0, backtrack_stackpointer(), a1); ++ __ St_d(a0, register_location(reg)); ++} ++ ++bool RegExpMacroAssemblerLOONG64::CanReadUnaligned() { return false; } ++ ++// Private methods: ++ ++void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) { ++ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); ++ DCHECK(!masm_->options().isolate_independent_code); ++ ++ int stack_alignment = base::OS::ActivationFrameAlignment(); ++ ++ // Align the stack pointer and save the original sp value on the stack. ++ __ mov(scratch, sp); ++ __ Sub_d(sp, sp, Operand(kPointerSize)); ++ DCHECK(base::bits::IsPowerOfTwo(stack_alignment)); ++ __ And(sp, sp, Operand(-stack_alignment)); ++ __ St_d(scratch, MemOperand(sp, 0)); ++ ++ __ mov(a2, frame_pointer()); ++ // Code of self. ++ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); ++ ++ // We need to make room for the return address on the stack. ++ DCHECK(IsAligned(stack_alignment, kPointerSize)); ++ __ Sub_d(sp, sp, Operand(stack_alignment)); ++ ++ // The stack pointer now points to cell where the return address will be ++ // written. Arguments are in registers, meaning we treat the return address as ++ // argument 5. Since DirectCEntry will handle allocating space for the C ++ // argument slots, we don't need to care about that here. This is how the ++ // stack will look (sp meaning the value of sp at this moment): ++ // [sp + 3] - empty slot if needed for alignment. ++ // [sp + 2] - saved sp. ++ // [sp + 1] - second word reserved for return value. ++ // [sp + 0] - first word reserved for return value. ++ ++ // a0 will point to the return address, placed by DirectCEntry. ++ __ mov(a0, sp); ++ ++ ExternalReference stack_guard_check = ++ ExternalReference::re_check_stack_guard_state(masm_->isolate()); ++ __ li(t7, Operand(stack_guard_check)); ++ ++ EmbeddedData d = EmbeddedData::FromBlob(); ++ CHECK(Builtins::IsIsolateIndependent(Builtins::kDirectCEntry)); ++ Address entry = d.InstructionStartOfBuiltin(Builtins::kDirectCEntry); ++ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); ++ __ Call(kScratchReg); ++ ++ // DirectCEntry allocated space for the C argument slots so we have to ++ // drop them with the return address from the stack with loading saved sp. ++ // At this point stack must look: ++ // [sp + 7] - empty slot if needed for alignment. ++ // [sp + 6] - saved sp. 
++ // [sp + 5] - second word reserved for return value. ++ // [sp + 4] - first word reserved for return value. ++ // [sp + 3] - C argument slot. ++ // [sp + 2] - C argument slot. ++ // [sp + 1] - C argument slot. ++ // [sp + 0] - C argument slot. ++ __ Ld_d(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize)); ++ ++ __ li(code_pointer(), Operand(masm_->CodeObject())); ++} ++ ++// Helper function for reading a value out of a stack frame. ++template ++static T& frame_entry(Address re_frame, int frame_offset) { ++ return reinterpret_cast(Memory(re_frame + frame_offset)); ++} ++ ++template ++static T* frame_entry_address(Address re_frame, int frame_offset) { ++ return reinterpret_cast(re_frame + frame_offset); ++} ++ ++int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(Address* return_address, ++ Address raw_code, ++ Address re_frame) { ++ Code re_code = Code::cast(Object(raw_code)); ++ return NativeRegExpMacroAssembler::CheckStackGuardState( ++ frame_entry(re_frame, kIsolate), ++ static_cast(frame_entry(re_frame, kStartIndex)), ++ static_cast( ++ frame_entry(re_frame, kDirectCall)), ++ return_address, re_code, ++ frame_entry_address
(re_frame, kInputString), ++ frame_entry_address(re_frame, kInputStart), ++ frame_entry_address(re_frame, kInputEnd)); ++} ++ ++MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) { ++ DCHECK(register_index < (1 << 30)); ++ if (num_registers_ <= register_index) { ++ num_registers_ = register_index + 1; ++ } ++ return MemOperand(frame_pointer(), ++ kRegisterZero - register_index * kPointerSize); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset, ++ Label* on_outside_input) { ++ if (cp_offset >= 0) { ++ BranchOrBacktrack(on_outside_input, ge, current_input_offset(), ++ Operand(-cp_offset * char_size())); ++ } else { ++ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); ++ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size())); ++ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1)); ++ } ++} ++ ++void RegExpMacroAssemblerLOONG64::BranchOrBacktrack(Label* to, Condition condition, ++ Register rs, ++ const Operand& rt) { ++ if (condition == al) { // Unconditional. ++ if (to == nullptr) { ++ Backtrack(); ++ return; ++ } ++ __ jmp(to); ++ return; ++ } ++ if (to == nullptr) { ++ __ Branch(&backtrack_label_, condition, rs, rt); ++ return; ++ } ++ __ Branch(to, condition, rs, rt); ++} ++ ++void RegExpMacroAssemblerLOONG64::SafeCall(Label* to, Condition cond, Register rs, ++ const Operand& rt) { ++ __ Branch(to, cond, rs, rt, true); ++} ++ ++void RegExpMacroAssemblerLOONG64::SafeReturn() { ++ __ pop(ra); ++ __ Add_d(t1, ra, Operand(masm_->CodeObject())); ++ __ Jump(t1); ++} ++ ++void RegExpMacroAssemblerLOONG64::SafeCallTarget(Label* name) { ++ __ bind(name); ++ __ Sub_d(ra, ra, Operand(masm_->CodeObject())); ++ __ push(ra); ++} ++ ++void RegExpMacroAssemblerLOONG64::Push(Register source) { ++ DCHECK(source != backtrack_stackpointer()); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), ++ Operand(-kIntSize)); ++ __ St_w(source, MemOperand(backtrack_stackpointer(), 0)); ++} ++ ++void RegExpMacroAssemblerLOONG64::Pop(Register target) { ++ DCHECK(target != backtrack_stackpointer()); ++ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0)); ++ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckPreemption() { ++ // Check for preemption. ++ ExternalReference stack_limit = ++ ExternalReference::address_of_jslimit(masm_->isolate()); ++ __ li(a0, Operand(stack_limit)); ++ __ Ld_d(a0, MemOperand(a0, 0)); ++ SafeCall(&check_preempt_label_, ls, sp, Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLOONG64::CheckStackLimit() { ++ ExternalReference stack_limit = ++ ExternalReference::address_of_regexp_stack_limit_address( ++ masm_->isolate()); ++ ++ __ li(a0, Operand(stack_limit)); ++ __ Ld_d(a0, MemOperand(a0, 0)); ++ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0)); ++} ++ ++void RegExpMacroAssemblerLOONG64::LoadCurrentCharacterUnchecked(int cp_offset, ++ int characters) { ++ Register offset = current_input_offset(); ++ if (cp_offset != 0) { ++ // t3 is not being used to store the capture start index at this point. ++ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size())); ++ offset = t3; ++ } ++ // We assume that we cannot do unaligned loads on LOONG64, so this function ++ // must only be used to load a single character at a time. 
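Push and Pop above implement the backtrack stack as a downward-growing array of 32-bit entries behind a 64-bit pointer, which is why WriteStackPointerToRegister stores it relative to kStackHighEnd. A minimal C++ model (LoadCurrentCharacterUnchecked continues below):

    #include <cstdint>

    // Minimal model of the backtrack stack used by Push/Pop above.
    struct BacktrackStack {
      int32_t* sp;  // backtrack_stackpointer(): grows toward lower addresses
      void Push(int32_t value) { *--sp = value; }  // Add_d -kIntSize, St_w
      int32_t Pop() { return *sp++; }              // Ld_w, Add_d +kIntSize
    };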
++ DCHECK_EQ(1, characters); ++ __ Add_d(t1, end_of_input_address(), Operand(offset)); ++ if (mode_ == LATIN1) { ++ __ Ld_bu(current_character(), MemOperand(t1, 0)); ++ } else { ++ DCHECK(mode_ == UC16); ++ __ Ld_hu(current_character(), MemOperand(t1, 0)); ++ } ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_TARGET_ARCH_LOONG64 +diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h +new file mode 100644 +index 00000000..dd6f0123 +--- /dev/null ++++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h +@@ -0,0 +1,211 @@ ++// Copyright 2011 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_ ++#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_ ++ ++#include "src/codegen/loong64/assembler-loong64.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/regexp/regexp-macro-assembler.h" ++ ++namespace v8 { ++namespace internal { ++ ++class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64 ++ : public NativeRegExpMacroAssembler { ++ public: ++ RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode, ++ int registers_to_save); ++ virtual ~RegExpMacroAssemblerLOONG64(); ++ virtual int stack_limit_slack(); ++ virtual void AdvanceCurrentPosition(int by); ++ virtual void AdvanceRegister(int reg, int by); ++ virtual void Backtrack(); ++ virtual void Bind(Label* label); ++ virtual void CheckAtStart(int cp_offset, Label* on_at_start); ++ virtual void CheckCharacter(uint32_t c, Label* on_equal); ++ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask, ++ Label* on_equal); ++ virtual void CheckCharacterGT(uc16 limit, Label* on_greater); ++ virtual void CheckCharacterLT(uc16 limit, Label* on_less); ++ // A "greedy loop" is a loop that is both greedy and with a simple ++ // body. It has a particularly simple implementation. ++ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); ++ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start); ++ virtual void CheckNotBackReference(int start_reg, bool read_backward, ++ Label* on_no_match); ++ virtual void CheckNotBackReferenceIgnoreCase(int start_reg, ++ bool read_backward, ++ Label* on_no_match); ++ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal); ++ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask, ++ Label* on_not_equal); ++ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask, ++ Label* on_not_equal); ++ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range); ++ virtual void CheckCharacterNotInRange(uc16 from, uc16 to, ++ Label* on_not_in_range); ++ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set); ++ ++ // Checks whether the given offset from the current position is before ++ // the end of the string.
++ virtual void CheckPosition(int cp_offset, Label* on_outside_input); ++ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match); ++ virtual void Fail(); ++ virtual Handle<HeapObject> GetCode(Handle<String> source); ++ virtual void GoTo(Label* label); ++ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge); ++ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt); ++ virtual void IfRegisterEqPos(int reg, Label* if_eq); ++ virtual IrregexpImplementation Implementation(); ++ virtual void LoadCurrentCharacterUnchecked(int cp_offset, ++ int character_count); ++ virtual void PopCurrentPosition(); ++ virtual void PopRegister(int register_index); ++ virtual void PushBacktrack(Label* label); ++ virtual void PushCurrentPosition(); ++ virtual void PushRegister(int register_index, ++ StackCheckFlag check_stack_limit); ++ virtual void ReadCurrentPositionFromRegister(int reg); ++ virtual void ReadStackPointerFromRegister(int reg); ++ virtual void SetCurrentPositionFromEnd(int by); ++ virtual void SetRegister(int register_index, int to); ++ virtual bool Succeed(); ++ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); ++ virtual void ClearRegisters(int reg_from, int reg_to); ++ virtual void WriteStackPointerToRegister(int reg); ++ virtual bool CanReadUnaligned(); ++ ++ // Called from RegExp if the stack-guard is triggered. ++ // If the code object is relocated, the return address is fixed before ++ // returning. ++ // {raw_code} is an Address because this is called via ExternalReference. ++ static int64_t CheckStackGuardState(Address* return_address, Address raw_code, ++ Address re_frame); ++ ++ void print_regexp_frame_constants(); ++ ++ private: ++ // Offsets from frame_pointer() of function parameters and stored registers. ++ static const int kFramePointer = 0; ++ ++ // Above the frame pointer - Stored registers and stack passed parameters. ++ // Registers s0 to s7, fp, and ra. ++ static const int kStoredRegisters = kFramePointer; ++ // Return address (stored from link register, read into pc on return). ++ ++ // TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp. ++ ++ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize; ++ // Stack frame header. ++ static const int kStackFrameHeader = kReturnAddress; ++ // Stack parameters placed by caller. ++ static const int kIsolate = kStackFrameHeader + kPointerSize; ++ ++ // Below the frame pointer. ++ // Register parameters stored by setup code. ++ static const int kDirectCall = kFramePointer - kPointerSize; ++ static const int kStackHighEnd = kDirectCall - kPointerSize; ++ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize; ++ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize; ++ static const int kInputEnd = kRegisterOutput - kPointerSize; ++ static const int kInputStart = kInputEnd - kPointerSize; ++ static const int kStartIndex = kInputStart - kPointerSize; ++ static const int kInputString = kStartIndex - kPointerSize; ++ // When adding local variables remember to push space for them in ++ // the frame in GetCode. ++ static const int kSuccessfulCaptures = kInputString - kPointerSize; ++ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize; ++ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize; ++ // First register address. Following registers are below it on the stack. ++ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize; ++ ++ // Initial size of code buffer.
++ static const int kRegExpCodeSize = 1024; ++ ++ // Check whether preemption has been requested. ++ void CheckPreemption(); ++ ++ // Check whether we are exceeding the stack limit on the backtrack stack. ++ void CheckStackLimit(); ++ ++ // Generate a call to CheckStackGuardState. ++ void CallCheckStackGuardState(Register scratch); ++ ++ // The frame-pointer-relative location of a regexp register. ++ MemOperand register_location(int register_index); ++ ++ // Register holding the current input position as negative offset from ++ // the end of the string. ++ inline Register current_input_offset() { return a6; } ++ ++ // The register containing the current character after LoadCurrentCharacter. ++ inline Register current_character() { return a7; } ++ ++ // Register holding address of the end of the input string. ++ inline Register end_of_input_address() { return t2; } ++ ++ // Register holding the frame address. Local variables, parameters and ++ // regexp registers are addressed relative to this. ++ inline Register frame_pointer() { return fp; } ++ ++ // The register containing the backtrack stack top. Provides a meaningful ++ // name to the register. ++ inline Register backtrack_stackpointer() { return t0; } ++ ++ // Register holding pointer to the current code object. ++ inline Register code_pointer() { return a5; } ++ ++ // Byte size of chars in the string to match (decided by the Mode argument). ++ inline int char_size() { return static_cast<int>(mode_); } ++ ++ // Equivalent to a conditional branch to the label, unless the label ++ // is nullptr, in which case it is a conditional Backtrack. ++ void BranchOrBacktrack(Label* to, Condition condition, Register rs, ++ const Operand& rt); ++ ++ // Call and return internally in the generated code in a way that ++ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack) ++ inline void SafeCall(Label* to, Condition cond, Register rs, ++ const Operand& rt); ++ inline void SafeReturn(); ++ inline void SafeCallTarget(Label* name); ++ ++ // Pushes the value of a register on the backtrack stack. Decrements the ++ // stack pointer by a word size and stores the register's value there. ++ inline void Push(Register source); ++ ++ // Pops a value from the backtrack stack. Reads the word at the stack pointer ++ // and increments it by a word size. ++ inline void Pop(Register target); ++ ++ Isolate* isolate() const { return masm_->isolate(); } ++ ++ MacroAssembler* masm_; ++ ++ // Which mode to generate code for (Latin1 or UC16). ++ Mode mode_; ++ ++ // One greater than maximal register index actually used. ++ int num_registers_; ++ ++ // Number of registers to output at the end (the saved registers ++ // are always 0..num_saved_registers_-1). ++ int num_saved_registers_; ++ ++ // Labels used internally.
++ Label entry_label_; ++ Label start_label_; ++ Label success_label_; ++ Label backtrack_label_; ++ Label exit_label_; ++ Label check_preempt_label_; ++ Label stack_overflow_label_; ++ Label internal_failure_label_; ++}; ++ ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_ +diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h +index 8ec12a0a..e4503090 100644 +--- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h ++++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h +@@ -21,6 +21,8 @@ + #include "src/regexp/mips/regexp-macro-assembler-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/regexp/loong64/regexp-macro-assembler-loong64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/regexp/s390/regexp-macro-assembler-s390.h" + #else +diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc +index 0a122017..37b22105 100644 +--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc ++++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc +@@ -15,8 +15,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer( + : RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) { + IrregexpImplementation type = assembler->Implementation(); +- DCHECK_LT(type, 9); +- const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "S390", +- "PPC", "X64", "X87", "Bytecode"}; ++ DCHECK_LT(type, 10); ++ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "LOONG64", ++ "S390", "PPC", "X64", "X87", "Bytecode"}; + PrintF("RegExpMacroAssembler%s();\n", impl_names[type]); + } + +diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h +index 289c2a97..0e283e78 100644 +--- a/deps/v8/src/regexp/regexp-macro-assembler.h ++++ b/deps/v8/src/regexp/regexp-macro-assembler.h +@@ -44,6 +44,7 @@ class RegExpMacroAssembler { + kARMImplementation, + kARM64Implementation, + kMIPSImplementation, ++ kLOONG64Implementation, + kS390Implementation, + kPPCImplementation, + kX64Implementation, +diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc +index 7b8da4d8..da50d8f9 100644 +--- a/deps/v8/src/regexp/regexp.cc ++++ b/deps/v8/src/regexp/regexp.cc +@@ -781,6 +781,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, + #elif V8_TARGET_ARCH_MIPS64 + macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode, + output_register_count)); ++#elif V8_TARGET_ARCH_LOONG64 ++ macro_assembler.reset(new RegExpMacroAssemblerLOONG64(isolate, zone, mode, ++ output_register_count)); + #else + #error "Unsupported architecture" + #endif +diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc +index 34259c6e..14a5372d 100644 +--- a/deps/v8/src/runtime/runtime-atomics.cc ++++ b/deps/v8/src/runtime/runtime-atomics.cc +@@ -20,7 +20,8 @@ namespace internal { + + // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
+ #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ +- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ++ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \ ++ V8_TARGET_ARCH_LOONG64 + + namespace { + +diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h +index 72ca7297..0c54d6d0 100644 +--- a/deps/v8/src/snapshot/deserializer.h ++++ b/deps/v8/src/snapshot/deserializer.h +@@ -28,8 +28,9 @@ class Object; + // Used for platforms with embedded constant pools to trigger deserialization + // of objects found in code. + #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ +- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ +- defined(V8_TARGET_ARCH_PPC64) || V8_EMBEDDED_CONSTANT_POOL ++ defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_PPC) || \ ++ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_PPC64) || \ ++ V8_EMBEDDED_CONSTANT_POOL + #define V8_CODE_EMBEDS_OBJECT_POINTER 1 + #else + #define V8_CODE_EMBEDS_OBJECT_POINTER 0 +diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h +index 781fb87d..f744596f 100644 +--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h ++++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h +@@ -46,6 +46,14 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = + constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf( + f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26); + ++#elif V8_TARGET_ARCH_LOONG64 ++/*todo*/ ++constexpr RegList kLiftoffAssemblerGpCacheRegs = ++ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7); ++ ++constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf( ++ f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26); ++ + #elif V8_TARGET_ARCH_ARM + + // r7: cp, r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc. +@@ -90,7 +98,7 @@ constexpr Condition kUnsignedLessEqual = below_equal; + constexpr Condition kUnsignedGreaterThan = above; + constexpr Condition kUnsignedGreaterEqual = above_equal; + +-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ++#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 + + constexpr Condition kEqual = eq; + constexpr Condition kUnequal = ne; +diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h +index 701b4b8e..b85494dc 100644 +--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h ++++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h +@@ -1254,6 +1254,8 @@ class LiftoffStackSlots { + #include "src/wasm/baseline/mips/liftoff-assembler-mips.h" + #elif V8_TARGET_ARCH_MIPS64 + #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" ++#elif V8_TARGET_ARCH_LOONG64 ++#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h" + #elif V8_TARGET_ARCH_S390 + #include "src/wasm/baseline/s390/liftoff-assembler-s390.h" + #else +diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig b/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig +new file mode 100644 +index 00000000..701b4b8e +--- /dev/null ++++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h.orig +@@ -0,0 +1,1263 @@ ++// Copyright 2017 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. 
++ ++#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_ ++#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_ ++ ++#include <iosfwd> ++#include <memory> ++ ++#include "src/base/bits.h" ++#include "src/base/small-vector.h" ++#include "src/codegen/macro-assembler.h" ++#include "src/wasm/baseline/liftoff-assembler-defs.h" ++#include "src/wasm/baseline/liftoff-compiler.h" ++#include "src/wasm/baseline/liftoff-register.h" ++#include "src/wasm/function-body-decoder.h" ++#include "src/wasm/wasm-code-manager.h" ++#include "src/wasm/wasm-module.h" ++#include "src/wasm/wasm-opcodes.h" ++#include "src/wasm/wasm-value.h" ++ ++namespace v8 { ++namespace internal { ++ ++// Forward declarations. ++namespace compiler { ++class CallDescriptor; ++} ++ ++namespace wasm { ++ ++class LiftoffAssembler : public TurboAssembler { ++ public: ++ // Each slot in our stack frame currently has exactly 8 bytes. ++ static constexpr int kStackSlotSize = 8; ++ ++ static constexpr ValueType kWasmIntPtr = ++ kSystemPointerSize == 8 ? kWasmI64 : kWasmI32; ++ ++ class VarState { ++ public: ++ enum Location : uint8_t { kStack, kRegister, kIntConst }; ++ ++ explicit VarState(ValueType type, int offset) ++ : loc_(kStack), type_(type), spill_offset_(offset) {} ++ explicit VarState(ValueType type, LiftoffRegister r, int offset) ++ : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) { ++ DCHECK_EQ(r.reg_class(), reg_class_for(type)); ++ } ++ explicit VarState(ValueType type, int32_t i32_const, int offset) ++ : loc_(kIntConst), ++ type_(type), ++ i32_const_(i32_const), ++ spill_offset_(offset) { ++ DCHECK(type_ == kWasmI32 || type_ == kWasmI64); ++ } ++ ++ bool operator==(const VarState& other) const { ++ if (loc_ != other.loc_) return false; ++ if (type_ != other.type_) return false; ++ switch (loc_) { ++ case kStack: ++ return true; ++ case kRegister: ++ return reg_ == other.reg_; ++ case kIntConst: ++ return i32_const_ == other.i32_const_; ++ } ++ UNREACHABLE(); ++ } ++ ++ bool is_stack() const { return loc_ == kStack; } ++ bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); } ++ bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); } ++ bool is_reg() const { return loc_ == kRegister; } ++ bool is_const() const { return loc_ == kIntConst; } ++ ++ ValueType type() const { return type_; } ++ ++ Location loc() const { return loc_; } ++ ++ int32_t i32_const() const { ++ DCHECK_EQ(loc_, kIntConst); ++ return i32_const_; ++ } ++ WasmValue constant() const { ++ DCHECK(type_ == kWasmI32 || type_ == kWasmI64); ++ DCHECK_EQ(loc_, kIntConst); ++ return type_ == kWasmI32 ? WasmValue(i32_const_) ++ : WasmValue(int64_t{i32_const_}); ++ } ++ ++ int offset() const { return spill_offset_; } ++ ++ Register gp_reg() const { return reg().gp(); } ++ DoubleRegister fp_reg() const { return reg().fp(); } ++ LiftoffRegister reg() const { ++ DCHECK_EQ(loc_, kRegister); ++ return reg_; ++ } ++ RegClass reg_class() const { return reg().reg_class(); } ++ ++ void MakeStack() { loc_ = kStack; } ++ ++ void MakeRegister(LiftoffRegister r) { ++ reg_ = r; ++ loc_ = kRegister; ++ } ++ ++ // Copy src to this, except for offset, since src and this could have been ++ // from different stack states. ++ void Copy(VarState src) { ++ loc_ = src.loc(); ++ type_ = src.type(); ++ if (loc_ == kRegister) { ++ reg_ = src.reg(); ++ } else if (loc_ == kIntConst) { ++ i32_const_ = src.i32_const(); ++ } ++ } ++ ++ private: ++ Location loc_; ++ // TODO(wasm): This is redundant, the decoder already knows the type of each ++ // stack value. Try to collapse. ++ ValueType type_; ++ ++ union { ++ LiftoffRegister reg_; // used if loc_ == kRegister ++ int32_t i32_const_; // used if loc_ == kIntConst ++ }; ++ int spill_offset_; ++ }; ++ ++ ASSERT_TRIVIALLY_COPYABLE(VarState); ++
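++ // Tracks where each wasm stack value currently lives (register, constant, ++ // or spill slot) together with per-register use counts, so that two cache ++ // states can be merged at control-flow joins.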
++ struct CacheState { ++ // Allow default construction, move construction, and move assignment. ++ CacheState() = default; ++ CacheState(CacheState&&) V8_NOEXCEPT = default; ++ CacheState& operator=(CacheState&&) V8_NOEXCEPT = default; ++ ++ base::SmallVector<VarState, 8> stack_state; ++ LiftoffRegList used_registers; ++ uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0}; ++ LiftoffRegList last_spilled_regs; ++ ++ bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const { ++ if (kNeedI64RegPair && rc == kGpRegPair) { ++ LiftoffRegList available_regs = ++ kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned); ++ return available_regs.GetNumRegsSet() >= 2; ++ } else if (kNeedS128RegPair && rc == kFpRegPair) { ++ LiftoffRegList available_regs = ++ kFpCacheRegList.MaskOut(used_registers).MaskOut(pinned); ++ return available_regs.HasAdjacentFpRegsSet(); ++ } ++ DCHECK(rc == kGpReg || rc == kFpReg); ++ LiftoffRegList candidates = GetCacheRegList(rc); ++ return has_unused_register(candidates, pinned); ++ } ++ ++ bool has_unused_register(LiftoffRegList candidates, ++ LiftoffRegList pinned = {}) const { ++ LiftoffRegList available_regs = ++ candidates.MaskOut(used_registers).MaskOut(pinned); ++ return !available_regs.is_empty(); ++ } ++ ++ LiftoffRegister unused_register(RegClass rc, ++ LiftoffRegList pinned = {}) const { ++ if (kNeedI64RegPair && rc == kGpRegPair) { ++ Register low = pinned.set(unused_register(kGpReg, pinned)).gp(); ++ Register high = unused_register(kGpReg, pinned).gp(); ++ return LiftoffRegister::ForPair(low, high); ++ } else if (kNeedS128RegPair && rc == kFpRegPair) { ++ LiftoffRegList available_regs = ++ kFpCacheRegList.MaskOut(used_registers).MaskOut(pinned); ++ DoubleRegister low = ++ available_regs.GetAdjacentFpRegsSet().GetFirstRegSet().fp(); ++ DCHECK(is_free(LiftoffRegister::ForFpPair(low))); ++ return LiftoffRegister::ForFpPair(low); ++ } ++ DCHECK(rc == kGpReg || rc == kFpReg); ++ LiftoffRegList candidates = GetCacheRegList(rc); ++ return unused_register(candidates, pinned); ++ } ++ ++ LiftoffRegister unused_register(LiftoffRegList candidates, ++ LiftoffRegList pinned = {}) const { ++ LiftoffRegList available_regs = ++ candidates.MaskOut(used_registers).MaskOut(pinned); ++ return available_regs.GetFirstRegSet(); ++ } ++ ++ void inc_used(LiftoffRegister reg) { ++ if (reg.is_pair()) { ++ inc_used(reg.low()); ++ inc_used(reg.high()); ++ return; ++ } ++ used_registers.set(reg); ++ DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]); ++ ++register_use_count[reg.liftoff_code()]; ++ } ++ ++ // Decrements the use count of {reg}; on the last use, the register is ++ // cleared from {used_registers}.
++ void dec_used(LiftoffRegister reg) { ++ DCHECK(is_used(reg)); ++ if (reg.is_pair()) { ++ dec_used(reg.low()); ++ dec_used(reg.high()); ++ return; ++ } ++ int code = reg.liftoff_code(); ++ DCHECK_LT(0, register_use_count[code]); ++ if (--register_use_count[code] == 0) used_registers.clear(reg); ++ } ++ ++ bool is_used(LiftoffRegister reg) const { ++ if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high()); ++ bool used = used_registers.has(reg); ++ DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0); ++ return used; ++ } ++ ++ uint32_t get_use_count(LiftoffRegister reg) const { ++ if (reg.is_pair()) { ++ DCHECK_EQ(register_use_count[reg.low().liftoff_code()], ++ register_use_count[reg.high().liftoff_code()]); ++ reg = reg.low(); ++ } ++ DCHECK_GT(arraysize(register_use_count), reg.liftoff_code()); ++ return register_use_count[reg.liftoff_code()]; ++ } ++ ++ void clear_used(LiftoffRegister reg) { ++ register_use_count[reg.liftoff_code()] = 0; ++ used_registers.clear(reg); ++ } ++ ++ bool is_free(LiftoffRegister reg) const { return !is_used(reg); } ++ ++ void reset_used_registers() { ++ used_registers = {}; ++ memset(register_use_count, 0, sizeof(register_use_count)); ++ }
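++ ++ // Chooses the next register to spill from {candidates}, rotating through ++ // them via {last_spilled_regs} so the same register is not evicted ++ // over and over.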
++ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates, ++ LiftoffRegList pinned = {}) { ++ LiftoffRegList unpinned = candidates.MaskOut(pinned); ++ DCHECK(!unpinned.is_empty()); ++ // This method should only be called if none of the candidates is free. ++ DCHECK(unpinned.MaskOut(used_registers).is_empty()); ++ LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs); ++ if (unspilled.is_empty()) { ++ unspilled = unpinned; ++ last_spilled_regs = {}; ++ } ++ LiftoffRegister reg = unspilled.GetFirstRegSet(); ++ return reg; ++ } ++ ++ // TODO(clemensb): Don't copy the full parent state (this makes us N^2). ++ void InitMerge(const CacheState& source, uint32_t num_locals, ++ uint32_t arity, uint32_t stack_depth); ++ ++ void Steal(const CacheState& source); ++ ++ void Split(const CacheState& source); ++ ++ uint32_t stack_height() const { ++ return static_cast<uint32_t>(stack_state.size()); ++ } ++ ++ private: ++ // Make the copy assignment operator private (to be used from {Split()}). ++ CacheState& operator=(const CacheState&) V8_NOEXCEPT = default; ++ // Disallow copy construction. ++ CacheState(const CacheState&) = delete; ++ }; ++ ++ explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>); ++ ~LiftoffAssembler() override; ++ ++ LiftoffRegister PopToRegister(LiftoffRegList pinned = {}); ++ ++ // Returns the register which holds the value of stack slot {index}. If the ++ // value is not stored in a register yet, a register is allocated for it. The ++ // register is then assigned to the stack slot. The value stack height is not ++ // modified. The top of the stack is index 0, i.e. {PopToRegister()} and ++ // {PeekToRegister(0)} should result in the same register. ++ // {PeekToRegister} already decrements the used count of the register of the ++ // stack slot. Therefore the register must not be popped by {PopToRegister} ++ // but discarded with {stack_state.pop_back(count)}. ++ LiftoffRegister PeekToRegister(int index, LiftoffRegList pinned); ++ ++ // Ensure that the loop inputs are either in a register or spilled to the ++ // stack, so that we can merge different values on the back-edge. ++ void PrepareLoopArgs(int num); ++ ++ int NextSpillOffset(ValueType type) { ++ int offset = TopSpillOffset() + SlotSizeForType(type); ++ if (NeedsAlignment(type)) { ++ offset = RoundUp(offset, SlotSizeForType(type)); ++ } ++ return offset; ++ } ++ ++ int TopSpillOffset() const { ++ return cache_state_.stack_state.empty() ++ ? StaticStackFrameSize() ++ : cache_state_.stack_state.back().offset(); ++ } ++ ++ void PushRegister(ValueType type, LiftoffRegister reg) { ++ DCHECK_EQ(reg_class_for(type), reg.reg_class()); ++ cache_state_.inc_used(reg); ++ cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type)); ++ } ++ ++ void PushConstant(ValueType type, int32_t i32_const) { ++ DCHECK(type == kWasmI32 || type == kWasmI64); ++ cache_state_.stack_state.emplace_back(type, i32_const, ++ NextSpillOffset(type)); ++ } ++ ++ void PushStack(ValueType type) { ++ cache_state_.stack_state.emplace_back(type, NextSpillOffset(type)); ++ } ++ ++ void SpillRegister(LiftoffRegister); ++ ++ uint32_t GetNumUses(LiftoffRegister reg) { ++ return cache_state_.get_use_count(reg); ++ } ++ ++ // Get an unused register for class {rc}, reusing one of {try_first} if ++ // possible. ++ LiftoffRegister GetUnusedRegister( ++ RegClass rc, std::initializer_list<LiftoffRegister> try_first, ++ LiftoffRegList pinned) { ++ for (LiftoffRegister reg : try_first) { ++ DCHECK_EQ(reg.reg_class(), rc); ++ if (cache_state_.is_free(reg)) return reg; ++ } ++ return GetUnusedRegister(rc, pinned); ++ } ++ ++ // Get an unused register for class {rc}, potentially spilling to free one. ++ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) { ++ if (kNeedI64RegPair && rc == kGpRegPair) { ++ LiftoffRegList candidates = kGpCacheRegList; ++ Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp(); ++ Register high = GetUnusedRegister(candidates, pinned).gp(); ++ return LiftoffRegister::ForPair(low, high); ++ } else if (kNeedS128RegPair && rc == kFpRegPair) { ++ // kFpRegPair specific logic here because we need adjacent registers, not ++ // just any two registers (like kGpRegPair). ++ if (cache_state_.has_unused_register(rc, pinned)) { ++ return cache_state_.unused_register(rc, pinned); ++ } ++ DoubleRegister low_fp = SpillAdjacentFpRegisters(pinned).fp(); ++ return LiftoffRegister::ForFpPair(low_fp); ++ } ++ DCHECK(rc == kGpReg || rc == kFpReg); ++ LiftoffRegList candidates = GetCacheRegList(rc); ++ return GetUnusedRegister(candidates, pinned); ++ } ++ ++ // Get an unused register of {candidates}, potentially spilling to free one. ++ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates, ++ LiftoffRegList pinned = {}) { ++ if (cache_state_.has_unused_register(candidates, pinned)) { ++ return cache_state_.unused_register(candidates, pinned); ++ } ++ return SpillOneRegister(candidates, pinned); ++ } ++ ++ void MergeFullStackWith(const CacheState& target, const CacheState& source); ++ void MergeStackWith(const CacheState& target, uint32_t arity); ++ ++ void Spill(VarState* slot); ++ void SpillLocals(); ++ void SpillAllRegisters(); ++ ++ // Clear any uses of {reg} in both the cache and in {possible_uses}. ++ // Any use in the stack is spilled. If any register in {possible_uses} matches ++ // {reg}, then the content of {reg} is moved to a new temporary register, and ++ // all matches in {possible_uses} are rewritten to that temporary register. ++ void ClearRegister(Register reg, ++ std::initializer_list<Register*> possible_uses, ++ LiftoffRegList pinned); ++ ++ // Spills all passed registers.
++ template <typename... Regs> ++ void SpillRegisters(Regs... regs) { ++ for (LiftoffRegister r : {LiftoffRegister(regs)...}) { ++ if (cache_state()->is_used(r)) SpillRegister(r); ++ } ++ } ++ ++ // Call this method whenever spilling something, such that the number of used ++ // spill slots can be tracked and the stack frame will be allocated big enough. ++ void RecordUsedSpillOffset(int offset) { ++ if (offset >= max_used_spill_offset_) max_used_spill_offset_ = offset; ++ } ++ ++ // Load parameters into the right registers / stack slots for the call. ++ void PrepareBuiltinCall(const FunctionSig* sig, ++ compiler::CallDescriptor* call_descriptor, ++ std::initializer_list<VarState> params); ++ ++ // Load parameters into the right registers / stack slots for the call. ++ // Move {*target} into another register if needed and update {*target} to that ++ // register, or {no_reg} if target was spilled to the stack. ++ void PrepareCall(const FunctionSig*, compiler::CallDescriptor*, ++ Register* target = nullptr, ++ Register* target_instance = nullptr); ++ // Process return values of the call. ++ void FinishCall(const FunctionSig*, compiler::CallDescriptor*); ++ ++ // Move {src} into {dst}. {src} and {dst} must be different. ++ void Move(LiftoffRegister dst, LiftoffRegister src, ValueType); ++ ++ // Parallel register move: For a list of tuples <dst, src, type>, move the ++ // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore ++ // that tuple. ++ struct ParallelRegisterMoveTuple { ++ LiftoffRegister dst; ++ LiftoffRegister src; ++ ValueType type; ++ template <typename Dst, typename Src> ++ ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type) ++ : dst(dst), src(src), type(type) {} ++ }; ++ void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>); ++ ++ void MoveToReturnLocations(const FunctionSig*, ++ compiler::CallDescriptor* descriptor); ++ ++#ifdef ENABLE_SLOW_DCHECKS ++ // Validate that the register use counts reflect the state of the cache. ++ bool ValidateCacheState() const; ++#endif ++ ++ //////////////////////////////////// ++ // Platform-specific part. // ++ //////////////////////////////////// ++ ++ // This function emits machine code to prepare the stack frame, before the ++ // size of the stack frame is known. It returns an offset in the machine code ++ // which can later be patched (via {PatchPrepareStackFrame}) when the size of ++ // the frame is known.
++ inline int PrepareStackFrame(); ++ inline void PatchPrepareStackFrame(int offset, int frame_size); ++ inline void FinishCode(); ++ inline void AbortCompilation(); ++ inline static constexpr int StaticStackFrameSize(); ++ inline static int SlotSizeForType(ValueType type); ++ inline static bool NeedsAlignment(ValueType type); ++ ++ inline void LoadConstant(LiftoffRegister, WasmValue, ++ RelocInfo::Mode rmode = RelocInfo::NONE); ++ inline void LoadFromInstance(Register dst, uint32_t offset, int size); ++ inline void LoadTaggedPointerFromInstance(Register dst, uint32_t offset); ++ inline void SpillInstance(Register instance); ++ inline void FillInstanceInto(Register dst); ++ inline void LoadTaggedPointer(Register dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LiftoffRegList pinned); ++ inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg, ++ uint32_t offset_imm, LoadType type, LiftoffRegList pinned, ++ uint32_t* protected_load_pc = nullptr, ++ bool is_load_mem = false); ++ inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm, ++ LiftoffRegister src, StoreType type, LiftoffRegList pinned, ++ uint32_t* protected_store_pc = nullptr, ++ bool is_store_mem = false); ++ inline void AtomicLoad(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned); ++ inline void AtomicStore(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned); ++ ++ inline void AtomicAdd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicSub(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicAnd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicOr(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicXor(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicExchange(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type); ++ ++ inline void AtomicCompareExchange(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegister expected, ++ LiftoffRegister new_value, ++ LiftoffRegister value, StoreType type); ++ ++ inline void AtomicFence(); ++ ++ inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ++ ValueType); ++ inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, ++ ValueType); ++ inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ++ ValueType); ++ ++ inline void Move(Register dst, Register src, ValueType); ++ inline void Move(DoubleRegister dst, DoubleRegister src, ValueType); ++ ++ inline void Spill(int offset, LiftoffRegister, ValueType); ++ inline void Spill(int offset, WasmValue); ++ inline void Fill(LiftoffRegister, int offset, ValueType); ++ // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. ++ // 4 bytes on the stack holding half of a 64-bit value. 
++ inline void FillI64Half(Register, int offset, RegPairHalf); ++ inline void FillStackSlotsWithZero(int start, int size); ++ ++ // i32 binops. ++ inline void emit_i32_add(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_addi(Register dst, Register lhs, int32_t imm); ++ inline void emit_i32_sub(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_mul(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_divs(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable); ++ inline void emit_i32_divu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero); ++ inline void emit_i32_rems(Register dst, Register lhs, Register rhs, ++ Label* trap_rem_by_zero); ++ inline void emit_i32_remu(Register dst, Register lhs, Register rhs, ++ Label* trap_rem_by_zero); ++ inline void emit_i32_and(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_andi(Register dst, Register lhs, int32_t imm); ++ inline void emit_i32_or(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_ori(Register dst, Register lhs, int32_t imm); ++ inline void emit_i32_xor(Register dst, Register lhs, Register rhs); ++ inline void emit_i32_xori(Register dst, Register lhs, int32_t imm); ++ inline void emit_i32_shl(Register dst, Register src, Register amount); ++ inline void emit_i32_shli(Register dst, Register src, int32_t amount); ++ inline void emit_i32_sar(Register dst, Register src, Register amount); ++ inline void emit_i32_sari(Register dst, Register src, int32_t amount); ++ inline void emit_i32_shr(Register dst, Register src, Register amount); ++ inline void emit_i32_shri(Register dst, Register src, int32_t amount); ++ ++ // i32 unops. ++ inline void emit_i32_clz(Register dst, Register src); ++ inline void emit_i32_ctz(Register dst, Register src); ++ inline bool emit_i32_popcnt(Register dst, Register src); ++ ++ // i64 binops. 
++ inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t imm); ++ inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable); ++ inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, Label* trap_div_by_zero); ++ inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, Label* trap_rem_by_zero); ++ inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, Label* trap_rem_by_zero); ++ inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t imm); ++ inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t imm); ++ inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t imm); ++ inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, ++ Register amount); ++ inline void emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, ++ int32_t amount); ++ inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, ++ Register amount); ++ inline void emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, ++ int32_t amount); ++ inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, ++ Register amount); ++ inline void emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, ++ int32_t amount); ++ ++ // i64 unops. ++ inline void emit_i64_clz(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src); ++ inline bool emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src); ++ ++ inline void emit_u32_to_intptr(Register dst, Register src); ++ ++ inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) { ++ if (kSystemPointerSize == 8) { ++ emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), ++ LiftoffRegister(rhs)); ++ } else { ++ emit_i32_add(dst, lhs, rhs); ++ } ++ } ++ inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) { ++ if (kSystemPointerSize == 8) { ++ emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs), ++ LiftoffRegister(rhs)); ++ } else { ++ emit_i32_sub(dst, lhs, rhs); ++ } ++ } ++ inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) { ++ if (kSystemPointerSize == 8) { ++ emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs), ++ LiftoffRegister(rhs)); ++ } else { ++ emit_i32_and(dst, lhs, rhs); ++ } ++ } ++ inline void emit_ptrsize_shri(Register dst, Register src, int amount) { ++ if (kSystemPointerSize == 8) { ++ emit_i64_shri(LiftoffRegister(dst), LiftoffRegister(src), amount); ++ } else { ++ emit_i32_shri(dst, src, amount); ++ } ++ } ++ ++ inline void emit_ptrsize_addi(Register dst, Register lhs, int32_t imm) { ++ if (kSystemPointerSize == 8) { ++ emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm); ++ } else { ++ emit_i32_addi(dst, lhs, imm); ++ } ++ } ++ ++ // f32 binops. 
++ inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ ++ // f32 unops. ++ inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src); ++ inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src); ++ inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src); ++ ++ // f64 binops. ++ inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs); ++ ++ // f64 unops. ++ inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src); ++ inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src); ++ inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src); ++ inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src); ++ ++ inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst, ++ LiftoffRegister src, Label* trap = nullptr); ++ ++ inline void emit_i32_signextend_i8(Register dst, Register src); ++ inline void emit_i32_signextend_i16(Register dst, Register src); ++ inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src); ++ ++ inline void emit_jump(Label*); ++ inline void emit_jump(Register); ++ ++ inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs, ++ Register rhs = no_reg); ++ // Set {dst} to 1 if condition holds, 0 otherwise. 
++ inline void emit_i32_eqz(Register dst, Register src); ++ inline void emit_i32_set_cond(Condition, Register dst, Register lhs, ++ Register rhs); ++ inline void emit_i64_eqz(Register dst, LiftoffRegister src); ++ inline void emit_i64_set_cond(Condition condition, Register dst, ++ LiftoffRegister lhs, LiftoffRegister rhs); ++ inline void emit_f32_set_cond(Condition condition, Register dst, ++ DoubleRegister lhs, DoubleRegister rhs); ++ inline void emit_f64_set_cond(Condition condition, Register dst, ++ DoubleRegister lhs, DoubleRegister rhs); ++ ++ inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i64x2_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void 
emit_s128_not(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, LiftoffRegister mask); ++ inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs); ++ inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_min_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_min_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_max_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs); ++ inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_min_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_min_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_max_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs); ++ inline void emit_i32x4_add(LiftoffRegister 
dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_min_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_min_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_max_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i32x4_max_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs); ++ inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_abs(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f64x2_neg(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f64x2_sqrt(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_sconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_uconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_sconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_uconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void 
emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src); ++ inline void emit_s128_and_not(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i16x8_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs); ++ inline void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src); ++ inline void emit_i8x16_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i8x16_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i16x8_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i16x8_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, ++ uint8_t imm_lane_idx); ++ inline void emit_i8x16_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ inline void emit_i16x8_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ inline void emit_i32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ inline void emit_i64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ inline void emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ inline void emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx); ++ ++ inline void StackCheck(Label* ool_code, Register limit_address); ++ ++ inline void CallTrapCallbackForTesting(); ++ ++ inline void AssertUnreachable(AbortReason reason); ++ ++ inline void PushRegisters(LiftoffRegList); ++ inline void PopRegisters(LiftoffRegList); ++ ++ inline void DropStackSlotsAndRet(uint32_t num_stack_slots); ++ ++ // Execute a C call. Arguments are pushed to the stack and a pointer to this ++ // region is passed to the C function. If {out_argument_type != kWasmStmt}, ++ // this is the return value of the C function, stored in {rets[0]}. Further ++ // outputs (specified in {sig->returns()}) are read from the buffer and stored ++ // in the remaining {rets} registers. 
++ inline void CallC(const FunctionSig* sig, const LiftoffRegister* args, ++ const LiftoffRegister* rets, ValueType out_argument_type, ++ int stack_bytes, ExternalReference ext_ref); ++ ++ inline void CallNativeWasmCode(Address addr); ++ // Indirect call: If {target == no_reg}, then pop the target from the stack. ++ inline void CallIndirect(const FunctionSig* sig, ++ compiler::CallDescriptor* call_descriptor, ++ Register target); ++ inline void CallRuntimeStub(WasmCode::RuntimeStubId sid); ++ ++ // Reserve space in the current frame, store address to space in {addr}. ++ inline void AllocateStackSlot(Register addr, uint32_t size); ++ inline void DeallocateStackSlot(uint32_t size); ++ ++ //////////////////////////////////// ++ // End of platform-specific part. // ++ //////////////////////////////////// ++ ++ uint32_t num_locals() const { return num_locals_; } ++ void set_num_locals(uint32_t num_locals); ++ ++ int GetTotalFrameSlotCount() const { ++ // TODO(zhin): Temporary for migration from index to offset. ++ return ((max_used_spill_offset_ + kStackSlotSize - 1) / kStackSlotSize); ++ } ++ ++ int GetTotalFrameSize() const { return max_used_spill_offset_; } ++ ++ ValueType local_type(uint32_t index) { ++ DCHECK_GT(num_locals_, index); ++ ValueType* locals = ++ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; ++ return locals[index]; ++ } ++ ++ void set_local_type(uint32_t index, ValueType type) { ++ ValueType* locals = ++ num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_; ++ locals[index] = type; ++ } ++ ++ CacheState* cache_state() { return &cache_state_; } ++ const CacheState* cache_state() const { return &cache_state_; } ++ ++ bool did_bailout() { return bailout_reason_ != kSuccess; } ++ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; } ++ const char* bailout_detail() const { return bailout_detail_; } ++ ++ void bailout(LiftoffBailoutReason reason, const char* detail) { ++ DCHECK_NE(kSuccess, reason); ++ if (bailout_reason_ != kSuccess) return; ++ AbortCompilation(); ++ bailout_reason_ = reason; ++ bailout_detail_ = detail; ++ } ++ ++ private: ++ LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned); ++ LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half); ++ ++ uint32_t num_locals_ = 0; ++ static constexpr uint32_t kInlineLocalTypes = 8; ++ union { ++ ValueType local_types_[kInlineLocalTypes]; ++ ValueType* more_local_types_; ++ }; ++ static_assert(sizeof(ValueType) == 4, ++ "Reconsider this inlining if ValueType gets bigger"); ++ CacheState cache_state_; ++ int max_used_spill_offset_ = StaticStackFrameSize(); ++ LiftoffBailoutReason bailout_reason_ = kSuccess; ++ const char* bailout_detail_ = nullptr; ++ ++ LiftoffRegister SpillOneRegister(LiftoffRegList candidates, ++ LiftoffRegList pinned); ++ // Spill one or two fp registers to get a pair of adjacent fp registers. ++ LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned); ++}; ++ ++std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState); ++ ++// ======================================================================= ++// Partially platform-independent implementations of the platform-dependent ++// part. 
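++//
++// The 32-bit section below synthesizes i64 bitwise ops from two independent
++// i32 ops on the low and high register halves. A minimal stand-alone sketch
++// of the idea (illustration only, not assembler code; the helper name is
++// hypothetical):
++//
++//   uint64_t And64ViaHalves(uint64_t l, uint64_t r) {
++//     uint32_t lo = uint32_t(l) & uint32_t(r);
++//     uint32_t hi = uint32_t(l >> 32) & uint32_t(r >> 32);
++//     return (uint64_t(hi) << 32) | lo;
++//   }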
++
++#ifdef V8_TARGET_ARCH_32_BIT
++
++namespace liftoff {
++template <void (LiftoffAssembler::*op)(Register, Register, Register)>
++void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
++                                     LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {
++  // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()},
++  // just first compute the lower half, then the upper half.
++  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
++    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
++    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
++    return;
++  }
++  // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()},
++  // we can compute this the other way around.
++  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
++    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
++    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
++    return;
++  }
++  // Otherwise, we need a temporary register.
++  Register tmp =
++      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
++  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
++  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
++  assm->Move(dst.low_gp(), tmp, kWasmI32);
++}
++
++template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
++void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
++                                        LiftoffRegister dst,
++                                        LiftoffRegister lhs, int32_t imm) {
++  // Top half of the immediate sign extended, either 0 or -1.
++  int32_t sign_extend = imm < 0 ? -1 : 0;
++  // If {dst.low_gp()} does not overlap with {lhs.high_gp()},
++  // just first compute the lower half, then the upper half.
++  if (dst.low() != lhs.high()) {
++    (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
++    (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
++    return;
++  }
++  // If {dst.high_gp()} does not overlap with {lhs.low_gp()},
++  // we can compute this the other way around.
++  if (dst.high() != lhs.low()) {
++    (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
++    (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
++    return;
++  }
++  // Otherwise, we need a temporary register.
++  Register tmp =
++      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
++  (assm->*op)(tmp, lhs.low_gp(), imm);
++  (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
++  assm->Move(dst.low_gp(), tmp, kWasmI32);
++}
++}  // namespace liftoff
++
++void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
++                                    LiftoffRegister rhs) {
++  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
++      this, dst, lhs, rhs);
++}
++
++void LiftoffAssembler::emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs,
++                                     int32_t imm) {
++  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_andi>(
++      this, dst, lhs, imm);
++}
++
++void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
++                                   LiftoffRegister rhs) {
++  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
++      this, dst, lhs, rhs);
++}
++
++void LiftoffAssembler::emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs,
++                                    int32_t imm) {
++  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_ori>(
++      this, dst, lhs, imm);
++}
++
++void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
++                                    LiftoffRegister rhs) {
++  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
++      this, dst, lhs, rhs);
++}
++
++void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
++                                     int32_t imm) {
++  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xori>(
++      this, dst, lhs, imm);
++}
++
++#endif  // V8_TARGET_ARCH_32_BIT
++
++// End of the partially platform-independent implementations of the
++// platform-dependent part.
++// =======================================================================
++
++class LiftoffStackSlots {
++ public:
++  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
++
++  void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
++           RegPairHalf half) {
++    slots_.emplace_back(src, src_offset, half);
++  }
++  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
++
++  inline void Construct();
++
++ private:
++  struct Slot {
++    // Allow move construction.
++    Slot(Slot&&) V8_NOEXCEPT = default;
++    Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
++         RegPairHalf half)
++        : src_(src), src_offset_(src_offset), half_(half) {}
++    explicit Slot(const LiftoffAssembler::VarState& src)
++        : src_(src), half_(kLowWord) {}
++
++    const LiftoffAssembler::VarState src_;
++    uint32_t src_offset_ = 0;
++    RegPairHalf half_;
++  };
++
++  base::SmallVector<Slot, 8> slots_;
++  LiftoffAssembler* const asm_;
++
++  DISALLOW_COPY_AND_ASSIGN(LiftoffStackSlots);
++};
++
++}  // namespace wasm
++}  // namespace internal
++}  // namespace v8
++
++// Include platform specific implementation.
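++//
++// A LoongArch64 build is expected to hook into this dispatch with one more
++// branch; a sketch (mirroring the existing entries, assuming the loong64
++// header added later in this patch is the include target):
++//
++//   #elif V8_TARGET_ARCH_LOONG64
++//   #include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"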
++#if V8_TARGET_ARCH_IA32 ++#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h" ++#elif V8_TARGET_ARCH_X64 ++#include "src/wasm/baseline/x64/liftoff-assembler-x64.h" ++#elif V8_TARGET_ARCH_ARM64 ++#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h" ++#elif V8_TARGET_ARCH_ARM ++#include "src/wasm/baseline/arm/liftoff-assembler-arm.h" ++#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 ++#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h" ++#elif V8_TARGET_ARCH_MIPS ++#include "src/wasm/baseline/mips/liftoff-assembler-mips.h" ++#elif V8_TARGET_ARCH_MIPS64 ++#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h" ++#elif V8_TARGET_ARCH_S390 ++#include "src/wasm/baseline/s390/liftoff-assembler-s390.h" ++#else ++#error Unsupported architecture. ++#endif ++ ++#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_ +diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h +new file mode 100644 +index 00000000..9eae614b +--- /dev/null ++++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h +@@ -0,0 +1,1849 @@ ++// Copyright 2017 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ ++#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ ++ ++#include "src/wasm/baseline/liftoff-assembler.h" ++ ++namespace v8 { ++namespace internal { ++namespace wasm { ++ ++namespace liftoff { ++ ++// Liftoff Frames. ++// ++// slot Frame ++// +--------------------+--------------------------- ++// n+4 | optional padding slot to keep the stack 16 byte aligned. ++// n+3 | parameter n | ++// ... | ... | ++// 4 | parameter 1 | or parameter 2 ++// 3 | parameter 0 | or parameter 1 ++// 2 | (result address) | or parameter 0 ++// -----+--------------------+--------------------------- ++// 1 | return addr (ra) | ++// 0 | previous frame (fp)| ++// -----+--------------------+ <-- frame ptr (fp) ++// -1 | 0xa: WASM | ++// -2 | instance | ++// -----+--------------------+--------------------------- ++// -3 | slot 0 | ^ ++// -4 | slot 1 | | ++// | | Frame slots ++// | | | ++// | | v ++// | optional padding slot to keep the stack 16 byte aligned. ++// -----+--------------------+ <-- stack ptr (sp) ++// ++ ++// fp-8 holds the stack marker, fp-16 is the instance parameter. ++constexpr int kInstanceOffset = 16; ++ ++inline MemOperand GetStackSlot(int offset) { ++ return MemOperand(offset > 0 ? 
fp : sp, -offset);
++}
++
++inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
++
++inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
++                 ValueType type) {
++  switch (type.kind()) {
++    case ValueType::kI32:
++      assm->Ld_w(dst.gp(), src);
++      break;
++    case ValueType::kI64:
++      assm->Ld_d(dst.gp(), src);
++      break;
++    case ValueType::kF32:
++      assm->Fld_s(dst.fp(), src);
++      break;
++    case ValueType::kF64:
++      assm->Fld_d(dst.fp(), src);
++      break;
++    case ValueType::kS128:
++      UNREACHABLE();
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
++                  LiftoffRegister src, ValueType type) {
++  MemOperand dst(base, offset);
++  switch (type.kind()) {
++    case ValueType::kI32:
++      assm->St_w(src.gp(), dst);
++      break;
++    case ValueType::kI64:
++      assm->St_d(src.gp(), dst);
++      break;
++    case ValueType::kF32:
++      assm->Fst_s(src.fp(), dst);
++      break;
++    case ValueType::kF64:
++      assm->Fst_d(src.fp(), dst);
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
++  switch (type.kind()) {
++    case ValueType::kI32:
++      assm->addi_d(sp, sp, -kSystemPointerSize);
++      assm->St_w(reg.gp(), MemOperand(sp, 0));
++      break;
++    case ValueType::kI64:
++      assm->push(reg.gp());
++      break;
++    case ValueType::kF32:
++      assm->addi_d(sp, sp, -kSystemPointerSize);
++      assm->Fst_s(reg.fp(), MemOperand(sp, 0));
++      break;
++    case ValueType::kF64:
++      assm->addi_d(sp, sp, -kSystemPointerSize);
++      assm->Fst_d(reg.fp(), MemOperand(sp, 0));
++      break;
++    case ValueType::kS128:
++      UNREACHABLE();
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++}  // namespace liftoff
++
++int LiftoffAssembler::PrepareStackFrame() {
++  int offset = pc_offset();
++  // When the constant that represents the stack frame size cannot be
++  // encoded as a 12-bit immediate, three instructions are needed to add it
++  // to sp, so we reserve space for this case.
++  addi_d(sp, sp, 0);
++  nop();
++  nop();
++  return offset;
++}
++
++void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
++  // We can't run out of space, just pass anything big enough to not cause the
++  // assembler to try to grow the buffer.
++  constexpr int kAvailableSpace = 256;
++  TurboAssembler patching_assembler(
++      nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
++      ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
++  // If {frame_size} fits into a 12-bit immediate, a single addi_d is
++  // generated and the two nops stay untouched. Otherwise, a lu12i_w/ori
++  // sequence loads it into a register and, as third instruction, add_d is
++  // generated.
++  patching_assembler.Add_d(sp, sp, Operand(-frame_size));
++}
++
++void LiftoffAssembler::FinishCode() {}
++
++void LiftoffAssembler::AbortCompilation() {}
++
++// static
++constexpr int LiftoffAssembler::StaticStackFrameSize() {
++  return liftoff::kInstanceOffset;
++}
++
++int LiftoffAssembler::SlotSizeForType(ValueType type) {
++  switch (type.kind()) {
++    case ValueType::kS128:
++      return type.element_size_bytes();
++    default:
++      return kStackSlotSize;
++  }
++}
++
++bool LiftoffAssembler::NeedsAlignment(ValueType type) {
++  switch (type.kind()) {
++    case ValueType::kS128:
++      return true;
++    default:
++      // No alignment because all other types are kStackSlotSize.
++ return false; ++ } ++} ++ ++void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, ++ RelocInfo::Mode rmode) { ++ switch (value.type().kind()) { ++ case ValueType::kI32: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); ++ break; ++ case ValueType::kI64: ++ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); ++ break; ++ case ValueType::kF32: ++ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, ++ int size) { ++ DCHECK_LE(offset, kMaxInt); ++ Ld_d(dst, liftoff::GetInstanceOperand()); ++ DCHECK(size == 4 || size == 8); ++ if (size == 4) { ++ Ld_w(dst, MemOperand(dst, offset)); ++ } else { ++ Ld_d(dst, MemOperand(dst, offset)); ++ } ++} ++ ++void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, ++ uint32_t offset) { ++ LoadFromInstance(dst, offset, kTaggedSize); ++} ++ ++void LiftoffAssembler::SpillInstance(Register instance) { ++ St_d(instance, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::FillInstanceInto(Register dst) { ++ Ld_d(dst, liftoff::GetInstanceOperand()); ++} ++ ++void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, ++ Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegList pinned) { ++ STATIC_ASSERT(kTaggedSize == kInt64Size); ++ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm, ++ LoadType::kI64Load, pinned); ++} ++ ++void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned, ++ uint32_t* protected_load_pc, bool is_load_mem) { ++ Register src = no_reg; ++ if (offset_reg != no_reg) { ++ src = GetUnusedRegister(kGpReg, pinned).gp(); ++ emit_ptrsize_add(src, src_addr, offset_reg); ++ } ++ MemOperand src_op = (offset_reg != no_reg) ? 
MemOperand(src, offset_imm) ++ : MemOperand(src_addr, offset_imm); ++ ++ if (protected_load_pc) *protected_load_pc = pc_offset(); ++ switch (type.value()) { ++ case LoadType::kI32Load8U: ++ case LoadType::kI64Load8U: ++ Ld_bu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load8S: ++ case LoadType::kI64Load8S: ++ Ld_b(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16U: ++ case LoadType::kI64Load16U: ++ TurboAssembler::Ld_hu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load16S: ++ case LoadType::kI64Load16S: ++ TurboAssembler::Ld_h(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load32U: ++ TurboAssembler::Ld_wu(dst.gp(), src_op); ++ break; ++ case LoadType::kI32Load: ++ case LoadType::kI64Load32S: ++ TurboAssembler::Ld_w(dst.gp(), src_op); ++ break; ++ case LoadType::kI64Load: ++ TurboAssembler::Ld_d(dst.gp(), src_op); ++ break; ++ case LoadType::kF32Load: ++ TurboAssembler::Fld_s(dst.fp(), src_op); ++ break; ++ case LoadType::kF64Load: ++ TurboAssembler::Fld_d(dst.fp(), src_op); ++ break; ++ case LoadType::kS128Load: ++ UNREACHABLE(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned, ++ uint32_t* protected_store_pc, bool is_store_mem) { ++ Register dst = no_reg; ++ MemOperand dst_op = MemOperand(dst_addr, offset_imm); ++ if (offset_reg != no_reg) { ++ if (is_store_mem) { ++ pinned.set(src); ++ } ++ dst = GetUnusedRegister(kGpReg, pinned).gp(); ++ emit_ptrsize_add(dst, dst_addr, offset_reg); ++ dst_op = MemOperand(dst, offset_imm); ++ } ++ ++ if (protected_store_pc) *protected_store_pc = pc_offset(); ++ switch (type.value()) { ++ case StoreType::kI32Store8: ++ case StoreType::kI64Store8: ++ St_b(src.gp(), dst_op); ++ break; ++ case StoreType::kI32Store16: ++ case StoreType::kI64Store16: ++ TurboAssembler::St_h(src.gp(), dst_op); ++ break; ++ case StoreType::kI32Store: ++ case StoreType::kI64Store32: ++ TurboAssembler::St_w(src.gp(), dst_op); ++ break; ++ case StoreType::kI64Store: ++ TurboAssembler::St_d(src.gp(), dst_op); ++ break; ++ case StoreType::kF32Store: ++ TurboAssembler::Fst_s(src.fp(), dst_op); ++ break; ++ case StoreType::kF64Store: ++ TurboAssembler::Fst_d(src.fp(), dst_op); ++ break; ++ case StoreType::kS128Store: ++ UNREACHABLE(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, ++ Register offset_reg, uint32_t offset_imm, ++ LoadType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicLoad"); ++} ++ ++void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister src, ++ StoreType type, LiftoffRegList pinned) { ++ bailout(kAtomics, "AtomicStore"); ++} ++ ++void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicAdd"); ++} ++ ++void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicSub"); ++} ++ ++void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicAnd"); ++} ++ ++void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, ++ uint32_t 
offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicOr"); ++} ++ ++void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicXor"); ++} ++ ++void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, ++ uint32_t offset_imm, ++ LiftoffRegister value, ++ LiftoffRegister result, StoreType type) { ++ bailout(kAtomics, "AtomicExchange"); ++} ++ ++void LiftoffAssembler::AtomicCompareExchange( ++ Register dst_addr, Register offset_reg, uint32_t offset_imm, ++ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, ++ StoreType type) { ++ bailout(kAtomics, "AtomicCompareExchange"); ++} ++ ++void LiftoffAssembler::AtomicFence() { dbar(0); } ++ ++void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ++ uint32_t caller_slot_idx, ++ ValueType type) { ++ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1)); ++ liftoff::Load(this, dst, src, type); ++} ++ ++void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, ++ uint32_t caller_slot_idx, ++ ValueType type) { ++ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); ++ liftoff::Store(this, fp, offset, src, type); ++} ++ ++void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ++ ValueType type) { ++ DCHECK_NE(dst_offset, src_offset); ++ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); ++ Fill(reg, src_offset, type); ++ Spill(dst_offset, reg, type); ++} ++ ++void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { ++ DCHECK_NE(dst, src); ++ // TODO(ksreten): Handle different sizes here. ++ TurboAssembler::Move(dst, src); ++} ++ ++void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ++ ValueType type) { ++ DCHECK_NE(dst, src); ++ if (type != kWasmS128) { ++ TurboAssembler::Move(dst, src); ++ } else { ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (type.kind()) { ++ case ValueType::kI32: ++ St_w(reg.gp(), dst); ++ break; ++ case ValueType::kI64: ++ St_d(reg.gp(), dst); ++ break; ++ case ValueType::kF32: ++ Fst_s(reg.fp(), dst); ++ break; ++ case ValueType::kF64: ++ TurboAssembler::Fst_d(reg.fp(), dst); ++ break; ++ case ValueType::kS128: ++ UNREACHABLE(); ++ break; ++ default: ++ UNREACHABLE(); ++ } ++} ++ ++void LiftoffAssembler::Spill(int offset, WasmValue value) { ++ RecordUsedSpillOffset(offset); ++ MemOperand dst = liftoff::GetStackSlot(offset); ++ switch (value.type().kind()) { ++ case ValueType::kI32: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg); ++ TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); ++ St_w(tmp.gp(), dst); ++ break; ++ } ++ case ValueType::kI64: { ++ LiftoffRegister tmp = GetUnusedRegister(kGpReg); ++ TurboAssembler::li(tmp.gp(), value.to_i64()); ++ St_d(tmp.gp(), dst); ++ break; ++ } ++ default: ++ // kWasmF32 and kWasmF64 are unreachable, since those ++ // constants are not tracked. 
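++      // (Liftoff only ever tracks i32 and i64 constants on its value stack,
++      // so no other type can reach this overload.)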
++      UNREACHABLE();
++  }
++}
++
++void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
++  MemOperand src = liftoff::GetStackSlot(offset);
++  switch (type.kind()) {
++    case ValueType::kI32:
++      Ld_w(reg.gp(), src);
++      break;
++    case ValueType::kI64:
++      Ld_d(reg.gp(), src);
++      break;
++    case ValueType::kF32:
++      Fld_s(reg.fp(), src);
++      break;
++    case ValueType::kF64:
++      TurboAssembler::Fld_d(reg.fp(), src);
++      break;
++    case ValueType::kS128:
++      UNREACHABLE();
++      break;
++    default:
++      UNREACHABLE();
++  }
++}
++
++void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
++  UNREACHABLE();
++}
++
++void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
++  DCHECK_LT(0, size);
++  RecordUsedSpillOffset(start + size);
++
++  if (size <= 12 * kStackSlotSize) {
++    // Special straight-line code for up to 12 slots. Generates one
++    // instruction per slot (<= 12 instructions total).
++    uint32_t remainder = size;
++    for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
++      St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
++    }
++    DCHECK(remainder == 4 || remainder == 0);
++    if (remainder) {
++      St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
++    }
++  } else {
++    // General case for bigger counts (12 instructions).
++    // Use a0 for start address (inclusive), a1 for end address (exclusive).
++    Push(a1, a0);
++    Add_d(a0, fp, Operand(-start - size));
++    Add_d(a1, fp, Operand(-start));
++
++    Label loop;
++    bind(&loop);
++    St_d(zero_reg, MemOperand(a0, 0));
++    addi_d(a0, a0, kSystemPointerSize);
++    BranchShort(&loop, ne, a0, Operand(a1));
++
++    Pop(a1, a0);
++  }
++}
++
++void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
++  TurboAssembler::Clz_d(dst.gp(), src.gp());
++}
++
++void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
++  TurboAssembler::Ctz_d(dst.gp(), src.gp());
++}
++
++bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
++                                       LiftoffRegister src) {
++  TurboAssembler::Popcnt_d(dst.gp(), src.gp());
++  return true;
++}
++
++void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
++  TurboAssembler::Mul_w(dst, lhs, rhs);
++}
++
++void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
++                                     Label* trap_div_by_zero,
++                                     Label* trap_div_unrepresentable) {
++  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
++
++  // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
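++  // Branchless check: each scratch register is cleared iff its respective
++  // condition holds, so their sum is zero only when both hold at once.
++  // Equivalent C sketch:
++  //   if (lhs == kMinInt && rhs == -1) goto trap_div_unrepresentable;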
++ TurboAssembler::li(kScratchReg, 1); ++ TurboAssembler::li(kScratchReg2, 1); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); ++ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); ++ add_d(kScratchReg, kScratchReg, kScratchReg2); ++ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, ++ Operand(zero_reg)); ++ ++ TurboAssembler::Div_w(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Div_wu(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Mod_w(dst, lhs, rhs); ++} ++ ++void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, ++ Label* trap_div_by_zero) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); ++ TurboAssembler::Mod_wu(dst, lhs, rhs); ++} ++ ++#define I32_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ ++ Register rhs) { \ ++ instruction(dst, lhs, rhs); \ ++ } ++ ++// clang-format off ++I32_BINOP(add, add_w) ++I32_BINOP(sub, sub_w) ++I32_BINOP(and, and_) ++I32_BINOP(or, or_) ++I32_BINOP(xor, xor_) ++// clang-format on ++ ++#undef I32_BINOP ++ ++#define I32_BINOP_I(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \ ++ int32_t imm) { \ ++ instruction(dst, lhs, imm); \ ++ } ++ ++// clang-format off ++I32_BINOP_I(add, Add_w) ++I32_BINOP_I(and, And) ++I32_BINOP_I(or, Or) ++I32_BINOP_I(xor, Xor) ++// clang-format on ++ ++#undef I32_BINOP_I ++ ++void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { ++ TurboAssembler::Clz_w(dst, src); ++} ++ ++void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { ++ TurboAssembler::Ctz_w(dst, src); ++} ++ ++bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { ++ TurboAssembler::Popcnt_w(dst, src); ++ return true; ++} ++ ++#define I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ ++ Register amount) { \ ++ instruction(dst, src, amount); \ ++ } ++#define I32_SHIFTOP_I(name, instruction, instruction1) \ ++ I32_SHIFTOP(name, instruction) \ ++ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \ ++ int amount) { \ ++ instruction1(dst, src, amount & 0x1f); \ ++ } ++ ++I32_SHIFTOP_I(shl, sll_w, slli_w) ++I32_SHIFTOP_I(sar, sra_w, srai_w) ++I32_SHIFTOP_I(shr, srl_w, srli_w) ++ ++#undef I32_SHIFTOP ++#undef I32_SHIFTOP_I ++ ++void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) { ++ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); ++} ++ ++bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs, ++ Label* trap_div_by_zero, ++ Label* trap_div_unrepresentable) { ++ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); ++ ++ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
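++  // (Same branchless scheme as the i32 case above, with the int64_t minimum
++  // as the overflow operand.)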
++  TurboAssembler::li(kScratchReg, 1);
++  TurboAssembler::li(kScratchReg2, 1);
++  TurboAssembler::LoadZeroOnCondition(
++      kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
++  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
++  add_d(kScratchReg, kScratchReg, kScratchReg2);
++  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
++                         Operand(zero_reg));
++
++  TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
++  return true;
++}
++
++bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs,
++                                     Label* trap_div_by_zero) {
++  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
++  TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
++  return true;
++}
++
++bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs,
++                                     Label* trap_div_by_zero) {
++  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
++  TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
++  return true;
++}
++
++bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs,
++                                     Label* trap_div_by_zero) {
++  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
++  TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
++  return true;
++}
++
++#define I64_BINOP(name, instruction)                                   \
++  void LiftoffAssembler::emit_i64_##name(                              \
++      LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
++    instruction(dst.gp(), lhs.gp(), rhs.gp());                         \
++  }
++
++// clang-format off
++I64_BINOP(add, Add_d)
++I64_BINOP(sub, Sub_d)
++I64_BINOP(and, and_)
++I64_BINOP(or, or_)
++I64_BINOP(xor, xor_)
++// clang-format on
++
++#undef I64_BINOP
++
++#define I64_BINOP_I(name, instruction)                         \
++  void LiftoffAssembler::emit_i64_##name##i(                   \
++      LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
++    instruction(dst.gp(), lhs.gp(), imm);                      \
++  }
++
++// clang-format off
++I64_BINOP_I(add, addi_d)
++I64_BINOP_I(and, And)
++I64_BINOP_I(or, Or)
++I64_BINOP_I(xor, Xor)
++// clang-format on
++
++#undef I64_BINOP_I
++
++#define I64_SHIFTOP(name, instruction)                             \
++  void LiftoffAssembler::emit_i64_##name(                          \
++      LiftoffRegister dst, LiftoffRegister src, Register amount) { \
++    instruction(dst.gp(), src.gp(), amount);                       \
++  }
++#define I64_SHIFTOP_I(name, instruction, instructioni)                         \
++  I64_SHIFTOP(name, instruction)                                               \
++  void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst,               \
++                                            LiftoffRegister src, int amount) { \
++    DCHECK(is_uint6(amount));                                                  \
++    instructioni(dst.gp(), src.gp(), amount);                                  \
++  }
++
++I64_SHIFTOP_I(shl, sll_d, slli_d)
++I64_SHIFTOP_I(sar, sra_d, srai_d)
++I64_SHIFTOP_I(shr, srl_d, srli_d)
++
++#undef I64_SHIFTOP
++#undef I64_SHIFTOP_I
++
++void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
++  add_w(dst, src, zero_reg);
++}
++
++void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
++  TurboAssembler::Neg_s(dst, src);
++}
++
++void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
++  TurboAssembler::Neg_d(dst, src);
++}
++
++void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
++                                    DoubleRegister rhs) {
++  Label ool, done;
++  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
++  Branch(&done);
++
++  bind(&ool);
++  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
++  bind(&done);
++}
++
++void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
++                                    DoubleRegister rhs) {
++  Label ool,
done; ++ TurboAssembler::Float32Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f32_copysign"); ++} ++ ++void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Min(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ Label ool, done; ++ TurboAssembler::Float64Max(dst, lhs, rhs, &ool); ++ Branch(&done); ++ ++ bind(&ool); ++ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); ++ bind(&done); ++} ++ ++void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, ++ DoubleRegister rhs) { ++ bailout(kComplexOperation, "f64_copysign"); ++} ++ ++#define FP_BINOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \ ++ DoubleRegister rhs) { \ ++ instruction(dst, lhs, rhs); \ ++ } ++#define FP_UNOP(name, instruction) \ ++ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(dst, src); \ ++ } ++#define FP_UNOP_RETURN_TRUE(name, instruction) \ ++ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ ++ instruction(dst, src); \ ++ return true; \ ++ } ++ ++FP_BINOP(f32_add, fadd_s) ++FP_BINOP(f32_sub, fsub_s) ++FP_BINOP(f32_mul, fmul_s) ++FP_BINOP(f32_div, fdiv_s) ++FP_UNOP(f32_abs, fabs_s) ++FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s) ++FP_UNOP_RETURN_TRUE(f32_floor, Floor_s) ++FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s) ++FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s) ++FP_UNOP(f32_sqrt, fsqrt_s) ++FP_BINOP(f64_add, fadd_d) ++FP_BINOP(f64_sub, fsub_d) ++FP_BINOP(f64_mul, fmul_d) ++FP_BINOP(f64_div, fdiv_d) ++FP_UNOP(f64_abs, fabs_d) ++FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d) ++FP_UNOP_RETURN_TRUE(f64_floor, Floor_d) ++FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d) ++FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d) ++FP_UNOP(f64_sqrt, fsqrt_d) ++ ++#undef FP_BINOP ++#undef FP_UNOP ++#undef FP_UNOP_RETURN_TRUE ++ ++bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ++ LiftoffRegister dst, ++ LiftoffRegister src, Label* trap) { ++ switch (opcode) { ++ case kExprI32ConvertI64: ++ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); ++ return true; ++ case kExprI32SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ ftintrz_w_s(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_s(dst.gp(), kScratchDoubleReg); ++ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, ++ // because INT32_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. 
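++      // The truncated result is converted back and compared against the
++      // rounded input; CEQ is false for any mismatch and for NaN inputs,
++      // so either takes the trap branch.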
++ movgr2fr_w(kScratchDoubleReg, dst.gp()); ++ ffint_s_w(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ // Avoid UINT32_MAX as an overflow indicator and use 0 instead, ++ // because 0 allows easier out-of-bounds detection. ++ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); ++ fcvt_s_d(converted_back.fp(), converted_back.fp()); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ ftintrz_w_d(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_s(dst.gp(), kScratchDoubleReg); ++ ++ // Checking if trap. ++ ffint_d_w(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32UConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); ++ ++ // Checking if trap. ++ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI32ReinterpretF32: ++ TurboAssembler::FmoveLow(dst.gp(), src.fp()); ++ return true; ++ case kExprI64SConvertI32: ++ slli_w(dst.gp(), src.gp(), 0); ++ return true; ++ case kExprI64UConvertI32: ++ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); ++ return true; ++ case kExprI64SConvertF32: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_s(rounded.fp(), src.fp()); ++ ftintrz_l_s(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_d(dst.gp(), kScratchDoubleReg); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. 
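++      // (Same round-trip compare as the i32 cases above, on the i64 result.)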
++ movgr2fr_d(kScratchDoubleReg, dst.gp()); ++ ffint_s_l(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF32: { ++ // Real conversion. ++ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64SConvertF64: { ++ LiftoffRegister rounded = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src)); ++ LiftoffRegister converted_back = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded)); ++ ++ // Real conversion. ++ TurboAssembler::Trunc_d(rounded.fp(), src.fp()); ++ ftintrz_l_d(kScratchDoubleReg, rounded.fp()); ++ movfr2gr_d(dst.gp(), kScratchDoubleReg); ++ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, ++ // because INT64_MIN allows easier out-of-bounds detection. ++ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); ++ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); ++ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); ++ ++ // Checking if trap. ++ movgr2fr_d(kScratchDoubleReg, dst.gp()); ++ ffint_d_l(converted_back.fp(), kScratchDoubleReg); ++ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); ++ TurboAssembler::BranchFalseF(trap); ++ return true; ++ } ++ case kExprI64UConvertF64: { ++ // Real conversion. ++ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, ++ kScratchReg); ++ ++ // Checking if trap. ++ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); ++ return true; ++ } ++ case kExprI64ReinterpretF64: ++ movfr2gr_d(dst.gp(), src.fp()); ++ return true; ++ case kExprF32SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ movgr2fr_w(scratch.fp(), src.gp()); ++ ffint_s_w(dst.fp(), scratch.fp()); ++ return true; ++ } ++ case kExprF32UConvertI32: ++ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF32ConvertF64: ++ fcvt_s_d(dst.fp(), src.fp()); ++ return true; ++ case kExprF32ReinterpretI32: ++ TurboAssembler::FmoveLow(dst.fp(), src.gp()); ++ return true; ++ case kExprF64SConvertI32: { ++ LiftoffRegister scratch = ++ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst)); ++ movgr2fr_w(scratch.fp(), src.gp()); ++ ffint_d_w(dst.fp(), scratch.fp()); ++ return true; ++ } ++ case kExprF64UConvertI32: ++ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp()); ++ return true; ++ case kExprF64ConvertF32: ++ fcvt_d_s(dst.fp(), src.fp()); ++ return true; ++ case kExprF64ReinterpretI64: ++ movgr2fr_d(dst.fp(), src.gp()); ++ return true; ++ default: ++ return false; ++ } ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { ++ bailout(kComplexOperation, "i32_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i8"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i16"); ++} ++ ++void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, ++ LiftoffRegister src) { ++ bailout(kComplexOperation, "i64_signextend_i32"); ++} ++ ++void 
LiftoffAssembler::emit_jump(Label* label) {
++  TurboAssembler::Branch(label);
++}
++
++void LiftoffAssembler::emit_jump(Register target) {
++  TurboAssembler::Jump(target);
++}
++
++void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
++                                      ValueType type, Register lhs,
++                                      Register rhs) {
++  if (rhs != no_reg) {
++    TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
++  } else {
++    TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
++  }
++}
++
++void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
++  sltui(dst, src, 1);
++}
++
++void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
++                                         Register lhs, Register rhs) {
++  Register tmp = dst;
++  if (dst == lhs || dst == rhs) {
++    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
++  }
++  // Write 1 as result.
++  TurboAssembler::li(tmp, 1);
++
++  // If negative condition is true, write 0 as result.
++  Condition neg_cond = NegateCondition(cond);
++  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
++
++  // If tmp != dst, result will be moved.
++  TurboAssembler::Move(dst, tmp);
++}
++
++void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
++  sltui(dst, src.gp(), 1);
++}
++
++void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
++                                         LiftoffRegister lhs,
++                                         LiftoffRegister rhs) {
++  Register tmp = dst;
++  if (dst == lhs.gp() || dst == rhs.gp()) {
++    tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
++  }
++  // Write 1 as result.
++  TurboAssembler::li(tmp, 1);
++
++  // If negative condition is true, write 0 as result.
++  Condition neg_cond = NegateCondition(cond);
++  TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
++                                      neg_cond);
++
++  // If tmp != dst, result will be moved.
++  TurboAssembler::Move(dst, tmp);
++}
++
++namespace liftoff {
++
++inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
++                                               bool* predicate) {
++  switch (condition) {
++    case kEqual:
++      *predicate = true;
++      return CEQ;
++    case kUnequal:
++      *predicate = false;
++      return CEQ;
++    case kUnsignedLessThan:
++      *predicate = true;
++      return CLT;
++    case kUnsignedGreaterEqual:
++      *predicate = false;
++      return CLT;
++    case kUnsignedLessEqual:
++      *predicate = true;
++      return CLE;
++    case kUnsignedGreaterThan:
++      *predicate = false;
++      return CLE;
++    default:
++      *predicate = true;
++      break;
++  }
++  UNREACHABLE();
++}
++
++}  // namespace liftoff
++
++void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
++                                         DoubleRegister lhs,
++                                         DoubleRegister rhs) {
++  Label not_nan, cont;
++  TurboAssembler::CompareIsNanF32(lhs, rhs);
++  TurboAssembler::BranchFalseF(&not_nan);
++  // If one of the operands is NaN, return 1 for f32.ne, else 0.
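++  // (Wasm semantics: every comparison involving NaN yields false, except
++  // "ne", which yields true; hence the special case below.)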
++  if (cond == ne) {
++    TurboAssembler::li(dst, 1);
++  } else {
++    TurboAssembler::Move(dst, zero_reg);
++  }
++  TurboAssembler::Branch(&cont);
++
++  bind(&not_nan);
++
++  TurboAssembler::li(dst, 1);
++  bool predicate;
++  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
++  TurboAssembler::CompareF32(lhs, rhs, fcond);
++  if (predicate) {
++    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
++  } else {
++    TurboAssembler::LoadZeroIfFPUCondition(dst);
++  }
++
++  bind(&cont);
++}
++
++void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
++                                         DoubleRegister lhs,
++                                         DoubleRegister rhs) {
++  Label not_nan, cont;
++  TurboAssembler::CompareIsNanF64(lhs, rhs);
++  TurboAssembler::BranchFalseF(&not_nan);
++  // If one of the operands is NaN, return 1 for f64.ne, else 0.
++  if (cond == ne) {
++    TurboAssembler::li(dst, 1);
++  } else {
++    TurboAssembler::Move(dst, zero_reg);
++  }
++  TurboAssembler::Branch(&cont);
++
++  bind(&not_nan);
++
++  TurboAssembler::li(dst, 1);
++  bool predicate;
++  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
++  TurboAssembler::CompareF64(lhs, rhs, fcond);
++  if (predicate) {
++    TurboAssembler::LoadZeroIfNotFPUCondition(dst);
++  } else {
++    TurboAssembler::LoadZeroIfFPUCondition(dst);
++  }
++
++  bind(&cont);
++}
++
++void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
++                                        LiftoffRegister src) {}
++
++void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
++                                     LiftoffRegister rhs) {}
++
++void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
++                                       LiftoffRegister rhs) {}
++
++void
LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { ++} ++ ++void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_s128_select(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ LiftoffRegister mask) {} ++ ++void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void 
LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_add_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_add_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sub_saturate_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sub_saturate_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, ++ int32_t rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void 
LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ LiftoffRegister rhs) {} ++ ++void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void 
LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst, ++ LiftoffRegister src) {} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, ++ LiftoffRegister lhs, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, ++ LiftoffRegister src1, ++ LiftoffRegister src2, ++ uint8_t imm_lane_idx) {} ++ ++void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { ++ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); ++ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); ++} ++ ++void LiftoffAssembler::CallTrapCallbackForTesting() { ++ PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp()); ++ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); ++} ++ ++void LiftoffAssembler::AssertUnreachable(AbortReason reason) { ++ if (emit_debug_code()) Abort(reason); ++} ++ ++void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { ++ LiftoffRegList gp_regs = regs & kGpCacheRegList; ++ unsigned num_gp_regs = gp_regs.GetNumRegsSet(); ++ if (num_gp_regs) { ++ unsigned offset = num_gp_regs * kSystemPointerSize; ++ addi_d(sp, sp, -offset); ++ while (!gp_regs.is_empty()) { ++ LiftoffRegister reg = gp_regs.GetFirstRegSet(); ++ offset -= kSystemPointerSize; ++ St_d(reg.gp(), MemOperand(sp, offset)); ++ gp_regs.clear(reg); ++ } ++ DCHECK_EQ(offset, 0); ++ } ++ LiftoffRegList fp_regs = regs & kFpCacheRegList; ++ unsigned num_fp_regs = fp_regs.GetNumRegsSet(); ++ if (num_fp_regs) { ++ unsigned slot_size = /*IsEnabled(MIPS_SIMD) ? 
16 :*/ 8;
++ addi_d(sp, sp, -(num_fp_regs * slot_size));
++ unsigned offset = 0;
++ while (!fp_regs.is_empty()) {
++ LiftoffRegister reg = fp_regs.GetFirstRegSet();
++ if (0 /*IsEnabled(MIPS_SIMD)*/) {
++ // TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset));
++ } else {
++ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
++ }
++ fp_regs.clear(reg);
++ offset += slot_size;
++ }
++ DCHECK_EQ(offset, num_fp_regs * slot_size);
++ }
++}
++
++void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
++ LiftoffRegList fp_regs = regs & kFpCacheRegList;
++ unsigned fp_offset = 0;
++ while (!fp_regs.is_empty()) {
++ LiftoffRegister reg = fp_regs.GetFirstRegSet();
++ if (0 /*IsEnabled(MIPS_SIMD)*/) {
++ // TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset));
++ } else {
++ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
++ }
++ fp_regs.clear(reg);
++ fp_offset += (/*IsEnabled(MIPS_SIMD) ? 16 :*/ 8);
++ }
++ if (fp_offset) addi_d(sp, sp, fp_offset);
++ LiftoffRegList gp_regs = regs & kGpCacheRegList;
++ unsigned gp_offset = 0;
++ while (!gp_regs.is_empty()) {
++ LiftoffRegister reg = gp_regs.GetLastRegSet();
++ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
++ gp_regs.clear(reg);
++ gp_offset += kSystemPointerSize;
++ }
++ addi_d(sp, sp, gp_offset);
++}
++
++void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
++ DCHECK_LT(num_stack_slots,
++ (1 << 16) / kSystemPointerSize); // 16 bit immediate
++ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
++}
++
++void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
++ const LiftoffRegister* args,
++ const LiftoffRegister* rets,
++ ValueType out_argument_type, int stack_bytes,
++ ExternalReference ext_ref) {
++ addi_d(sp, sp, -stack_bytes);
++
++ int arg_bytes = 0;
++ for (ValueType param_type : sig->parameters()) {
++ liftoff::Store(this, sp, arg_bytes, *args++, param_type);
++ arg_bytes += param_type.element_size_bytes();
++ }
++ DCHECK_LE(arg_bytes, stack_bytes);
++
++ // Pass a pointer to the buffer with the arguments to the C function.
++ // On LoongArch64, the first argument is passed in {a0}.
++ constexpr Register kFirstArgReg = a0;
++ mov(kFirstArgReg, sp);
++
++ // Now call the C function.
++ constexpr int kNumCCallArgs = 1;
++ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
++ CallCFunction(ext_ref, kNumCCallArgs);
++
++ // Move return value to the right register.
++ const LiftoffRegister* next_result_reg = rets;
++ if (sig->return_count() > 0) {
++ DCHECK_EQ(1, sig->return_count());
++ constexpr Register kReturnReg = a0;
++ if (kReturnReg != next_result_reg->gp()) {
++ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
++ }
++ ++next_result_reg;
++ }
++
++ // Load potential output value from the buffer on the stack.
++ if (out_argument_type != kWasmStmt) {
++ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
++ }
++
++ addi_d(sp, sp, stack_bytes);
++}
++
++void LiftoffAssembler::CallNativeWasmCode(Address addr) {
++ Call(addr, RelocInfo::WASM_CALL);
++}
++
++void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
++ compiler::CallDescriptor* call_descriptor,
++ Register target) {
++ if (target == no_reg) {
++ pop(kScratchReg);
++ Call(kScratchReg);
++ } else {
++ Call(target);
++ }
++}
++
++void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
++ // A direct call to a wasm runtime stub defined in this module.
++ // Just encode the stub index. This will be patched at relocation.
++ Call(static_cast<Address>
(sid), RelocInfo::WASM_STUB_CALL); ++} ++ ++void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { ++ addi_d(sp, sp, -size); ++ TurboAssembler::Move(addr, sp); ++} ++ ++void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ++ addi_d(sp, sp, size); ++} ++ ++void LiftoffStackSlots::Construct() { ++ for (auto& slot : slots_) { ++ const LiftoffAssembler::VarState& src = slot.src_; ++ switch (src.loc()) { ++ case LiftoffAssembler::VarState::kStack: ++ if (src.type() != kWasmS128) { ++ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); ++ asm_->push(kScratchReg); ++ } else { ++ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8)); ++ asm_->push(kScratchReg); ++ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); ++ asm_->push(kScratchReg); ++ } ++ break; ++ case LiftoffAssembler::VarState::kRegister: ++ liftoff::push(asm_, src.reg(), src.type()); ++ break; ++ case LiftoffAssembler::VarState::kIntConst: { ++ asm_->li(kScratchReg, Operand(src.i32_const())); ++ asm_->push(kScratchReg); ++ break; ++ } ++ } ++ } ++} ++ ++} // namespace wasm ++} // namespace internal ++} // namespace v8 ++ ++#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_ +diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc +index 90cdad46..5077edd4 100644 +--- a/deps/v8/src/wasm/jump-table-assembler.cc ++++ b/deps/v8/src/wasm/jump-table-assembler.cc +@@ -268,6 +268,37 @@ void JumpTableAssembler::NopBytes(int bytes) { + } + } + ++#elif V8_TARGET_ARCH_LOONG64 ++void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, ++ Address lazy_compile_target) { ++ DCHECK(is_int32(func_index)); ++ int start = pc_offset(); ++ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr ++ // Jump produces max. 3 instructions for 32-bit platform ++ // and max. 4 instructions for 64-bit platform. 
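++ // Worked example of the slot budget (assuming kInstrSize == 4 on
++ // LoongArch64): li emits at most 2 instructions and Jump at most 4,
++ // i.e. at most 24 bytes per slot, so with kLazyCompileTableSlotSize
++ // at 8 * kInstrSize the loop below pads each slot with the remaining
++ // nops to keep every lazy-compile slot uniformly sized.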
++ Jump(lazy_compile_target, RelocInfo::NONE); ++ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset(); ++ DCHECK_EQ(nop_bytes % kInstrSize, 0); ++ for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); ++} ++bool JumpTableAssembler::EmitJumpSlot(Address target) { ++ PatchAndJump(target); ++ return true; ++} ++void JumpTableAssembler::EmitFarJumpSlot(Address target) { ++ JumpToInstructionStream(target); ++} ++void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { ++ UNREACHABLE(); ++} ++void JumpTableAssembler::NopBytes(int bytes) { ++ DCHECK_LE(0, bytes); ++ DCHECK_EQ(0, bytes % kInstrSize); ++ for (; bytes > 0; bytes -= kInstrSize) { ++ nop(); ++ } ++} ++ + #elif V8_TARGET_ARCH_PPC64 + void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, + Address lazy_compile_target) { +diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h +index 253f0bc0..2137afcd 100644 +--- a/deps/v8/src/wasm/jump-table-assembler.h ++++ b/deps/v8/src/wasm/jump-table-assembler.h +@@ -215,6 +215,12 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { + static constexpr int kJumpTableSlotSize = 8 * kInstrSize; + static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize; + static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; ++#elif V8_TARGET_ARCH_LOONG64 ++ // TODO ++ static constexpr int kJumpTableLineSize = 8 * kInstrSize; ++ static constexpr int kJumpTableSlotSize = 8 * kInstrSize; ++ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize; ++ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize; + #else + #error Unknown architecture. + #endif +diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h +index 7e56ea6e..4e8d0c65 100644 +--- a/deps/v8/src/wasm/wasm-linkage.h ++++ b/deps/v8/src/wasm/wasm-linkage.h +@@ -75,6 +75,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1}; + constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; + constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; + ++#elif V8_TARGET_ARCH_LOONG64 ++// =========================================================================== ++// == LOONG64 TODO ============================================================= ++// =========================================================================== ++constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7}; ++constexpr Register kGpReturnRegisters[] = {a0, a1}; ++constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; ++constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; ++ + #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 + // =========================================================================== + // == ppc & ppc64 ============================================================ +diff --git a/deps/v8/test/cctest/test-assembler-loong64.cc b/deps/v8/test/cctest/test-assembler-loong64.cc +new file mode 100644 +index 00000000..366bcb7c +--- /dev/null ++++ b/deps/v8/test/cctest/test-assembler-loong64.cc +@@ -0,0 +1,5127 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. 
++// * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following
++// disclaimer in the documentation and/or other materials provided
++// with the distribution.
++// * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived
++// from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++#include <iostream>  // NOLINT(readability/streams)
++
++#include "src/base/utils/random-number-generator.h"
++#include "src/codegen/assembler-inl.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/diagnostics/disassembler.h"
++#include "src/execution/simulator.h"
++#include "src/heap/factory.h"
++#include "src/init/v8.h"
++#include "test/cctest/cctest.h"
++
++namespace v8 {
++namespace internal {
++
++// Define these function prototypes to match JSEntryFunction in execution.cc.
++// TODO(loong64): Refine these signatures per test case.
++using F1 = void*(int x, int p1, int p2, int p3, int p4);
++using F2 = void*(int x, int y, int p2, int p3, int p4);
++using F3 = void*(void* p, int p1, int p2, int p3, int p4);
++using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
++using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
++
++#define __ assm.
++// v0->a2, v1->a3
++TEST(LA0) {
++ CcTest::InitializeVM();
++ Isolate* isolate = CcTest::i_isolate();
++ HandleScope scope(isolate);
++
++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++ // Addition.
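++ // Worked example: the harness below calls f.Call(0xAB0, ...), so a0
++ // arrives as 0xAB0, a2 becomes 0xAB0 + 0xC = 0xABC, and the CHECK_EQ
++ // at the end verifies exactly that value.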
++ __ addi_d(a2, a0, 0xC); ++ ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0xAB0, 0, 0, 0, 0)); ++ CHECK_EQ(0xABCL, res); ++} ++ ++TEST(LA1) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ Label L, C; ++ ++ __ ori(a1, a0, 0); ++ __ ori(a2, zero_reg, 0); ++ __ b(&C); ++ ++ __ bind(&L); ++ __ add_d(a2, a2, a1); ++ __ addi_d(a1, a1, -1); ++ ++ __ bind(&C); ++ __ ori(a3, a1, 0); ++ ++ __ Branch(&L, ne, a3, Operand((int64_t)0)); ++ ++ __ or_(a0, a2, zero_reg); ++ __ or_(a1, a3, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(50, 0, 0, 0, 0)); ++ CHECK_EQ(1275L, res); ++} ++ ++TEST(LA2) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ ori(a4, zero_reg, 0); // 00000000 ++ __ lu12i_w(a4, 0x12345); // 12345000 ++ __ ori(a4, a4, 0); // 12345000 ++ __ ori(a2, a4, 0xF0F); // 12345F0F ++ __ Branch(&error, ne, a2, Operand(0x12345F0F)); ++ ++ __ ori(a4, zero_reg, 0); ++ __ lu32i_d(a4, 0x12345); // 1 2345 0000 0000 ++ __ ori(a4, a4, 0xFFF); // 1 2345 0000 0FFF ++ __ addi_d(a2, a4, 1); ++ __ Branch(&error, ne, a2, Operand(0x1234500001000)); ++ ++ __ ori(a4, zero_reg, 0); ++ __ lu52i_d(a4, zero_reg, 0x123); // 1230 0000 0000 0000 ++ __ ori(a4, a4, 0xFFF); // 123F 0000 0000 0FFF ++ __ addi_d(a2, a4, 1); // 1230 0000 0000 1000 ++ __ Branch(&error, ne, a2, Operand(0x1230000000001000)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA3) { ++ // Test 32bit calculate instructions. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ li(a4, 0x00000004); ++ __ li(a5, 0x00001234); ++ __ li(a6, 0x12345678); ++ __ li(a7, 0x7FFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFC)); ++ __ li(t1, static_cast(0xFFFFEDCC)); ++ __ li(t2, static_cast(0xEDCBA988)); ++ __ li(t3, static_cast(0x80000000)); ++ ++ __ ori(a2, zero_reg, 0); // 0x00000000 ++ __ add_w(a2, a4, a5); // 0x00001238 ++ __ sub_w(a2, a2, a4); // 0x00001234 ++ __ Branch(&error, ne, a2, Operand(0x00001234)); ++ __ ori(a3, zero_reg, 0); // 0x00000000 ++ __ add_w(a3, a7, a4); // 32bit addu result is sign-extended into 64bit reg. 
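++ // E.g. 0x7FFFFFFF + 0x4 wraps to 0x80000003 in 32 bits, which the
++ // sign-extension widens to 0xFFFFFFFF80000003: the operand checked by
++ // the branch below.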
++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000003));
++
++ __ sub_w(a3, t3, a4); // 0x7FFFFFFC
++ __ Branch(&error, ne, a3, Operand(0x7FFFFFFC));
++
++ __ ori(a2, zero_reg, 0); // 0x00000000
++ __ ori(a3, zero_reg, 0); // 0x00000000
++ __ addi_w(a2, zero_reg, 0x421); // 0x00000421
++ __ addi_w(a2, a2, -0x1); // 0x00000420
++ __ addi_w(a2, a2, -0x20); // 0x00000400
++ __ Branch(&error, ne, a2, Operand(0x00000400));
++ __ addi_w(a3, a7, 0x1); // 0x80000000 - result is sign-extended.
++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000000));
++
++ __ ori(a2, zero_reg, 0); // 0x00000000
++ __ ori(a3, zero_reg, 0); // 0x00000000
++ __ alsl_w(a2, a6, a4, 3); // 0xFFFFFFFF91A2B3C4
++ __ alsl_w(a2, a2, a4, 2); // 0x468ACF14
++ __ Branch(&error, ne, a2, Operand(0x468acf14));
++ __ ori(a0, zero_reg, 31);
++ __ alsl_wu(a3, a6, a4, 3); // 0x91A2B3C4
++ __ alsl_wu(a3, a3, a7, 1); // 0xFFFFFFFFA3456787
++ __ Branch(&error, ne, a3, Operand(0xA3456787));
++
++ __ ori(a2, zero_reg, 0);
++ __ ori(a3, zero_reg, 0);
++ __ mul_w(a2, a5, a7);
++ __ div_w(a2, a2, a4);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFB73));
++ __ mul_w(a3, a4, t1);
++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFB730));
++ __ div_w(a3, t3, a4);
++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFE0000000));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulh_w(a2, a4, t1);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFFFF));
++ __ mulh_w(a2, a4, a6);
++ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulh_wu(a2, a4, t1);
++ __ Branch(&error, ne, a2, Operand(0x3));
++ __ mulh_wu(a2, a4, a6);
++ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulw_d_w(a2, a4, t1);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFB730));
++ __ mulw_d_w(a2, a4, a6);
++ __ Branch(&error, ne, a2, Operand(0x48D159E0));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulw_d_wu(a2, a4, t1);
++ __ Branch(&error, ne, a2, Operand(0x3FFFFB730));
++ __ ori(a2, zero_reg, 81);
++ __ mulw_d_wu(a2, a4, a6);
++ __ Branch(&error, ne, a2, Operand(0x48D159E0));
++
++ __ ori(a2, zero_reg, 0);
++ __ div_wu(a2, a7, a5);
++ __ Branch(&error, ne, a2, Operand(0x70821));
++ __ div_wu(a2, t0, a5);
++ __ Branch(&error, ne, a2, Operand(0xE1042));
++ __ div_wu(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(0x1));
++
++ __ ori(a2, zero_reg, 0);
++ __ mod_w(a2, a6, a5);
++ __ Branch(&error, ne, a2, Operand(0xDA8));
++ __ ori(a2, zero_reg, 0);
++ __ mod_w(a2, t2, a5);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
++ __ ori(a2, zero_reg, 0);
++ __ mod_w(a2, t2, t1);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
++
++ __ ori(a2, zero_reg, 0);
++ __ mod_wu(a2, a6, a5);
++ __ Branch(&error, ne, a2, Operand(0xDA8));
++ __ mod_wu(a2, t2, a5);
++ __ Branch(&error, ne, a2, Operand(0xF0));
++ __ mod_wu(a2, t2, t1);
++ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFEDCBA988));
++
++ __ li(a2, 0x31415926);
++ __ b(&exit);
++
++ __ bind(&error);
++ __ li(a2, 0x666);
++
++ __ bind(&exit);
++ __ or_(a0, a2, zero_reg);
++ __ jirl(zero_reg, ra, 0);
++
++ CodeDesc desc;
++ assm.GetCode(isolate, &desc);
++ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++ auto f = GeneratedCode<F2>::FromCode(*code);
++ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
++
++ CHECK_EQ(0x31415926L, res);
++}
++
++TEST(LA4) {
++ // Test 64bit calculate instructions.
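++ // The overflow checks in this test rely on two's-complement
++ // wraparound, e.g. 0x51F4B764A26E7412 + 0x7FFFFFFFFFFFFFFF equals
++ // 0xD1F4B764A26E7411 once truncated to 64 bits.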
++ CcTest::InitializeVM();
++ Isolate* isolate = CcTest::i_isolate();
++ HandleScope scope(isolate);
++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++ Label exit, error;
++
++ __ li(a4, 0x17312);
++ __ li(a5, 0x1012131415161718);
++ __ li(a6, 0x51F4B764A26E7412);
++ __ li(a7, 0x7FFFFFFFFFFFFFFF);
++ __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
++ __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
++ __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
++ __ li(t3, static_cast<int64_t>(0x8000000000000000));
++
++ __ ori(a2, zero_reg, 0);
++ __ add_d(a2, a4, a5);
++ __ sub_d(a2, a2, a4);
++ __ Branch(&error, ne, a2, Operand(0x1012131415161718));
++ __ ori(a3, zero_reg, 0);
++ __ add_d(a3, a6, a7); // overflow
++ __ Branch(&error, ne, a3, Operand(0xd1f4b764a26e7411));
++ __ sub_d(a3, t3, a4); // overflow
++ __ Branch(&error, ne, a3, Operand(0x7ffffffffffe8cee));
++
++ __ ori(a2, zero_reg, 0);
++ __ addi_d(a2, a5, 0x412); // positive value
++ __ Branch(&error, ne, a2, Operand(0x1012131415161b2a));
++ __ addi_d(a2, a7, 0x547); // negative value
++ __ Branch(&error, ne, a2, Operand(0x8000000000000546));
++
++ __ ori(t4, zero_reg, 0);
++ __ addu16i_d(a2, t4, 0x1234);
++ __ Branch(&error, ne, a2, Operand(0x12340000));
++ __ addu16i_d(a2, a2, 0x9876);
++ __ Branch(&error, ne, a2, Operand(0xffffffffaaaa0000));
++
++ __ ori(a2, zero_reg, 0);
++ __ alsl_d(a2, t2, t0, 3);
++ __ Branch(&error, ne, a2, Operand(0xf92d43e211b374f));
++
++ __ ori(a2, zero_reg, 0);
++ __ mul_d(a2, a5, a6);
++ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0));
++ __ mul_d(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(0x57ad69f40f870584));
++ __ mul_d(a2, a4, t0);
++ __ Branch(&error, ne, a2, Operand(0xfffffffff07523fe));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulh_d(a2, a5, a6);
++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
++ __ mulh_d(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(0x15d));
++
++ __ ori(a2, zero_reg, 0);
++ __ mulh_du(a2, a5, a6);
++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
++ __ mulh_du(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(0xdf6b8f35a10e1700));
++ __ mulh_du(a2, a4, t0);
++ __ Branch(&error, ne, a2, Operand(0x17311));
++
++ __ ori(a2, zero_reg, 0);
++ __ div_d(a2, a5, a6);
++ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
++ __ div_d(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
++ __ div_d(a2, t1, a4);
++ __ Branch(&error, ne, a2, Operand(0xffffe985f631e6d9));
++
++ __ ori(a2, zero_reg, 0);
++ __ div_du(a2, a5, a6);
++ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
++ __ div_du(a2, t0, t1);
++ __ Branch(&error, ne, a2, Operand(0x1));
++ __ div_du(a2, t1, a4);
++ __ Branch(&error, ne, a2, Operand(0x9a22ffd3973d));
++
++ __ ori(a2, zero_reg, 0);
++ __ mod_d(a2, a6, a4);
++ __ Branch(&error, ne, a2, Operand(0x13558));
++ __ mod_d(a2, t2, t0);
++ __ Branch(&error, ne, a2, Operand(0xfffffffffffffb0a));
++ __ mod_d(a2, t1, a4);
++ __ Branch(&error, ne, a2, Operand(0xffffffffffff6a1a));
++
++ __ ori(a2, zero_reg, 0);
++ __ mod_du(a2, a6, a4);
++ __ Branch(&error, ne, a2, Operand(0x13558));
++ __ mod_du(a2, t2, t0);
++ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841));
++ __ mod_du(a2, t1, a4);
++ __ Branch(&error, ne, a2, Operand(0x1712));
++
++ // Everything was correctly executed. Load the expected result.
++ __ li(a2, 0x31415926);
++ __ b(&exit);
++
++ __ bind(&error);
++ __ li(a2, 0x666);
++ // Got an error. Return a wrong result.
++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA5) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ slt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ slt(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ slt(a2, t1, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ sltu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ sltu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ sltu(a2, t1, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ slti(a2, a5, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ slti(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ sltui(a2, a5, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ sltui(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ and_(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x1310)); ++ __ and_(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(0x51F4B764A26E7412)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ or_(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f)); ++ __ or_(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ nor(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5)); ++ __ nor(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(0x8000000000000000)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ xor_(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b)); ++ __ xor_(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0x1f25a87c4236841)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ andn(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x16002)); ++ __ andn(a2, a6, a7); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ ori(a2, zero_reg, 0); ++ __ orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ orn(a2, t2, t3); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffff)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ andi(a2, a4, 0x123); ++ __ Branch(&error, ne, a2, Operand(0x102)); ++ __ andi(a2, a6, 0xDCB); ++ __ Branch(&error, ne, a2, Operand(0x402)); ++ ++ __ ori(a2, zero_reg, 0); ++ __ xori(a2, t0, 0x123); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff464)); ++ __ xori(a2, t2, 0xDCB); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a)); ++ ++ // Everything was correctly executed. Load the expected result. ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ // Got an error. Return a wrong result. 
++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(LA6) { ++ // Test loads and stores instruction. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int64_t si3; ++ int64_t result_ld_b_si1; ++ int64_t result_ld_b_si2; ++ int64_t result_ld_h_si1; ++ int64_t result_ld_h_si2; ++ int64_t result_ld_w_si1; ++ int64_t result_ld_w_si2; ++ int64_t result_ld_d_si1; ++ int64_t result_ld_d_si3; ++ int64_t result_ld_bu_si2; ++ int64_t result_ld_hu_si2; ++ int64_t result_ld_wu_si2; ++ int64_t result_st_b; ++ int64_t result_st_h; ++ int64_t result_st_w; ++ }; ++ T t; ++ ++ // Ld_b ++ __ Ld_b(a4, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si1))); ++ ++ __ Ld_b(a4, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si2))); ++ ++ // Ld_h ++ __ Ld_h(a5, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si1))); ++ ++ __ Ld_h(a5, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si2))); ++ ++ // Ld_w ++ __ Ld_w(a6, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si1))); ++ ++ __ Ld_w(a6, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si2))); ++ ++ // Ld_d ++ __ Ld_d(a7, MemOperand(a0, offsetof(T, si1))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si1))); ++ ++ __ Ld_d(a7, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si3))); ++ ++ // Ld_bu ++ __ Ld_bu(t0, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_ld_bu_si2))); ++ ++ // Ld_hu ++ __ Ld_hu(t1, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_ld_hu_si2))); ++ ++ // Ld_wu ++ __ Ld_wu(t2, MemOperand(a0, offsetof(T, si2))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_ld_wu_si2))); ++ ++ // St ++ __ li(t4, 0x11111111); ++ ++ // St_b ++ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_st_b))); ++ __ St_b(t4, MemOperand(a0, offsetof(T, result_st_b))); ++ ++ // St_h ++ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t6, MemOperand(a0, offsetof(T, result_st_h))); ++ __ St_h(t4, MemOperand(a0, offsetof(T, result_st_h))); ++ ++ // St_w ++ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t7, MemOperand(a0, offsetof(T, result_st_w))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_st_w))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x11223344; ++ t.si2 = 0x99AABBCC; ++ t.si3 = 0x1122334455667788; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x44), t.result_ld_b_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ld_b_si2); ++ ++ CHECK_EQ(static_cast(0x3344), t.result_ld_h_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ld_h_si2); ++ ++ 
CHECK_EQ(static_cast(0x11223344), t.result_ld_w_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ld_w_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ld_d_si1); ++ CHECK_EQ(static_cast(0x1122334455667788), t.result_ld_d_si3); ++ ++ CHECK_EQ(static_cast(0xCC), t.result_ld_bu_si2); ++ CHECK_EQ(static_cast(0xBBCC), t.result_ld_hu_si2); ++ CHECK_EQ(static_cast(0x99AABBCC), t.result_ld_wu_si2); ++ ++ CHECK_EQ(static_cast(0x1122334455667711), t.result_st_b); ++ CHECK_EQ(static_cast(0x1122334455661111), t.result_st_h); ++ CHECK_EQ(static_cast(0x1122334411111111), t.result_st_w); ++} ++ ++TEST(LA7) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int64_t si3; ++ int64_t result_ldx_b_si1; ++ int64_t result_ldx_b_si2; ++ int64_t result_ldx_h_si1; ++ int64_t result_ldx_h_si2; ++ int64_t result_ldx_w_si1; ++ int64_t result_ldx_w_si2; ++ int64_t result_ldx_d_si1; ++ int64_t result_ldx_d_si3; ++ int64_t result_ldx_bu_si2; ++ int64_t result_ldx_hu_si2; ++ int64_t result_ldx_wu_si2; ++ int64_t result_stx_b; ++ int64_t result_stx_h; ++ int64_t result_stx_w; ++ }; ++ T t; ++ ++ // ldx_b ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_b(a4, MemOperand(a0, a2)); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_b(a4, MemOperand(a0, a2)); ++ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si2))); ++ ++ // ldx_h ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_h(a5, MemOperand(a0, a2)); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_h(a5, MemOperand(a0, a2)); ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si2))); ++ ++ // ldx_w ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_w(a6, MemOperand(a0, a2)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_w(a6, MemOperand(a0, a2)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si2))); ++ ++ // Ld_d ++ __ li(a2, static_cast(offsetof(T, si1))); ++ __ Ld_d(a7, MemOperand(a0, a2)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si1))); ++ ++ __ li(a2, static_cast(offsetof(T, si3))); ++ __ Ld_d(a7, MemOperand(a0, a2)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si3))); ++ ++ // Ld_bu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_bu(t0, MemOperand(a0, a2)); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_ldx_bu_si2))); ++ ++ // Ld_hu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_hu(t1, MemOperand(a0, a2)); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_ldx_hu_si2))); ++ ++ // Ld_wu ++ __ li(a2, static_cast(offsetof(T, si2))); ++ __ Ld_wu(t2, MemOperand(a0, a2)); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_ldx_wu_si2))); ++ ++ // St ++ __ li(t4, 0x11111111); ++ ++ // St_b ++ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_stx_b))); ++ __ li(a2, static_cast(offsetof(T, result_stx_b))); ++ __ St_b(t4, MemOperand(a0, a2)); ++ ++ // St_h ++ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3))); ++ __ St_d(t6, MemOperand(a0, offsetof(T, result_stx_h))); ++ __ li(a2, static_cast(offsetof(T, result_stx_h))); ++ __ St_h(t4, MemOperand(a0, a2)); ++ ++ // St_w ++ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3))); ++ __ li(a2, static_cast(offsetof(T, 
result_stx_w))); ++ __ St_d(t7, MemOperand(a0, a2)); ++ __ li(a3, static_cast(offsetof(T, result_stx_w))); ++ __ St_w(t4, MemOperand(a0, a3)); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x11223344; ++ t.si2 = 0x99AABBCC; ++ t.si3 = 0x1122334455667788; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x44), t.result_ldx_b_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFFFCC), t.result_ldx_b_si2); ++ ++ CHECK_EQ(static_cast(0x3344), t.result_ldx_h_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFFFFFFBBCC), t.result_ldx_h_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ldx_w_si1); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), t.result_ldx_w_si2); ++ ++ CHECK_EQ(static_cast(0x11223344), t.result_ldx_d_si1); ++ CHECK_EQ(static_cast(0x1122334455667788), t.result_ldx_d_si3); ++ ++ CHECK_EQ(static_cast(0xCC), t.result_ldx_bu_si2); ++ CHECK_EQ(static_cast(0xBBCC), t.result_ldx_hu_si2); ++ CHECK_EQ(static_cast(0x99AABBCC), t.result_ldx_wu_si2); ++ ++ CHECK_EQ(static_cast(0x1122334455667711), t.result_stx_b); ++ CHECK_EQ(static_cast(0x1122334455661111), t.result_stx_h); ++ CHECK_EQ(static_cast(0x1122334411111111), t.result_stx_w); ++} ++ ++TEST(LDPTR_STPTR) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ int64_t test[10]; ++ ++ __ ldptr_w(a4, a0, 0); ++ __ stptr_d(a4, a0, 24); // test[3] ++ ++ __ ldptr_w(a5, a0, 8); // test[1] ++ __ stptr_d(a5, a0, 32); // test[4] ++ ++ __ ldptr_d(a6, a0, 16); // test[2] ++ __ stptr_d(a6, a0, 40); // test[5] ++ ++ __ li(t0, 0x11111111); ++ ++ __ stptr_d(a6, a0, 48); // test[6] ++ __ stptr_w(t0, a0, 48); // test[6] ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test[0] = 0x11223344; ++ test[1] = 0x99AABBCC; ++ test[2] = 0x1122334455667788; ++ f.Call(&test, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x11223344), test[3]); ++ CHECK_EQ(static_cast(0xFFFFFFFF99AABBCC), test[4]); ++ CHECK_EQ(static_cast(0x1122334455667788), test[5]); ++ CHECK_EQ(static_cast(0x1122334411111111), test[6]); ++} ++ ++TEST(LA8) { ++ // Test 32bit shift instructions. 
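++ // Expected values are hand-computed from the input 0x12345678; for
++ // instance, rotating right by 8 moves the low byte to the top:
++ // rotr_w(0x12345678, 8) == 0x78123456, as checked at the bottom.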
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ int32_t input; ++ int32_t result_sll_w_0; ++ int32_t result_sll_w_8; ++ int32_t result_sll_w_10; ++ int32_t result_sll_w_31; ++ int32_t result_srl_w_0; ++ int32_t result_srl_w_8; ++ int32_t result_srl_w_10; ++ int32_t result_srl_w_31; ++ int32_t result_sra_w_0; ++ int32_t result_sra_w_8; ++ int32_t result_sra_w_10; ++ int32_t result_sra_w_31; ++ int32_t result_rotr_w_0; ++ int32_t result_rotr_w_8; ++ int32_t result_slli_w_0; ++ int32_t result_slli_w_8; ++ int32_t result_slli_w_10; ++ int32_t result_slli_w_31; ++ int32_t result_srli_w_0; ++ int32_t result_srli_w_8; ++ int32_t result_srli_w_10; ++ int32_t result_srli_w_31; ++ int32_t result_srai_w_0; ++ int32_t result_srai_w_8; ++ int32_t result_srai_w_10; ++ int32_t result_srai_w_31; ++ int32_t result_rotri_w_0; ++ int32_t result_rotri_w_8; ++ int32_t result_rotri_w_10; ++ int32_t result_rotri_w_31; ++ }; ++ T t; ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Ld_w(a4, MemOperand(a0, offsetof(T, input))); ++ ++ // sll_w ++ __ li(a5, 0); ++ __ sll_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ sll_w(t1, a4, a5); ++ __ li(a5, 0xA); ++ __ sll_w(t2, a4, a5); ++ __ li(a5, 0x1F); ++ __ sll_w(t3, a4, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_sll_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_sll_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_sll_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_sll_w_31))); ++ ++ // srl_w ++ __ li(a5, 0x0); ++ __ srl_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ srl_w(t1, a4, a5); ++ __ li(a5, 0xA); ++ __ srl_w(t2, a4, a5); ++ __ li(a5, 0x1F); ++ __ srl_w(t3, a4, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srl_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srl_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srl_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srl_w_31))); ++ ++ // sra_w ++ __ li(a5, 0x0); ++ __ sra_w(t0, a4, a5); ++ __ li(a5, 0x8); ++ __ sra_w(t1, a4, a5); ++ ++ __ li(a6, static_cast(0x80000000)); ++ __ add_w(a6, a6, a4); ++ __ li(a5, 0xA); ++ __ sra_w(t2, a6, a5); ++ __ li(a5, 0x1F); ++ __ sra_w(t3, a6, a5); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_sra_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_sra_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_sra_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_sra_w_31))); ++ ++ // rotr ++ __ li(a5, 0x0); ++ __ rotr_w(t0, a4, a5); ++ __ li(a6, 0x8); ++ __ rotr_w(t1, a4, a6); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_w_8))); ++ ++ // slli_w ++ __ slli_w(t0, a4, 0); ++ __ slli_w(t1, a4, 0x8); ++ __ slli_w(t2, a4, 0xA); ++ __ slli_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_slli_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_slli_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_slli_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_slli_w_31))); ++ ++ // srli_w ++ __ srli_w(t0, a4, 0); ++ __ srli_w(t1, a4, 0x8); ++ __ srli_w(t2, a4, 0xA); ++ __ srli_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srli_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srli_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srli_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srli_w_31))); ++ ++ // srai_w ++ __ srai_w(t0, a4, 0); ++ __ srai_w(t1, 
a4, 0x8); ++ ++ __ li(a6, static_cast(0x80000000)); ++ __ add_w(a6, a6, a4); ++ __ srai_w(t2, a6, 0xA); ++ __ srai_w(t3, a6, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_srai_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_srai_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_srai_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_srai_w_31))); ++ ++ // rotri_w ++ __ rotri_w(t0, a4, 0); ++ __ rotri_w(t1, a4, 0x8); ++ __ rotri_w(t2, a4, 0xA); ++ __ rotri_w(t3, a4, 0x1F); ++ ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_w_0))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_w_8))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_w_10))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_w_31))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x12345678; ++ f.Call(&t, 0x0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_sll_w_0); ++ CHECK_EQ(static_cast(0x34567800), t.result_sll_w_8); ++ CHECK_EQ(static_cast(0xD159E000), t.result_sll_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_sll_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srl_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srl_w_8); ++ CHECK_EQ(static_cast(0x48D15), t.result_srl_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_srl_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_sra_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_sra_w_8); ++ CHECK_EQ(static_cast(0xFFE48D15), t.result_sra_w_10); ++ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_sra_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_w_0); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotr_w_8); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_slli_w_0); ++ CHECK_EQ(static_cast(0x34567800), t.result_slli_w_8); ++ CHECK_EQ(static_cast(0xD159E000), t.result_slli_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_slli_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srli_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srli_w_8); ++ CHECK_EQ(static_cast(0x48D15), t.result_srli_w_10); ++ CHECK_EQ(static_cast(0x0), t.result_srli_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_srai_w_0); ++ CHECK_EQ(static_cast(0x123456), t.result_srai_w_8); ++ CHECK_EQ(static_cast(0xFFE48D15), t.result_srai_w_10); ++ CHECK_EQ(static_cast(0xFFFFFFFF), t.result_srai_w_31); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_w_0); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotri_w_8); ++ CHECK_EQ(static_cast(0x9E048D15), t.result_rotri_w_10); ++ CHECK_EQ(static_cast(0x2468ACF0), t.result_rotri_w_31); ++} ++ ++TEST(LA9) { ++ // Test 64bit shift instructions. 
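++ // Same scheme on 64-bit values: with input 0x51F4B764A26E7412,
++ // rotri_d by 13 yields 0xA0928FA5BB251373, and srai_d by 63 smears
++ // the sign bit across the whole register.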
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ int64_t input; ++ int64_t result_sll_d_0; ++ int64_t result_sll_d_13; ++ int64_t result_sll_d_30; ++ int64_t result_sll_d_63; ++ int64_t result_srl_d_0; ++ int64_t result_srl_d_13; ++ int64_t result_srl_d_30; ++ int64_t result_srl_d_63; ++ int64_t result_sra_d_0; ++ int64_t result_sra_d_13; ++ int64_t result_sra_d_30; ++ int64_t result_sra_d_63; ++ int64_t result_rotr_d_0; ++ int64_t result_rotr_d_13; ++ int64_t result_slli_d_0; ++ int64_t result_slli_d_13; ++ int64_t result_slli_d_30; ++ int64_t result_slli_d_63; ++ int64_t result_srli_d_0; ++ int64_t result_srli_d_13; ++ int64_t result_srli_d_30; ++ int64_t result_srli_d_63; ++ int64_t result_srai_d_0; ++ int64_t result_srai_d_13; ++ int64_t result_srai_d_30; ++ int64_t result_srai_d_63; ++ int64_t result_rotri_d_0; ++ int64_t result_rotri_d_13; ++ int64_t result_rotri_d_30; ++ int64_t result_rotri_d_63; ++ }; ++ ++ T t; ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, input))); ++ ++ // sll_d ++ __ li(a5, 0); ++ __ sll_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ sll_d(t1, a4, a5); ++ __ li(a5, 0x1E); ++ __ sll_d(t2, a4, a5); ++ __ li(a5, 0x3F); ++ __ sll_d(t3, a4, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_sll_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_sll_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_sll_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_sll_d_63))); ++ ++ // srl_d ++ __ li(a5, 0x0); ++ __ srl_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ srl_d(t1, a4, a5); ++ __ li(a5, 0x1E); ++ __ srl_d(t2, a4, a5); ++ __ li(a5, 0x3F); ++ __ srl_d(t3, a4, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srl_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srl_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srl_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srl_d_63))); ++ ++ // sra_d ++ __ li(a5, 0x0); ++ __ sra_d(t0, a4, a5); ++ __ li(a5, 0xD); ++ __ sra_d(t1, a4, a5); ++ ++ __ li(a6, static_cast(0x8000000000000000)); ++ __ add_d(a6, a6, a4); ++ __ li(a5, 0x1E); ++ __ sra_d(t2, a6, a5); ++ __ li(a5, 0x3F); ++ __ sra_d(t3, a6, a5); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_sra_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_sra_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_sra_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_sra_d_63))); ++ ++ // rotr ++ __ li(a5, 0x0); ++ __ rotr_d(t0, a4, a5); ++ __ li(a6, 0xD); ++ __ rotr_d(t1, a4, a6); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_d_13))); ++ ++ // slli_d ++ __ slli_d(t0, a4, 0); ++ __ slli_d(t1, a4, 0xD); ++ __ slli_d(t2, a4, 0x1E); ++ __ slli_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_slli_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_slli_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_slli_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_slli_d_63))); ++ ++ // srli_d ++ __ srli_d(t0, a4, 0); ++ __ srli_d(t1, a4, 0xD); ++ __ srli_d(t2, a4, 0x1E); ++ __ srli_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srli_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srli_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srli_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srli_d_63))); ++ ++ // srai_d ++ __ 
srai_d(t0, a4, 0); ++ __ srai_d(t1, a4, 0xD); ++ ++ __ li(a6, static_cast(0x8000000000000000)); ++ __ add_d(a6, a6, a4); ++ __ srai_d(t2, a6, 0x1E); ++ __ srai_d(t3, a6, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_srai_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_srai_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_srai_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_srai_d_63))); ++ ++ // rotri_d ++ __ rotri_d(t0, a4, 0); ++ __ rotri_d(t1, a4, 0xD); ++ __ rotri_d(t2, a4, 0x1E); ++ __ rotri_d(t3, a4, 0x3F); ++ ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_d_0))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_d_13))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_d_30))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_d_63))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x51F4B764A26E7412; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sll_d_0); ++ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_sll_d_13); ++ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_sll_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_sll_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srl_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srl_d_13); ++ CHECK_EQ(static_cast(0x147d2dd92), t.result_srl_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_srl_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_sra_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_sra_d_13); ++ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_sra_d_30); ++ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_sra_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotr_d_0); ++ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotr_d_13); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_slli_d_0); ++ CHECK_EQ(static_cast(0x96ec944dce824000), t.result_slli_d_13); ++ CHECK_EQ(static_cast(0x289b9d0480000000), t.result_slli_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_slli_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srli_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srli_d_13); ++ CHECK_EQ(static_cast(0x147d2dd92), t.result_srli_d_30); ++ CHECK_EQ(static_cast(0x0), t.result_srli_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_srai_d_0); ++ CHECK_EQ(static_cast(0x28fa5bb251373), t.result_srai_d_13); ++ CHECK_EQ(static_cast(0xffffffff47d2dd92), t.result_srai_d_30); ++ CHECK_EQ(static_cast(0xffffffffffffffff), t.result_srai_d_63); ++ ++ CHECK_EQ(static_cast(0x51f4b764a26e7412), t.result_rotri_d_0); ++ CHECK_EQ(static_cast(0xa0928fa5bb251373), t.result_rotri_d_13); ++ CHECK_EQ(static_cast(0x89b9d04947d2dd92), t.result_rotri_d_30); ++ CHECK_EQ(static_cast(0xa3e96ec944dce824), t.result_rotri_d_63); ++} ++ ++TEST(LA10) { ++ // Test 32bit bit operation instructions. 
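++ // Bit-field example: bstrpick_w(t0, a4, 0xD, 0x4) extracts bits
++ // [13:4] of the low word 0xA26E7412, i.e. (0xA26E7412 >> 4) & 0x3FF,
++ // which is the 0x341 checked at the bottom of this test.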
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct T { ++ int64_t si1; ++ int64_t si2; ++ int32_t result_ext_w_b_si1; ++ int32_t result_ext_w_b_si2; ++ int32_t result_ext_w_h_si1; ++ int32_t result_ext_w_h_si2; ++ int32_t result_clo_w_si1; ++ int32_t result_clo_w_si2; ++ int32_t result_clz_w_si1; ++ int32_t result_clz_w_si2; ++ int32_t result_cto_w_si1; ++ int32_t result_cto_w_si2; ++ int32_t result_ctz_w_si1; ++ int32_t result_ctz_w_si2; ++ int32_t result_bytepick_w_si1; ++ int32_t result_bytepick_w_si2; ++ int32_t result_revb_2h_si1; ++ int32_t result_revb_2h_si2; ++ int32_t result_bitrev_4b_si1; ++ int32_t result_bitrev_4b_si2; ++ int32_t result_bitrev_w_si1; ++ int32_t result_bitrev_w_si2; ++ int32_t result_bstrins_w_si1; ++ int32_t result_bstrins_w_si2; ++ int32_t result_bstrpick_w_si1; ++ int32_t result_bstrpick_w_si2; ++ }; ++ T t; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1))); ++ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2))); ++ ++ // ext_w_b ++ __ ext_w_b(t0, a4); ++ __ ext_w_b(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_b_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_b_si2))); ++ ++ // ext_w_h ++ __ ext_w_h(t0, a4); ++ __ ext_w_h(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_h_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_h_si2))); ++ ++ /* //clo_w ++ __ clo_w(t0, a4); ++ __ clo_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_w_si2)));*/ ++ ++ // clz_w ++ __ clz_w(t0, a4); ++ __ clz_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_clz_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_clz_w_si2))); ++ ++ /* //cto_w ++ __ cto_w(t0, a4); ++ __ cto_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_w_si2)));*/ ++ ++ // ctz_w ++ __ ctz_w(t0, a4); ++ __ ctz_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_ctz_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_ctz_w_si2))); ++ ++ // bytepick_w ++ __ bytepick_w(t0, a4, a5, 0); ++ __ bytepick_w(t1, a5, a4, 2); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bytepick_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bytepick_w_si2))); ++ ++ // revb_2h ++ __ revb_2h(t0, a4); ++ __ revb_2h(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_revb_2h_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_revb_2h_si2))); ++ ++ // bitrev ++ __ bitrev_4b(t0, a4); ++ __ bitrev_4b(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_4b_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_4b_si2))); ++ ++ // bitrev_w ++ __ bitrev_w(t0, a4); ++ __ bitrev_w(t1, a5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_w_si2))); ++ ++ // bstrins ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrins_w(t0, a4, 0xD, 0x4); ++ __ bstrins_w(t1, a5, 0x16, 0x5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrins_w_si1))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_bstrins_w_si2))); ++ ++ // bstrpick ++ __ or_(t0, zero_reg, zero_reg); ++ __ or_(t1, zero_reg, zero_reg); ++ __ bstrpick_w(t0, a4, 0xD, 0x4); ++ __ bstrpick_w(t1, a5, 0x16, 0x5); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrpick_w_si1))); ++ 
__ St_w(t1, MemOperand(a0, offsetof(T, result_bstrpick_w_si2))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.si1 = 0x51F4B764A26E7412; ++ t.si2 = 0x81F25A87C423B891; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12), t.result_ext_w_b_si1); ++ CHECK_EQ(static_cast(0xffffff91), t.result_ext_w_b_si2); ++ CHECK_EQ(static_cast(0x7412), t.result_ext_w_h_si1); ++ CHECK_EQ(static_cast(0xffffb891), t.result_ext_w_h_si2); ++ // CHECK_EQ(static_cast(0x1), t.result_clo_w_si1); ++ // CHECK_EQ(static_cast(0x2), t.result_clo_w_si2); ++ CHECK_EQ(static_cast(0x0), t.result_clz_w_si1); ++ CHECK_EQ(static_cast(0x0), t.result_clz_w_si2); ++ // CHECK_EQ(static_cast(0x0), t.result_cto_w_si1); ++ // CHECK_EQ(static_cast(0x1), t.result_cto_w_si2); ++ CHECK_EQ(static_cast(0x1), t.result_ctz_w_si1); ++ CHECK_EQ(static_cast(0x0), t.result_ctz_w_si2); ++ CHECK_EQ(static_cast(0xc423b891), t.result_bytepick_w_si1); ++ CHECK_EQ(static_cast(0x7412c423), ++ t.result_bytepick_w_si2); // 0xffffc423 ++ CHECK_EQ(static_cast(0x6ea21274), t.result_revb_2h_si1); ++ CHECK_EQ(static_cast(0x23c491b8), t.result_revb_2h_si2); ++ CHECK_EQ(static_cast(0x45762e48), t.result_bitrev_4b_si1); ++ CHECK_EQ(static_cast(0x23c41d89), t.result_bitrev_4b_si2); ++ CHECK_EQ(static_cast(0x482e7645), t.result_bitrev_w_si1); ++ CHECK_EQ(static_cast(0x891dc423), t.result_bitrev_w_si2); ++ CHECK_EQ(static_cast(0x120), t.result_bstrins_w_si1); ++ CHECK_EQ(static_cast(0x771220), t.result_bstrins_w_si2); ++ CHECK_EQ(static_cast(0x341), t.result_bstrpick_w_si1); ++ CHECK_EQ(static_cast(0x11dc4), t.result_bstrpick_w_si2); ++} ++ ++TEST(LA11) { ++ // Test 64bit bit operation instructions. 
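++ // 64-bit counterpart: bstrpick_d(t1, a5, 63, 48) keeps bits [63:48]
++ // of 0xFB8017FF781A15C3, i.e. 0xFB80, matching the CHECK_EQ below.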
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct T {
++    int64_t si1;
++    int64_t si2;
++    int64_t result_clo_d_si1;
++    int64_t result_clo_d_si2;
++    int64_t result_clz_d_si1;
++    int64_t result_clz_d_si2;
++    int64_t result_cto_d_si1;
++    int64_t result_cto_d_si2;
++    int64_t result_ctz_d_si1;
++    int64_t result_ctz_d_si2;
++    int64_t result_bytepick_d_si1;
++    int64_t result_bytepick_d_si2;
++    int64_t result_revb_4h_si1;
++    int64_t result_revb_4h_si2;
++    int64_t result_revb_2w_si1;
++    int64_t result_revb_2w_si2;
++    int64_t result_revb_d_si1;
++    int64_t result_revb_d_si2;
++    int64_t result_revh_2w_si1;
++    int64_t result_revh_2w_si2;
++    int64_t result_revh_d_si1;
++    int64_t result_revh_d_si2;
++    int64_t result_bitrev_8b_si1;
++    int64_t result_bitrev_8b_si2;
++    int64_t result_bitrev_d_si1;
++    int64_t result_bitrev_d_si2;
++    int64_t result_bstrins_d_si1;
++    int64_t result_bstrins_d_si2;
++    int64_t result_bstrpick_d_si1;
++    int64_t result_bstrpick_d_si2;
++    int64_t result_maskeqz_si1;
++    int64_t result_maskeqz_si2;
++    int64_t result_masknez_si1;
++    int64_t result_masknez_si2;
++  };
++
++  T t;
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, si1)));
++  __ Ld_d(a5, MemOperand(a0, offsetof(T, si2)));
++
++  /* //clo_d
++  __ clo_d(t0, a4);
++  __ clo_d(t1, a5);
++  __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_d_si1)));
++  __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_d_si2)));*/
++
++  // clz_d
++  __ or_(t0, zero_reg, zero_reg);
++  __ clz_d(t0, a4);
++  __ clz_d(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_clz_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_clz_d_si2)));
++
++  /* //cto_d
++  __ cto_d(t0, a4);
++  __ cto_d(t1, a5);
++  __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_d_si1)));
++  __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_d_si2)));*/
++
++  // ctz_d
++  __ ctz_d(t0, a4);
++  __ ctz_d(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_ctz_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_ctz_d_si2)));
++
++  // bytepick_d
++  __ bytepick_d(t0, a4, a5, 0);
++  __ bytepick_d(t1, a5, a4, 5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_bytepick_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_bytepick_d_si2)));
++
++  // revb_4h
++  __ revb_4h(t0, a4);
++  __ revb_4h(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_4h_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_4h_si2)));
++
++  // revb_2w
++  __ revb_2w(t0, a4);
++  __ revb_2w(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_2w_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_2w_si2)));
++
++  // revb_d
++  __ revb_d(t0, a4);
++  __ revb_d(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_d_si2)));
++
++  // revh_2w
++  __ revh_2w(t0, a4);
++  __ revh_2w(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_2w_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_2w_si2)));
++
++  // revh_d
++  __ revh_d(t0, a4);
++  __ revh_d(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_d_si2)));
++
++  // bitrev_8b
++  __ bitrev_8b(t0, a4);
++  __ bitrev_8b(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_8b_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_8b_si2)));
++
++  // bitrev_d
++  __ bitrev_d(t0, a4);
++  __ bitrev_d(t1, a5);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_d_si2)));
++
++  // bstrins_d
++  __ or_(t0, zero_reg, zero_reg);
++  __ or_(t1, zero_reg, zero_reg);
++  __ bstrins_d(t0, a4, 5, 0);
++  __ bstrins_d(t1, a5, 39, 12);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrins_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrins_d_si2)));
++
++  // bstrpick_d
++  __ or_(t0, zero_reg, zero_reg);
++  __ or_(t1, zero_reg, zero_reg);
++  __ bstrpick_d(t0, a4, 5, 0);
++  __ bstrpick_d(t1, a5, 63, 48);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrpick_d_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrpick_d_si2)));
++
++  // maskeqz
++  __ maskeqz(t0, a4, a4);
++  __ maskeqz(t1, a5, zero_reg);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_maskeqz_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_maskeqz_si2)));
++
++  // masknez
++  __ masknez(t0, a4, a4);
++  __ masknez(t1, a5, zero_reg);
++  __ St_d(t0, MemOperand(a0, offsetof(T, result_masknez_si1)));
++  __ St_d(t1, MemOperand(a0, offsetof(T, result_masknez_si2)));
++
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  t.si1 = 0x10C021098B710CDE;
++  t.si2 = 0xFB8017FF781A15C3;
++  f.Call(&t, 0, 0, 0, 0);
++
++  // CHECK_EQ(static_cast<int64_t>(0x0), t.result_clo_d_si1);
++  // CHECK_EQ(static_cast<int64_t>(0x5), t.result_clo_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x3), t.result_clz_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0x0), t.result_clz_d_si2);
++  // CHECK_EQ(static_cast<int64_t>(0x0), t.result_cto_d_si1);
++  // CHECK_EQ(static_cast<int64_t>(0x2), t.result_cto_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x1), t.result_ctz_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0x0), t.result_ctz_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0xfb8017ff781a15c3), t.result_bytepick_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0x710cde0000000000), t.result_bytepick_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0xc0100921718bde0c), t.result_revb_4h_si1);
++  CHECK_EQ(static_cast<int64_t>(0x80fbff171a78c315), t.result_revb_4h_si2);
++  CHECK_EQ(static_cast<int64_t>(0x921c010de0c718b), t.result_revb_2w_si1);
++  CHECK_EQ(static_cast<int64_t>(0xff1780fbc3151a78), t.result_revb_2w_si2);
++  CHECK_EQ(static_cast<int64_t>(0xde0c718b0921c010), t.result_revb_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0xc3151a78ff1780fb), t.result_revb_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x210910c00cde8b71), t.result_revh_2w_si1);
++  CHECK_EQ(static_cast<int64_t>(0x17fffb8015c3781a), t.result_revh_2w_si2);
++  CHECK_EQ(static_cast<int64_t>(0xcde8b71210910c0), t.result_revh_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0x15c3781a17fffb80), t.result_revh_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x8038490d18e307b), t.result_bitrev_8b_si1);
++  CHECK_EQ(static_cast<int64_t>(0xdf01e8ff1e58a8c3), t.result_bitrev_8b_si2);
++  CHECK_EQ(static_cast<int64_t>(0x7b308ed190840308), t.result_bitrev_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0xc3a8581effe801df), t.result_bitrev_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x1e), t.result_bstrins_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0x81a15c3000), t.result_bstrins_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0x1e), t.result_bstrpick_d_si1);
++  CHECK_EQ(static_cast<int64_t>(0xfb80), t.result_bstrpick_d_si2);
++  CHECK_EQ(static_cast<int64_t>(0), t.result_maskeqz_si1);
++  CHECK_EQ(static_cast<int64_t>(0xFB8017FF781A15C3), t.result_maskeqz_si2);
++  CHECK_EQ(static_cast<int64_t>(0x10C021098B710CDE), t.result_masknez_si1);
++  CHECK_EQ(static_cast<int64_t>(0), t.result_masknez_si2);
++}
++
++uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ beq(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BEQ) {
++  CcTest::InitializeVM();
++  struct TestCaseBeq {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBeq tc[] = {
++    // value1, value2, offset, expected_res
++    { 0, 0, -6, 0x3 },
++    { 1, 1, -3, 0x30 },
++    { -2, -2, 3, 0x300 },
++    { 3, -3, 6, 0 },
++    { 4, 4, 6, 0x700 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeq);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_beq(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bne(int64_t value1, int64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bne(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BNE) {
++  CcTest::InitializeVM();
++  struct TestCaseBne {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBne tc[] = {
++    // value1, value2, offset, expected_res
++    { 1, -1, -6, 0x3 },
++    { 2, -2, -3, 0x30 },
++    { 3, -3, 3, 0x300 },
++    { 4, -4, 6, 0x700 },
++    { 0, 0, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBne);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bne(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_blt(int64_t value1, int64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ blt(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BLT) {
++  CcTest::InitializeVM();
++  struct TestCaseBlt {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBlt tc[] = {
++    // value1, value2, offset, expected_res
++    { -1, 1, -6, 0x3 },
++    { -2, 2, -3, 0x30 },
++    { -3, 3, 3, 0x300 },
++    { -4, 4, 6, 0x700 },
++    { 5, -5, 6, 0 },
++    { 0, 0, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBlt);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_blt(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bge(uint64_t value1, uint64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bge(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BGE) {
++  CcTest::InitializeVM();
++  struct TestCaseBge {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBge tc[] = {
++    // value1, value2, offset, expected_res
++    { 0, 0, -6, 0x3 },
++    { 1, 1, -3, 0x30 },
++    { 2, -2, 3, 0x300 },
++    { 3, -3, 6, 0x700 },
++    { -4, 4, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBge);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bge(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bltu(int64_t value1, int64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bltu(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BLTU) {
++  CcTest::InitializeVM();
++  struct TestCaseBltu {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBltu tc[] = {
++    // value1, value2, offset, expected_res
++    { 0, 1, -6, 0x3 },
++    { 1, -1, -3, 0x30 },
++    { 2, -2, 3, 0x300 },
++    { 3, -3, 6, 0x700 },
++    { 4, 4, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBltu);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bltu(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bgeu(int64_t value1, int64_t value2, int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bgeu(a0, a1, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BGEU) {
++  CcTest::InitializeVM();
++  struct TestCaseBgeu {
++    int64_t value1;
++    int64_t value2;
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBgeu tc[] = {
++    // value1, value2, offset, expected_res
++    { 0, 0, -6, 0x3 },
++    { -1, 1, -3, 0x30 },
++    { -2, 2, 3, 0x300 },
++    { -3, 3, 6, 0x700 },
++    { 4, -4, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBgeu);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bgeu(tc[i].value1, tc[i].value2, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_beqz(int64_t value, int32_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(&L);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(&L);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ beqz(a0, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(&L);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(&L);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BEQZ) {
++  CcTest::InitializeVM();
++  struct TestCaseBeqz {
++    int64_t value;
++    int32_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBeqz tc[] = {
++    // value, offset, expected_res
++    { 0, -6, 0x3 },
++    { 0, -3, 0x30 },
++    { 0, 3, 0x300 },
++    { 0, 6, 0x700 },
++    { 1, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqz);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_beqz(tc[i].value, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bnez_b(int64_t value, int32_t offset) {
++  // bnez, b.
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block, L;
++  __ li(a2, 0l);
++  __ b(&main_block);
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ b(5);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ b(2);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bnez(a0, offset);
++  __ bind(&L);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ b(-4);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ b(-7);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BNEZ_B) {
++  CcTest::InitializeVM();
++  struct TestCaseBnez {
++    int64_t value;
++    int32_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBnez tc[] = {
++    // value, offset, expected_res
++    { 1, -6, 0x3 },
++    { -2, -3, 0x30 },
++    { 3, 3, 0x300 },
++    { -4, 6, 0x700 },
++    { 0, 6, 0 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBnez);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bnez_b(tc[i].value, tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++uint64_t run_bl(int32_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block;
++  __ li(a2, 0l);
++  __ push(ra);  // push is implemented by two instructions, addi_d and st_d
++  __ b(&main_block);
++
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ bl(offset);
++  __ or_(a0, a2, zero_reg);
++  __ pop(ra);  // pop is implemented by two instructions, ld_d and addi_d.
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(BL) {
++  CcTest::InitializeVM();
++  struct TestCaseBl {
++    int32_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseBl tc[] = {
++    // offset, expected_res
++    { -6, 0x3 },
++    { -3, 0x30 },
++    { 5, 0x300 },
++    { 8, 0x700 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBl);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_bl(tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++TEST(PCADD) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label exit, error;
++  __ push(ra);
++
++  // pcaddi
++  __ li(a4, 0x1FFFFC);
++  __ li(a5, 0);
++  __ li(a6, static_cast<int32_t>(0xFFE00000));
++
++  __ bl(1);
++  __ pcaddi(a3, 0x7FFFF);
++  __ add_d(a2, ra, a4);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  __ bl(1);
++  __ pcaddi(a3, 0);
++  __ add_d(a2, ra, a5);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  __ bl(1);
++  __ pcaddi(a3, 0x80000);
++  __ add_d(a2, ra, a6);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  // pcaddu12i
++  __ li(a4, 0x7FFFF000);
++  __ li(a5, 0);
++  __ li(a6, static_cast<int32_t>(0x80000000));
++
++  __ bl(1);
++  __ pcaddu12i(a2, 0x7FFFF);
++  __ add_d(a3, ra, a4);
++  __ Branch(&error, ne, a2, Operand(a3));
++  __ bl(1);
++  __ pcaddu12i(a2, 0);
++  __ add_d(a3, ra, a5);
++  __ Branch(&error, ne, a2, Operand(a3));
++  __ bl(1);
++  __ pcaddu12i(a2, 0x80000);
++  __ add_d(a3, ra, a6);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  // pcaddu18i
++  __ li(a4, 0x1FFFFC0000);
++  __ li(a5, 0);
++  __ li(a6, static_cast<int64_t>(0xFFFFFFE000000000));
++
++  __ bl(1);
++  __ pcaddu18i(a2, 0x7FFFF);
++  __ add_d(a3, ra, a4);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  __ bl(1);
++  __ pcaddu18i(a2, 0);
++  __ add_d(a3, ra, a5);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  __ bl(1);
++  __ pcaddu18i(a2, 0x80000);
++  __ add_d(a3, ra, a6);
++  __ Branch(&error, ne, a2, Operand(a3));
++
++  // pcalau12i
++  __ li(a4, 0x7FFFF000);
++  __ li(a5, 0);
++  __ li(a6, static_cast<int32_t>(0x80000000));
++  __ li(a7, static_cast<int64_t>(0xFFFFFFFFFFFFF000));
++
++  __ bl(1);
++  __ pcalau12i(a3, 0x7FFFF);
++  __ add_d(a2, ra, a4);
++  __ and_(t0, a2, a7);
++  __ and_(t1, a3, a7);
++  __ Branch(&error, ne, t0, Operand(t1));
++
++  __ bl(1);
++  __ pcalau12i(a3, 0);
++  __ add_d(a2, ra, a5);
++  __ and_(t0, a2, a7);
++  __ and_(t1, a3, a7);
++  __ Branch(&error, ne, t0, Operand(t1));
++
++  __ bl(1);
++  __ pcalau12i(a2, 0x80000);
++  __ add_d(a3, ra, a6);
++  __ and_(t0, a2, a7);
++  __ and_(t1, a3, a7);
++  __ Branch(&error, ne, t0, Operand(t1));
++
++  __ li(a0, 0x31415926);
++  __ b(&exit);
++
++  __ bind(&error);
++  __ li(a0, 0x666);
++
++  __ bind(&exit);
++  __ pop(ra);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
++
++  CHECK_EQ(0x31415926L, res);
++}
++
++uint64_t run_jirl(int16_t offset) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label main_block;
++  __ li(a2, 0l);
++  __ push(ra);
++  __ b(&main_block);
++
++  // Block 1
++  __ addi_d(a2, a2, 0x1);
++  __ addi_d(a2, a2, 0x2);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 2
++  __ addi_d(a2, a2, 0x10);
++  __ addi_d(a2, a2, 0x20);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 3 (Main)
++  __ bind(&main_block);
++  __ pcaddi(a3, 1);
++  __ jirl(ra, a3, offset);
++  __ or_(a0, a2, zero_reg);
++  __ pop(ra);  // pop is implemented by two instructions, ld_d and addi_d.
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 4
++  __ addi_d(a2, a2, 0x100);
++  __ addi_d(a2, a2, 0x200);
++  __ jirl(zero_reg, ra, 0);
++
++  // Block 5
++  __ addi_d(a2, a2, 0x300);
++  __ addi_d(a2, a2, 0x400);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode::FromCode(*code);
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(JIRL) {
++  CcTest::InitializeVM();
++  struct TestCaseJirl {
++    int16_t offset;
++    uint64_t expected_res;
++  };
++
++  // clang-format off
++  struct TestCaseJirl tc[] = {
++    // offset, expected_res
++    { -7, 0x3 },
++    { -4, 0x30 },
++    { 5, 0x300 },
++    { 8, 0x700 },
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJirl);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    uint64_t res = run_jirl(tc[i].offset);
++    CHECK_EQ(tc[i].expected_res, res);
++  }
++}
++
++TEST(LA12) {
++  // Test floating point calculate instructions.
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  struct T {
++    double a;
++    double b;
++    double c;
++    double d;
++    double e;
++    double f;
++    double result_fadd_d;
++    double result_fsub_d;
++    double result_fmul_d;
++    double result_fdiv_d;
++    double result_fmadd_d;
++    double result_fmsub_d;
++    double result_fnmadd_d;
++    double result_fnmsub_d;
++    double result_fsqrt_d;
++    double result_frecip_d;
++    double result_frsqrt_d;
++    double result_fscaleb_d;
++    double result_flogb_d;
++    double result_fcopysign_d;
++    double result_fclass_d;
++  };
++  T t;
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  // Double precision floating point instructions.
++  __ Fld_d(f8, MemOperand(a0, offsetof(T, a)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(T, b)));
++
++  __ fneg_d(f10, f8);
++  __ fadd_d(f11, f9, f10);
++  __ Fst_d(f11, MemOperand(a0, offsetof(T, result_fadd_d)));
++  __ fabs_d(f11, f11);
++  __ fsub_d(f12, f11, f9);
++  __ Fst_d(f12, MemOperand(a0, offsetof(T, result_fsub_d)));
++
++  __ Fld_d(f13, MemOperand(a0, offsetof(T, c)));
++  __ Fld_d(f14, MemOperand(a0, offsetof(T, d)));
++  __ Fld_d(f15, MemOperand(a0, offsetof(T, e)));
++
++  __ fmin_d(f16, f13, f14);
++  __ fmul_d(f17, f15, f16);
++  __ Fst_d(f17, MemOperand(a0, offsetof(T, result_fmul_d)));
++  __ fmax_d(f18, f13, f14);
++  __ fdiv_d(f19, f15, f18);
++  __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fdiv_d)));
++
++  __ fmina_d(f16, f13, f14);
++  __ fmadd_d(f18, f17, f15, f16);
++  __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fmadd_d)));
++  __ fnmadd_d(f19, f17, f15, f16);
++  __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fnmadd_d)));
++  __ fmaxa_d(f16, f13, f14);
++  __ fmsub_d(f20, f17, f15, f16);
++  __ Fst_d(f20, MemOperand(a0, offsetof(T, result_fmsub_d)));
++  __ fnmsub_d(f21, f17, f15, f16);
++  __ Fst_d(f21, MemOperand(a0, offsetof(T, result_fnmsub_d)));
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(T, f)));
++  __ fsqrt_d(f10, f8);
++  __ Fst_d(f10, MemOperand(a0, offsetof(T, result_fsqrt_d)));
++  //__ frecip_d(f11, f10);
++  //__ frsqrt_d(f12, f8);
++  //__ Fst_d(f11, MemOperand(a0, offsetof(T, result_frecip_d)));
++  //__ Fst_d(f12, MemOperand(a0, offsetof(T, result_frsqrt_d)));
++
++  /*__ fscaleb_d(f16, f13, f15);
++  __ flogb_d(f17, f15);
++  __ fcopysign_d(f18, f8, f9);
++  __ fclass_d(f19, f9);
++  __ Fst_d(f16, MemOperand(a0, offsetof(T, result_fscaleb_d)));
++  __ Fst_d(f17, MemOperand(a0, offsetof(T, result_flogb_d)));
++  __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fcopysign_d)));
++  __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fclass_d)));*/
++
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  // Double test values.
++  t.a = 1.5e14;
++  t.b = -2.75e11;
++  t.c = 1.5;
++  t.d = -2.75;
++  t.e = 120.0;
++  t.f = 120.44;
++  f.Call(&t, 0, 0, 0, 0);
++
++  CHECK_EQ(static_cast<double>(-1.502750e14), t.result_fadd_d);
++  CHECK_EQ(static_cast<double>(1.505500e14), t.result_fsub_d);
++  CHECK_EQ(static_cast<double>(-3.300000e02), t.result_fmul_d);
++  CHECK_EQ(static_cast<double>(8.000000e01), t.result_fdiv_d);
++  CHECK_EQ(static_cast<double>(-3.959850e04), t.result_fmadd_d);
++  CHECK_EQ(static_cast<double>(-3.959725e04), t.result_fmsub_d);
++  CHECK_EQ(static_cast<double>(3.959850e04), t.result_fnmadd_d);
++  CHECK_EQ(static_cast<double>(3.959725e04), t.result_fnmsub_d);
++  CHECK_EQ(static_cast<double>(10.97451593465515908537), t.result_fsqrt_d);
++  // CHECK_EQ(static_cast<double>( 8.164965e-08), t.result_frecip_d);
++  // CHECK_EQ(static_cast<double>( 8.164966e-08), t.result_frsqrt_d);
++  // CHECK_EQ(static_cast<double>(), t.result_fscaleb_d);
++  // CHECK_EQ(static_cast<double>( 6.906891), t.result_flogb_d);
++  // CHECK_EQ(static_cast<double>( 2.75e11), t.result_fcopysign_d);
++  // CHECK_EQ(static_cast<double>(), t.result_fclass_d);
++}
++
++TEST(LA13) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  struct T {
++    float a;
++    float b;
++    float c;
++    float d;
++    float e;
++    float result_fadd_s;
++    float result_fsub_s;
++    float result_fmul_s;
++    float result_fdiv_s;
++    float result_fmadd_s;
++    float result_fmsub_s;
++    float result_fnmadd_s;
++    float result_fnmsub_s;
++    float result_fsqrt_s;
++    float result_frecip_s;
++    float result_frsqrt_s;
++    float result_fscaleb_s;
++    float result_flogb_s;
++    float result_fcopysign_s;
++    float result_fclass_s;
++  };
++  T t;
++
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  // Float precision floating point instructions.
++  __ Fld_s(f8, MemOperand(a0, offsetof(T, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(T, b)));
++
++  __ fneg_s(f10, f8);
++  __ fadd_s(f11, f9, f10);
++  __ Fst_s(f11, MemOperand(a0, offsetof(T, result_fadd_s)));
++  __ fabs_s(f11, f11);
++  __ fsub_s(f12, f11, f9);
++  __ Fst_s(f12, MemOperand(a0, offsetof(T, result_fsub_s)));
++
++  __ Fld_s(f13, MemOperand(a0, offsetof(T, c)));
++  __ Fld_s(f14, MemOperand(a0, offsetof(T, d)));
++  __ Fld_s(f15, MemOperand(a0, offsetof(T, e)));
++
++  __ fmin_s(f16, f13, f14);
++  __ fmul_s(f17, f15, f16);
++  __ Fst_s(f17, MemOperand(a0, offsetof(T, result_fmul_s)));
++  __ fmax_s(f18, f13, f14);
++  __ fdiv_s(f19, f15, f18);
++  __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fdiv_s)));
++
++  __ fmina_s(f16, f13, f14);
++  __ fmadd_s(f18, f17, f15, f16);
++  __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fmadd_s)));
++  __ fnmadd_s(f19, f17, f15, f16);
++  __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fnmadd_s)));
++  __ fmaxa_s(f16, f13, f14);
++  __ fmsub_s(f20, f17, f15, f16);
++  __ Fst_s(f20, MemOperand(a0, offsetof(T, result_fmsub_s)));
++  __ fnmsub_s(f21, f17, f15, f16);
++  __ Fst_s(f21, MemOperand(a0, offsetof(T, result_fnmsub_s)));
++
++  __ fsqrt_s(f10, f8);
++  //__ frecip_s(f11, f10);
++  //__ frsqrt_s(f12, f8);
++  __ Fst_s(f10, MemOperand(a0, offsetof(T, result_fsqrt_s)));
++  //__ Fst_s(f11, MemOperand(a0, offsetof(T, result_frecip_s)));
++  //__ Fst_s(f12, MemOperand(a0, offsetof(T, result_frsqrt_s)));
++
++  /*__ fscaleb_s(f16, f13, f15);
++  __ flogb_s(f17, f15);
++  __ fcopysign_s(f18, f8, f9);
++  __ fclass_s(f19, f9);
++  __ Fst_s(f16, MemOperand(a0, offsetof(T, result_fscaleb_s)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(T, result_flogb_s)));
++  __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fcopysign_s)));
++  __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fclass_s)));*/
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  // Float test values.
++  t.a = 1.5e6;
++  t.b = -2.75e4;
++  t.c = 1.5;
++  t.d = -2.75;
++  t.e = 120.0;
++  f.Call(&t, 0, 0, 0, 0);
++
++  CHECK_EQ(static_cast<float>(-1.527500e06), t.result_fadd_s);
++  CHECK_EQ(static_cast<float>(1.555000e06), t.result_fsub_s);
++  CHECK_EQ(static_cast<float>(-3.300000e02), t.result_fmul_s);
++  CHECK_EQ(static_cast<float>(8.000000e01), t.result_fdiv_s);
++  CHECK_EQ(static_cast<float>(-3.959850e04), t.result_fmadd_s);
++  CHECK_EQ(static_cast<float>(-3.959725e04), t.result_fmsub_s);
++  CHECK_EQ(static_cast<float>(3.959850e04), t.result_fnmadd_s);
++  CHECK_EQ(static_cast<float>(3.959725e04), t.result_fnmsub_s);
++  CHECK_EQ(static_cast<float>(1224.744873), t.result_fsqrt_s);
++  // CHECK_EQ(static_cast<float>( 8.164966e-04), t.result_frecip_s);
++  // CHECK_EQ(static_cast<float>( 8.164966e-04), t.result_frsqrt_s);
++  // CHECK_EQ(static_cast<float>(), t.result_fscaleb_s);
++  // CHECK_EQ(static_cast<float>( 6.906890), t.result_flogb_s);
++  // CHECK_EQ(static_cast<float>( 2.75e4), t.result_fcopysign_s);
++  // CHECK_EQ(static_cast<float>(), t.result_fclass_s);
++}
++
++TEST(FCMP_COND) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct TestFloat {
++    double dTrue;
++    double dFalse;
++    double dOp1;
++    double dOp2;
++    double dCaf;
++    double dCun;
++    double dCeq;
++    double dCueq;
++    double dClt;
++    double dCult;
++    double dCle;
++    double dCule;
++    double dCne;
++    double dCor;
++    double dCune;
++    double dSaf;
++    double dSun;
++    double dSeq;
++    double dSueq;
++    double dSlt;
++    double dSult;
++    double dSle;
++    double dSule;
++    double dSne;
++    double dSor;
++    double dSune;
++    float fTrue;
++    float fFalse;
++    float fOp1;
++    float fOp2;
++    float fCaf;
++    float fCun;
++    float fCeq;
++    float fCueq;
++    float fClt;
++    float fCult;
++    float fCle;
++    float fCule;
++    float fCne;
++    float fCor;
++    float fCune;
++    float fSaf;
++    float fSun;
++    float fSeq;
++    float fSueq;
++    float fSlt;
++    float fSult;
++    float fSle;
++    float fSule;
++    float fSne;
++    float fSor;
++    float fSune;
++  };
++
++  TestFloat test;
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, dOp1)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, dOp2)));
++
++  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, fOp1)));
++  __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, fOp2)));
++
++  __ Fld_d(f12, MemOperand(a0, offsetof(TestFloat, dFalse)));
++  __ Fld_d(f13, MemOperand(a0, offsetof(TestFloat, dTrue)));
++
++  __ Fld_s(f14, MemOperand(a0, offsetof(TestFloat, fFalse)));
++  __ Fld_s(f15, MemOperand(a0, offsetof(TestFloat, fTrue)));
++
++  __ fcmp_cond_d(CAF, f8, f9, FCC0);
++  __ fcmp_cond_s(CAF, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCaf)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCaf)));
++
++  __ fcmp_cond_d(CUN, f8, f9, FCC0);
++  __ fcmp_cond_s(CUN, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCun)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCun)));
++
++  __ fcmp_cond_d(CEQ, f8, f9, FCC0);
++  __ fcmp_cond_s(CEQ, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCeq)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCeq)));
++
++  __ fcmp_cond_d(CUEQ, f8, f9, FCC0);
++  __ fcmp_cond_s(CUEQ, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCueq)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCueq)));
++
++  __ fcmp_cond_d(CLT, f8, f9, FCC0);
++  __ fcmp_cond_s(CLT, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dClt)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fClt)));
++
++  __ fcmp_cond_d(CULT, f8, f9, FCC0);
++  __ fcmp_cond_s(CULT, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCult)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCult)));
++
++  __ fcmp_cond_d(CLE, f8, f9, FCC0);
++  __ fcmp_cond_s(CLE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCle)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCle)));
++
++  __ fcmp_cond_d(CULE, f8, f9, FCC0);
++  __ fcmp_cond_s(CULE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCule)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCule)));
++
++  __ fcmp_cond_d(CNE, f8, f9, FCC0);
++  __ fcmp_cond_s(CNE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCne)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCne)));
++
++  __ fcmp_cond_d(COR, f8, f9, FCC0);
++  __ fcmp_cond_s(COR, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCor)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCor)));
++
++  __ fcmp_cond_d(CUNE, f8, f9, FCC0);
++  __ fcmp_cond_s(CUNE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCune)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCune)));
++
++  /* __ fcmp_cond_d(SAF, f8, f9, FCC0);
++  __ fcmp_cond_s(SAF, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSaf)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSaf)));
++
++  __ fcmp_cond_d(SUN, f8, f9, FCC0);
++  __ fcmp_cond_s(SUN, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSun)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSun)));
++
++  __ fcmp_cond_d(SEQ, f8, f9, FCC0);
++  __ fcmp_cond_s(SEQ, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSeq)));
++  __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSeq)));
++
++  __ fcmp_cond_d(SUEQ, f8, f9, FCC0);
++  __ fcmp_cond_s(SUEQ, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSueq)));
++  __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSueq)));
++
++  __ fcmp_cond_d(SLT, f8, f9, FCC0);
++  __ fcmp_cond_s(SLT, f10, f11, FCC1);
++  __ fsel(f16, f12, f13, FCC0);
++  __ fsel(f17, f14, f15, FCC1);
++  __ Fld_d(f16, MemOperand(a0, offsetof(TestFloat, dSlt)));
++  __ Fst_d(f17, MemOperand(a0, offsetof(TestFloat, fSlt)));
++
++  __ fcmp_cond_d(SULT, f8, f9, FCC0);
++  __ fcmp_cond_s(SULT, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSult)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSult)));
++
++  __ fcmp_cond_d(SLE, f8, f9, FCC0);
++  __ fcmp_cond_s(SLE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSle)));
++  __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSle)));
++
++  __ fcmp_cond_d(SULE, f8, f9, FCC0);
++  __ fcmp_cond_s(SULE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSule)));
++  __ Fst_f(f17, MemOperand(a0, offsetof(TestFloat, fSule)));
++
++  __ fcmp_cond_d(SNE, f8, f9, FCC0);
++  __ fcmp_cond_s(SNE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSne)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSne)));
++
++  __ fcmp_cond_d(SOR, f8, f9, FCC0);
++  __ fcmp_cond_s(SOR, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSor)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSor)));
++
++  __ fcmp_cond_d(SUNE, f8, f9, FCC0);
++  __ fcmp_cond_s(SUNE, f10, f11, FCC1);
++  __ fsel(FCC0, f16, f12, f13);
++  __ fsel(FCC1, f17, f14, f15);
++  __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSune)));
++  __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSune)));*/
++
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  test.dTrue = 1234.0;
++  test.dFalse = 0.0;
++  test.fTrue = 12.0;
++  test.fFalse = 0.0;
++
++  test.dOp1 = 2.0;
++  test.dOp2 = 3.0;
++  test.fOp1 = 2.0;
++  test.fOp2 = 3.0;
++  f.Call(&test, 0, 0, 0, 0);
++
++  CHECK_EQ(test.dCaf, test.dFalse);
++  CHECK_EQ(test.fCaf, test.fFalse);
++  CHECK_EQ(test.dCun, test.dFalse);
++  CHECK_EQ(test.fCun, test.fFalse);
++  CHECK_EQ(test.dCeq, test.dFalse);
++  CHECK_EQ(test.fCeq, test.fFalse);
++  CHECK_EQ(test.dCueq, test.dFalse);
++  CHECK_EQ(test.fCueq, test.fFalse);
++  CHECK_EQ(test.dClt, test.dTrue);
++  CHECK_EQ(test.fClt, test.fTrue);
++  CHECK_EQ(test.dCult, test.dTrue);
++  CHECK_EQ(test.fCult, test.fTrue);
++  CHECK_EQ(test.dCle, test.dTrue);
++  CHECK_EQ(test.fCle, test.fTrue);
++  CHECK_EQ(test.dCule, test.dTrue);
++  CHECK_EQ(test.fCule, test.fTrue);
++  CHECK_EQ(test.dCne, test.dTrue);
++  CHECK_EQ(test.fCne, test.fTrue);
++  CHECK_EQ(test.dCor, test.dTrue);
++  CHECK_EQ(test.fCor, test.fTrue);
++  CHECK_EQ(test.dCune, test.dTrue);
++  CHECK_EQ(test.fCune, test.fTrue);
++  /* CHECK_EQ(test.dSaf, test.dFalse);
++  CHECK_EQ(test.fSaf, test.fFalse);
++  CHECK_EQ(test.dSun, test.dFalse);
++  CHECK_EQ(test.fSun, test.fFalse);
++  CHECK_EQ(test.dSeq, test.dFalse);
++  CHECK_EQ(test.fSeq, test.fFalse);
++  CHECK_EQ(test.dSueq, test.dFalse);
++  CHECK_EQ(test.fSueq, test.fFalse);
++  CHECK_EQ(test.dClt, test.dTrue);
++  CHECK_EQ(test.fClt, test.fTrue);
++  CHECK_EQ(test.dCult, test.dTrue);
++  CHECK_EQ(test.fCult, test.fTrue);
++  CHECK_EQ(test.dSle, test.dTrue);
++  CHECK_EQ(test.fSle, test.fTrue);
++  CHECK_EQ(test.dSule, test.dTrue);
++  CHECK_EQ(test.fSule, test.fTrue);
++  CHECK_EQ(test.dSne, test.dTrue);
++  CHECK_EQ(test.fSne, test.fTrue);
++  CHECK_EQ(test.dSor, test.dTrue);
++  CHECK_EQ(test.fSor, test.fTrue);
++  CHECK_EQ(test.dSune, test.dTrue);
++  CHECK_EQ(test.fSune, test.fTrue);*/
++
++  test.dOp1 = std::numeric_limits<double>::max();
++  test.dOp2 = std::numeric_limits<double>::min();
++  test.fOp1 = std::numeric_limits<float>::min();
++  test.fOp2 = -std::numeric_limits<float>::max();
++  f.Call(&test, 0, 0, 0, 0);
++
++  CHECK_EQ(test.dCaf, test.dFalse);
++  CHECK_EQ(test.fCaf, test.fFalse);
++  CHECK_EQ(test.dCun, test.dFalse);
++  CHECK_EQ(test.fCun, test.fFalse);
++  CHECK_EQ(test.dCeq, test.dFalse);
++  CHECK_EQ(test.fCeq, test.fFalse);
++  CHECK_EQ(test.dCueq, test.dFalse);
++  CHECK_EQ(test.fCueq, test.fFalse);
++  CHECK_EQ(test.dClt, test.dFalse);
++  CHECK_EQ(test.fClt, test.fFalse);
++  CHECK_EQ(test.dCult, test.dFalse);
++  CHECK_EQ(test.fCult, test.fFalse);
++  CHECK_EQ(test.dCle, test.dFalse);
++  CHECK_EQ(test.fCle, test.fFalse);
++  CHECK_EQ(test.dCule, test.dFalse);
++  CHECK_EQ(test.fCule, test.fFalse);
++  CHECK_EQ(test.dCne, test.dTrue);
++  CHECK_EQ(test.fCne, test.fTrue);
++  CHECK_EQ(test.dCor, test.dTrue);
++  CHECK_EQ(test.fCor, test.fTrue);
++  CHECK_EQ(test.dCune, test.dTrue);
++  CHECK_EQ(test.fCune, test.fTrue);
++  /* CHECK_EQ(test.dSaf, test.dFalse);
++  CHECK_EQ(test.fSaf, test.fFalse);
++  CHECK_EQ(test.dSun, test.dFalse);
++  CHECK_EQ(test.fSun, test.fFalse);
++  CHECK_EQ(test.dSeq, test.dFalse);
++  CHECK_EQ(test.fSeq, test.fFalse);
++  CHECK_EQ(test.dSueq, test.dFalse);
++  CHECK_EQ(test.fSueq, test.fFalse);
++  CHECK_EQ(test.dSlt, test.dFalse);
++  CHECK_EQ(test.fSlt, test.fFalse);
++  CHECK_EQ(test.dSult, test.dFalse);
++  CHECK_EQ(test.fSult, test.fFalse);
++  CHECK_EQ(test.dSle, test.dFalse);
++  CHECK_EQ(test.fSle, test.fFalse);
++  CHECK_EQ(test.dSule, test.dFalse);
++  CHECK_EQ(test.fSule, test.fFalse);
++  CHECK_EQ(test.dSne, test.dTrue);
++  CHECK_EQ(test.fSne, test.fTrue);
++  CHECK_EQ(test.dSor, test.dTrue);
++  CHECK_EQ(test.fSor, test.fTrue);
++  CHECK_EQ(test.dSune, test.dTrue);
++  CHECK_EQ(test.fSune, test.fTrue);*/
++
++  test.dOp1 = std::numeric_limits<double>::quiet_NaN();
++  test.dOp2 = 0.0;
++  test.fOp1 = std::numeric_limits<float>::quiet_NaN();
++  test.fOp2 = 0.0;
++  f.Call(&test, 0, 0, 0, 0);
++
++  CHECK_EQ(test.dCaf, test.dFalse);
++  CHECK_EQ(test.fCaf, test.fFalse);
++  CHECK_EQ(test.dCun, test.dTrue);
++  CHECK_EQ(test.fCun, test.fTrue);
++  CHECK_EQ(test.dCeq, test.dFalse);
++  CHECK_EQ(test.fCeq, test.fFalse);
++  CHECK_EQ(test.dCueq, test.dTrue);
++  CHECK_EQ(test.fCueq, test.fTrue);
++  CHECK_EQ(test.dClt, test.dFalse);
++  CHECK_EQ(test.fClt, test.fFalse);
++  CHECK_EQ(test.dCult, test.dTrue);
++  CHECK_EQ(test.fCult, test.fTrue);
++  CHECK_EQ(test.dCle, test.dFalse);
++  CHECK_EQ(test.fCle, test.fFalse);
++  CHECK_EQ(test.dCule, test.dTrue);
++  CHECK_EQ(test.fCule, test.fTrue);
++  CHECK_EQ(test.dCne, test.dFalse);
++  CHECK_EQ(test.fCne, test.fFalse);
++  CHECK_EQ(test.dCor, test.dFalse);
++  CHECK_EQ(test.fCor, test.fFalse);
++  CHECK_EQ(test.dCune, test.dTrue);
++  CHECK_EQ(test.fCune, test.fTrue);
++  /* CHECK_EQ(test.dSaf, test.dTrue);
++  CHECK_EQ(test.fSaf, test.fTrue);
++  CHECK_EQ(test.dSun, test.dTrue);
++  CHECK_EQ(test.fSun, test.fTrue);
++  CHECK_EQ(test.dSeq, test.dFalse);
++  CHECK_EQ(test.fSeq, test.fFalse);
++  CHECK_EQ(test.dSueq, test.dTrue);
++  CHECK_EQ(test.fSueq, test.fTrue);
++  CHECK_EQ(test.dSlt, test.dFalse);
++  CHECK_EQ(test.fSlt, test.fFalse);
++  CHECK_EQ(test.dSult, test.dTrue);
++  CHECK_EQ(test.fSult, test.fTrue);
++  CHECK_EQ(test.dSle, test.dFalse);
++  CHECK_EQ(test.fSle, test.fFalse);
++  CHECK_EQ(test.dSule, test.dTrue);
++  CHECK_EQ(test.fSule, test.fTrue);
++  CHECK_EQ(test.dSne, test.dFalse);
++  CHECK_EQ(test.fSne, test.fFalse);
++  CHECK_EQ(test.dSor, test.dFalse);
++  CHECK_EQ(test.fSor, test.fFalse);
++  CHECK_EQ(test.dSune, test.dTrue);
++  CHECK_EQ(test.fSune, test.fTrue);*/
++}
++
++TEST(FCVT) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct TestFloat {
++    float fcvt_d_s_in;
++    double fcvt_s_d_in;
++    double fcvt_d_s_out;
++    float fcvt_s_d_out;
++    int fcsr;
++  };
++  TestFloat test;
++  __ xor_(a4, a4, a4);
++  __ xor_(a5, a5, a5);
++  __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
++  __ movfcsr2gr(a5);
++  __ movgr2fcsr(a4);
++  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_in)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_in)));
++  __ fcvt_d_s(f10, f8);
++  __ fcvt_s_d(f11, f9);
++  __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_out)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_out)));
++  __ movgr2fcsr(a5);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  test.fcsr = kRoundToZero;
++
++  test.fcvt_d_s_in = -0.51;
++  test.fcvt_s_d_in = -0.51;
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
++  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
++
++  test.fcvt_d_s_in = 0.49;
++  test.fcvt_s_d_in = 0.49;
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
++  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
++
++  test.fcvt_d_s_in = std::numeric_limits<float>::max();
++  test.fcvt_s_d_in = std::numeric_limits<double>::max();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
++  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
++
++  test.fcvt_d_s_in = -std::numeric_limits<float>::max();
++  test.fcvt_s_d_in = -std::numeric_limits<double>::max();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
++  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
++
++  test.fcvt_d_s_in = std::numeric_limits<float>::min();
++  test.fcvt_s_d_in = std::numeric_limits<double>::min();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
++  CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
++}
++
++TEST(FFINT) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct TestFloat {
++    int32_t ffint_s_w_in;
++    int64_t ffint_s_l_in;
++    int32_t ffint_d_w_in;
++    int64_t ffint_d_l_in;
++    float ffint_s_w_out;
++    float ffint_s_l_out;
++    double ffint_d_w_out;
++    double ffint_d_l_out;
++    int fcsr;
++  };
++  TestFloat test;
++  __ xor_(a4, a4, a4);
++  __ xor_(a5, a5, a5);
++  __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
++  __ movfcsr2gr(a5);
++  __ movgr2fcsr(a4);
++  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, ffint_s_w_in)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, ffint_s_l_in)));
++  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, ffint_d_w_in)));
++  __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, ffint_d_l_in)));
++  __ ffint_s_w(f12, f8);
++  __ ffint_s_l(f13, f9);
++  __ ffint_d_w(f14, f10);
++  __ ffint_d_l(f15, f11);
++  __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, ffint_s_w_out)));
++  __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, ffint_s_l_out)));
++  __ Fst_d(f14, MemOperand(a0, offsetof(TestFloat, ffint_d_w_out)));
++  __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, ffint_d_l_out)));
++  __ movgr2fcsr(a5);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  test.fcsr = kRoundToZero;
++
++  test.ffint_s_w_in = -1;
++  test.ffint_s_l_in = -1;
++  test.ffint_d_w_in = -1;
++  test.ffint_d_l_in = -1;
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
++  CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
++  CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
++  CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
++
++  test.ffint_s_w_in = 1;
++  test.ffint_s_l_in = 1;
++  test.ffint_d_w_in = 1;
++  test.ffint_d_l_in = 1;
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
++  CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
++  CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
++  CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
++
++  test.ffint_s_w_in = std::numeric_limits<int32_t>::max();
++  test.ffint_s_l_in = std::numeric_limits<int64_t>::max();
++  test.ffint_d_w_in = std::numeric_limits<int32_t>::max();
++  test.ffint_d_l_in = std::numeric_limits<int64_t>::max();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
++  CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
++  CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
++  CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
++
++  test.ffint_s_w_in = std::numeric_limits<int32_t>::min();
++  test.ffint_s_l_in = std::numeric_limits<int64_t>::min();
++  test.ffint_d_w_in = std::numeric_limits<int32_t>::min();
++  test.ffint_d_l_in = std::numeric_limits<int64_t>::min();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
++  CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
++  CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
++  CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
++}
++
++TEST(FTINT) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    int32_t c;
++    int32_t d;
++    int64_t e;
++    int64_t f;
++    int fcsr;
++  };
++  Test test;
++
++  const int kTableLength = 9;
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<double>::quiet_NaN(),
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<float>::quiet_NaN(),
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_RN_W[kTableLength] = {
++      3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_RN_L[kTableLength] = {
++      3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  double outputs_RZ_W[kTableLength] = {
++      3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_RZ_L[kTableLength] = {
++      3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  double outputs_RP_W[kTableLength] = {
++      4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_RP_L[kTableLength] = {
++      4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  double outputs_RM_W[kTableLength] = {
++      3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_RM_L[kTableLength] = {
++      3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  // clang-format on
++
++  int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf,
++                        kRoundToMinusInf};
++  double* outputs[8] = {
++      outputs_RN_W, outputs_RN_L, outputs_RZ_W, outputs_RZ_L,
++      outputs_RP_W, outputs_RP_L, outputs_RM_W, outputs_RM_L,
++  };
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
++  __ xor_(a5, a5, a5);
++  __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr)));
++  __ movfcsr2gr(a4);
++  __ movgr2fcsr(a5);
++  __ ftint_w_d(f10, f8);
++  __ ftint_w_s(f11, f9);
++  __ ftint_l_d(f12, f8);
++  __ ftint_l_s(f13, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
++  __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
++  __ movgr2fcsr(a4);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  for (int j = 0; j < 4; j++) {
++    test.fcsr = fcsr_inputs[j];
++    for (int i = 0; i < kTableLength; i++) {
++      test.a = inputs_d[i];
++      test.b = inputs_s[i];
++      f.Call(&test, 0, 0, 0, 0);
++      CHECK_EQ(test.c, outputs[2 * j][i]);
++      CHECK_EQ(test.d, outputs[2 * j][i]);
++      CHECK_EQ(test.e, outputs[2 * j + 1][i]);
++      CHECK_EQ(test.f, outputs[2 * j + 1][i]);
++    }
++  }
++}
++
++TEST(FTINTRM) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    int32_t c;
++    int32_t d;
++    int64_t e;
++    int64_t f;
++  };
++  Test test;
++
++  const int kTableLength = 9;
++
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<double>::quiet_NaN(),
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<float>::quiet_NaN(),
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_w[kTableLength] = {
++      3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_l[kTableLength] = {
++      3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
++  __ ftintrm_w_d(f10, f8);
++  __ ftintrm_w_s(f11, f9);
++  __ ftintrm_l_d(f12, f8);
++  __ ftintrm_l_s(f13, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
++  __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  for (int i = 0; i < kTableLength; i++) {
++    test.a = inputs_d[i];
++    test.b = inputs_s[i];
++    f.Call(&test, 0, 0, 0, 0);
++    CHECK_EQ(test.c, outputs_w[i]);
++    CHECK_EQ(test.d, outputs_w[i]);
++    CHECK_EQ(test.e, outputs_l[i]);
++    CHECK_EQ(test.f, outputs_l[i]);
++  }
++}
++
++TEST(FTINTRP) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    int32_t c;
++    int32_t d;
++    int64_t e;
++    int64_t f;
++  };
++  Test test;
++
++  const int kTableLength = 9;
++
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<double>::quiet_NaN(),
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<float>::quiet_NaN(),
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_w[kTableLength] = {
++      4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_l[kTableLength] = {
++      4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
++  __ ftintrp_w_d(f10, f8);
++  __ ftintrp_w_s(f11, f9);
++  __ ftintrp_l_d(f12, f8);
++  __ ftintrp_l_s(f13, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
++  __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  for (int i = 0; i < kTableLength; i++) {
++    test.a = inputs_d[i];
++    test.b = inputs_s[i];
++    f.Call(&test, 0, 0, 0, 0);
++    CHECK_EQ(test.c, outputs_w[i]);
++    CHECK_EQ(test.d, outputs_w[i]);
++    CHECK_EQ(test.e, outputs_l[i]);
++    CHECK_EQ(test.f, outputs_l[i]);
++  }
++}
++
++TEST(FTINTRZ) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    int32_t c;
++    int32_t d;
++    int64_t e;
++    int64_t f;
++  };
++  Test test;
++
++  const int kTableLength = 9;
++
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<double>::quiet_NaN(),
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<float>::quiet_NaN(),
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_w[kTableLength] = {
++      3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_l[kTableLength] = {
++      3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
++  __ ftintrz_w_d(f10, f8);
++  __ ftintrz_w_s(f11, f9);
++  __ ftintrz_l_d(f12, f8);
++  __ ftintrz_l_s(f13, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
++  __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  for (int i = 0; i < kTableLength; i++) {
++    test.a = inputs_d[i];
++    test.b = inputs_s[i];
++    f.Call(&test, 0, 0, 0, 0);
++    CHECK_EQ(test.c, outputs_w[i]);
++    CHECK_EQ(test.d, outputs_w[i]);
++    CHECK_EQ(test.e, outputs_l[i]);
++    CHECK_EQ(test.f, outputs_l[i]);
++  }
++}
++
++TEST(FTINTRNE) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    int32_t c;
++    int32_t d;
++    int64_t e;
++    int64_t f;
++  };
++  Test test;
++
++  const int kTableLength = 9;
++
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<double>::quiet_NaN(),
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
++      2147483648.0,
++      std::numeric_limits<float>::quiet_NaN(),
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_w[kTableLength] = {
++      3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
++      kFPUInvalidResult, 0,
++      kFPUInvalidResult};
++  double outputs_l[kTableLength] = {
++      3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
++      2147483648.0, 0,
++      kFPU64InvalidResult};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
++  __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
++  __ ftintrne_w_d(f10, f8);
++  __ ftintrne_w_s(f11, f9);
++  __ ftintrne_l_d(f12, f8);
++  __ ftintrne_l_s(f13, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
++  __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
++  __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode::FromCode(*code);
++  for (int i = 0; i < kTableLength; i++) {
++    test.a = inputs_d[i];
++    test.b = inputs_s[i];
++    f.Call(&test, 0, 0, 0, 0);
++    CHECK_EQ(test.c, outputs_w[i]);
++    CHECK_EQ(test.d, outputs_w[i]);
++    CHECK_EQ(test.e, outputs_l[i]);
++    CHECK_EQ(test.f, outputs_l[i]);
++  }
++}
++
++TEST(FRINT) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct Test {
++    double a;
++    float b;
++    double c;
++    float d;
++    int fcsr;
++  };
++  Test test;
++
++  const int kTableLength = 32;
++
++  // clang-format off
++  double inputs_d[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
++      1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
++      309485009821345068724781056.89,
++      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
++      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<double>::max() - 0.1,
++      std::numeric_limits<double>::infinity()
++      };
++  float inputs_s[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
++      1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
++      309485009821345068724781056.89,
++      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
++      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<float>::lowest() + 0.6,
++      std::numeric_limits<float>::infinity()
++      };
++  float outputs_RN_S[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
++      1.7976931348623157E38, 0,
++      309485009821345068724781057.0,
++      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
++      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<float>::lowest() + 1,
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_RN_D[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
++      1.7976931348623157E308, 0,
++      309485009821345068724781057.0,
++      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
++      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<double>::max(),
++      std::numeric_limits<double>::infinity()
++      };
++  float outputs_RZ_S[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
++      1.7976931348623157E38, 0,
++      309485009821345068724781057.0,
++      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
++      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<float>::lowest() + 1,
++      std::numeric_limits<float>::infinity()
++      };
++  double outputs_RZ_D[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
++      1.7976931348623157E308, 0,
++      309485009821345068724781057.0,
++      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
++      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
++      37778931862957161709568.0, 37778931862957161709569.0,
++      37778931862957161709580.0, 37778931862957161709581.0,
++      37778931862957161709582.0, 37778931862957161709583.0,
++      37778931862957161709584.0, 37778931862957161709585.0,
++      37778931862957161709586.0, 37778931862957161709587.0,
++      std::numeric_limits<double>::max() - 1,
++      std::numeric_limits<double>::infinity()
++      };
++  float outputs_RP_S[kTableLength] = {
++      18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
++      1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
++      1.7976931348623157E38, 1,
++      309485009821345068724781057.0,
++      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
++      -2.0, -2.0,
-2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RP_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 1, ++ 309485009821345068724781057.0, ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_RM_S[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37, ++ 1.7976931348623157E38, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_RM_D[kTableLength] = { ++ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0, ++ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, ++ 1.7976931348623157E308, 0, ++ 309485009821345068724781057.0, ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 37778931862957161709568.0, 37778931862957161709569.0, ++ 37778931862957161709580.0, 37778931862957161709581.0, ++ 37778931862957161709582.0, 37778931862957161709583.0, ++ 37778931862957161709584.0, 37778931862957161709585.0, ++ 37778931862957161709586.0, 37778931862957161709587.0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ // clang-format on ++ ++ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf, ++ kRoundToMinusInf}; ++ double* outputs_d[4] = {outputs_RN_D, outputs_RZ_D, outputs_RP_D, ++ outputs_RM_D}; ++ float* outputs_s[4] = {outputs_RN_S, outputs_RZ_S, outputs_RP_S, ++ outputs_RM_S}; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b))); ++ __ xor_(a5, a5, a5); ++ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr))); ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(a5); ++ __ frint_d(f10, f8); ++ __ frint_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(Test, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d))); ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int j = 0; j < 4; j++) { ++ test.fcsr = fcsr_inputs[j]; ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_d[i]; ++ test.b = inputs_s[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_d[j][i]); ++ 
CHECK_EQ(test.d, outputs_s[j][i]); ++ } ++ } ++} ++ ++TEST(FMOV) { ++ const int kTableLength = 7; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ float b; ++ double c; ++ float d; ++ }; ++ ++ TestFloat test; ++ ++ // clang-format off ++ double inputs_D[kTableLength] = { ++ 5.3, -5.3, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ float inputs_S[kTableLength] = { ++ 4.8, -4.8, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ ++ double outputs_D[kTableLength] = { ++ 5.3, -5.3, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ ++ float outputs_S[kTableLength] = { ++ 4.8, -4.8, 0.29, -0.29, 0, ++ std::numeric_limits::max(), ++ -std::numeric_limits::max() ++ }; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fmov_d(f10, f8); ++ __ fmov_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_D[i]; ++ test.b = inputs_S[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, outputs_D[i]); ++ CHECK_EQ(test.d, outputs_S[i]); ++ } ++} ++ ++TEST(LA14) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ struct T { ++ double a; ++ double b; ++ double c; ++ double d; ++ int64_t high; ++ int64_t low; ++ }; ++ T t; ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(T, b))); ++ ++ __ movfr2gr_s(a4, f8); ++ __ movfrh2gr_s(a5, f8); ++ __ movfr2gr_d(a6, f9); ++ ++ __ movgr2fr_w(f9, a4); ++ __ movgr2frh_w(f9, a5); ++ __ movgr2fr_d(f8, a6); ++ ++ __ Fst_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fst_d(f9, MemOperand(a0, offsetof(T, c))); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, d))); ++ __ movfrh2gr_s(a4, f8); ++ __ movfr2gr_s(a5, f8); ++ ++ __ St_d(a4, MemOperand(a0, offsetof(T, high))); ++ __ St_d(a5, MemOperand(a0, offsetof(T, low))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ t.a = 1.5e22; ++ t.b = 2.75e11; ++ t.c = 17.17; ++ t.d = -2.75e11; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(2.75e11, t.a); ++ CHECK_EQ(2.75e11, t.b); ++ CHECK_EQ(1.5e22, t.c); ++ CHECK_EQ(static_cast(0xFFFFFFFFC25001D1L), t.high); ++ CHECK_EQ(static_cast(0xFFFFFFFFBF800000L), t.low); ++ ++ t.a = -1.5e22; ++ t.b = -2.75e11; ++ t.c = 17.17; ++ t.d = 274999868928.0; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(-2.75e11, t.a); ++ CHECK_EQ(-2.75e11, t.b); ++ CHECK_EQ(-1.5e22, t.c); ++ CHECK_EQ(static_cast(0x425001D1L), t.high); ++ CHECK_EQ(static_cast(0x3F800000L), t.low); ++} ++ ++uint64_t run_bceqz(int fcc_value, int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; 
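++  // Layout note: blocks 1/2 (before) and 4/5 (after) pad the main block with
++  // 50-instruction runs so the tested branch offsets have in-range targets;
++  // each block adds a distinct constant to a2 (0x1, 0x10, 0x100, 0x300), so
++  // the value returned in a0 identifies where the conditional branch landed.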
++ __ li(a2, 0); ++ __ li(t0, fcc_value); ++ __ b(&main_block); ++ // Block 1 ++ for (int32_t i = -104; i <= -55; ++i) { ++ __ addi_d(a2, a2, 0x1); ++ } ++ __ b(&L); ++ ++ // Block 2 ++ for (int32_t i = -53; i <= -4; ++i) { ++ __ addi_d(a2, a2, 0x10); ++ } ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ movcf2gr(t1, FCC0); ++ __ movgr2cf(FCC0, t0); ++ __ bceqz(FCC0, offset); ++ __ bind(&L); ++ __ movgr2cf(FCC0, t1); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ for (int32_t i = 4; i <= 53; ++i) { ++ __ addi_d(a2, a2, 0x100); ++ } ++ __ b(&L); ++ ++ // Block 5 ++ for (int32_t i = 55; i <= 104; ++i) { ++ __ addi_d(a2, a2, 0x300); ++ } ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BCEQZ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBceqz { ++ int fcc; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBceqz tc[] = { ++ // fcc, offset, expected_res ++ { 0, -90, 0x24 }, ++ { 0, -27, 0x180 }, ++ { 0, 47, 0x700 }, ++ { 0, 70, 0x6900 }, ++ { 1, -27, 0 }, ++ { 1, 47, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBceqz); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bceqz(tc[i].fcc, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++uint64_t run_bcnez(int fcc_value, int32_t offset) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ Label main_block, L; ++ __ li(a2, 0); ++ __ li(t0, fcc_value); ++ __ b(&main_block); ++ // Block 1 ++ for (int32_t i = -104; i <= -55; ++i) { ++ __ addi_d(a2, a2, 0x1); ++ } ++ __ b(&L); ++ ++ // Block 2 ++ for (int32_t i = -53; i <= -4; ++i) { ++ __ addi_d(a2, a2, 0x10); ++ } ++ __ b(&L); ++ ++ // Block 3 (Main) ++ __ bind(&main_block); ++ __ movcf2gr(t1, FCC0); ++ __ movgr2cf(FCC0, t0); ++ __ bcnez(FCC0, offset); ++ __ bind(&L); ++ __ movgr2cf(FCC0, t1); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ // Block 4 ++ for (int32_t i = 4; i <= 53; ++i) { ++ __ addi_d(a2, a2, 0x100); ++ } ++ __ b(&L); ++ ++ // Block 5 ++ for (int32_t i = 55; i <= 104; ++i) { ++ __ addi_d(a2, a2, 0x300); ++ } ++ __ b(&L); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(BCNEZ) { ++ CcTest::InitializeVM(); ++ struct TestCaseBcnez { ++ int fcc; ++ int32_t offset; ++ uint64_t expected_res; ++ }; ++ ++ // clang-format off ++ struct TestCaseBcnez tc[] = { ++ // fcc, offset, expected_res ++ { 1, -90, 0x24 }, ++ { 1, -27, 0x180 }, ++ { 1, 47, 0x700 }, ++ { 1, 70, 0x6900 }, ++ { 0, -27, 0 }, ++ { 0, 47, 0 }, ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBcnez); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_bcnez(tc[i].fcc, tc[i].offset); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++TEST(jump_tables1) { ++ // Test jump tables with forward jumps. 
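++  // Dispatch pattern used below: pcaddi materializes a PC-relative base in
++  // ra, slli_d scales the case index by 8 (each dd() table entry is 64 bits),
++  // and jirl jumps through the label address loaded from the inline table.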
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ __ Align(8); ++ ++ Label done; ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); ++ __ ori(a2, a2, values[i] & 0xFFF); ++ __ b(&done); ++ __ nop(); ++ } ++ ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CHECK_EQ(0, assm.UnboundLabelsCount()); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ((values[i]), static_cast(res)); ++ } ++} ++ ++TEST(jump_tables2) { ++ // Test jump tables with backward jumps. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ ++ Label done, dispatch; ++ __ b(&dispatch); ++ __ nop(); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF); ++ __ ori(a2, a2, values[i] & 0xFFF); ++ __ b(&done); ++ __ nop(); ++ } ++ ++ __ Align(8); ++ __ bind(&dispatch); ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++TEST(jump_tables3) { ++ // Test jump tables with backward jumps and embedded heap objects. 
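++  // Unlike jump_tables1/2, each case here materializes a full 64-bit tagged
++  // object pointer with the lu12i_w / ori / lu32i_d / lu52i_d sequence
++  // (bits 31:12, 11:0, 51:32 and 63:52 respectively) before returning it.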
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ const int kNumCases = 512; ++ Handle values[kNumCases]; ++ for (int i = 0; i < kNumCases; ++i) { ++ double value = isolate->random_number_generator()->NextDouble(); ++ values[i] = isolate->factory()->NewHeapNumber(value); ++ } ++ Label labels[kNumCases]; ++ Object obj; ++ int64_t imm64; ++ ++ __ addi_d(sp, sp, -8); ++ __ St_d(ra, MemOperand(sp, 0)); ++ ++ Label done, dispatch; ++ __ b(&dispatch); ++ __ nop(); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ obj = *values[i]; ++ imm64 = obj.ptr(); ++ __ lu12i_w(a2, (imm64 >> 12) & 0xFFFFF); ++ __ ori(a2, a2, imm64 & 0xFFF); ++ __ lu32i_d(a2, (imm64 >> 32) & 0xFFFFF); ++ __ lu52i_d(a2, a2, (imm64 >> 52) & 0xFFF); ++ __ b(&done); ++ } ++ ++ __ Align(8); ++ __ bind(&dispatch); ++ { ++ __ BlockTrampolinePoolFor(kNumCases * 2 + 6); ++ __ pcaddi(ra, 2); ++ __ slli_d(t7, a0, 3); // In delay slot. ++ __ add_d(t7, t7, ra); ++ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize)); ++ __ jirl(zero_reg, t7, 0); ++ __ nop(); ++ for (int i = 0; i < kNumCases; ++i) { ++ __ dd(&labels[i]); ++ } ++ } ++ __ bind(&done); ++ __ Ld_d(ra, MemOperand(sp, 0)); ++ __ addi_d(sp, sp, 8); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ Handle result( ++ Object(reinterpret_cast
<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
++#ifdef OBJECT_PRINT
++    ::printf("f(%d) = ", i);
++    result->Print(std::cout);
++    ::printf("\n");
++#endif
++    CHECK(values[i].is_identical_to(result));
++  }
++}
++
++uint64_t run_li_macro(int64_t imm, LiFlags mode, int32_t num_instr = 0) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  Label code_start;
++  __ bind(&code_start);
++  __ li(a2, imm, mode);
++  if (num_instr > 0) {
++    CHECK_EQ(assm.InstructionsGeneratedSince(&code_start), num_instr);
++    CHECK_EQ(__ InstrCountForLi64Bit(imm), num_instr);
++  }
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++#ifdef OBJECT_PRINT
++  code->Print(std::cout);
++#endif
++  auto f = GeneratedCode<F2>::FromCode(*code);
++
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(li_macro) {
++  CcTest::InitializeVM();
++
++  // Test li macro-instruction for border cases.
++
++  struct TestCase_li {
++    uint64_t imm;
++    int32_t num_instr;
++  };
++  // clang-format off
++  struct TestCase_li tc[] = {
++      //               imm, num_instr
++      {0xFFFFFFFFFFFFF800,         1},  // min_int12
++      // The test case above generates an addi_d instruction.
++      // This is an int12 value and we can load it using just addi_d.
++      {             0x800,         1},  // max_int12 + 1
++      // Generates ori
++      // max_int12 + 1 is not int12 but is uint12, just use ori.
++      {0xFFFFFFFFFFFFF7FF,         2},  // min_int12 - 1
++      // Generates lu12i_w + ori
++      // We load an int32 value using lu12i_w + ori.
++      {             0x801,         1},  // max_int12 + 2
++      // Generates ori
++      // Also a uint12 value, use ori.
++      {        0x00001000,         1},  // max_uint12 + 1
++      // Generates lu12i_w
++      // Low 12 bits are 0, load the value using lu12i_w.
++      {        0x00001001,         2},  // max_uint12 + 2
++      // Generates lu12i_w + ori
++      // We have to generate two instructions in this case.
++      {0x00000000FFFFFFFF,         2},  // max_uint32
++      // addi_w + lu32i_d
++      {0x00000000FFFFFFFE,         2},  // max_uint32 - 1
++      // addi_w + lu32i_d
++      {0xFFFFFFFF80000000,         1},  // min_int32
++      // lu12i_w
++      {0x0000000080000000,         2},  // max_int32 + 1
++      // lu12i_w + lu32i_d
++      {0xFFFF0000FFFF8765,         3},
++      // lu12i_w + ori + lu32i_d
++      {0x1234ABCD87654321,         4},
++      // lu12i_w + ori + lu32i_d + lu52i_d
++      {0xFFFF789100000000,         2},
++      // xor + lu32i_d
++      {0xF12F789100000000,         3},
++      // xor + lu32i_d + lu52i_d
++      {0xF120000000000800,         2},
++      // ori + lu52i_d
++      {0xFFF0000000000000,         1},
++      // lu52i_d
++      {0xF100000000000000,         1},
++      {0x0122000000000000,         2},
++      {0x1234FFFF77654321,         4},
++      {0x1230000077654321,         3},
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCase_li);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    CHECK_EQ(tc[i].imm,
++             run_li_macro(tc[i].imm, OPTIMIZE_SIZE, tc[i].num_instr));
++    CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, CONSTANT_SIZE));
++    if (is_int48(tc[i].imm)) {
++      CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, ADDRESS_LOAD));
++    }
++  }
++}
++
++TEST(FMIN_FMAX) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct TestFloat {
++    double a;
++    double b;
++    float c;
++    float d;
++    double e;
++    double f;
++    float g;
++    float h;
++  };
++
++  TestFloat test;
++  const double dnan = std::numeric_limits<double>::quiet_NaN();
++  const double dinf = std::numeric_limits<double>::infinity();
++  const double dminf = -std::numeric_limits<double>::infinity();
++  const float fnan = std::numeric_limits<float>::quiet_NaN();
++  const float finf = std::numeric_limits<float>::infinity();
++  const float fminf = -std::numeric_limits<float>::infinity();
++  const int kTableLength = 13;
++
++  // clang-format off
++  double inputsa[kTableLength] = {2.0,  3.0,  dnan, 3.0,   -0.0, 0.0, dinf,
++                                  dnan, 42.0, dinf, dminf, dinf, dnan};
++  double inputsb[kTableLength] = {3.0,  2.0,  3.0,  dnan, 0.0,   -0.0, dnan,
++                                  dinf, dinf, 42.0, dinf, dminf, dnan};
++  double outputsdmin[kTableLength] = {2.0,   2.0,   3.0,  3.0,  -0.0,
++                                      -0.0,  dinf,  dinf, 42.0, 42.0,
++                                      dminf, dminf, dnan};
++  double outputsdmax[kTableLength] = {3.0,  3.0,  3.0,  3.0,  0.0,  0.0, dinf,
++                                      dinf, dinf, dinf, dinf, dinf, dnan};
++
++  float inputsc[kTableLength] = {2.0,  3.0,  fnan, 3.0,   -0.0, 0.0, finf,
++                                 fnan, 42.0, finf, fminf, finf, fnan};
++  float inputsd[kTableLength] = {3.0,  2.0,  3.0,  fnan, 0.0,   -0.0, fnan,
++                                 finf, finf, 42.0, finf, fminf, fnan};
++  float outputsfmin[kTableLength] = {2.0,   2.0,   3.0,  3.0,  -0.0,
++                                     -0.0,  finf,  finf, 42.0, 42.0,
++                                     fminf, fminf, fnan};
++  float outputsfmax[kTableLength] = {3.0,  3.0,  3.0,  3.0,  0.0,  0.0, finf,
++                                     finf, finf, finf, finf, finf, fnan};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
++  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c)));
++  __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
++  __ fmin_d(f12, f8, f9);
++  __ fmax_d(f13, f8, f9);
++  __ fmin_s(f14, f10, f11);
++  __ fmax_s(f15, f10, f11);
++  __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, e)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, f)));
++  __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g)));
++  __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++  for (int i = 4; i < kTableLength; i++) {
++    test.a = inputsa[i];
++    test.b = inputsb[i];
++    test.c = inputsc[i];
++    test.d = inputsd[i];
++
++    f.Call(&test, 0, 0, 0, 0);
++
++    CHECK_EQ(0, memcmp(&test.e, &outputsdmin[i], sizeof(test.e)));
++    CHECK_EQ(0, memcmp(&test.f, &outputsdmax[i], sizeof(test.f)));
++    CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
++    CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
++  }
++}
++
++TEST(FMINA_FMAXA) {
++  const int kTableLength = 23;
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++  const double dnan = std::numeric_limits<double>::quiet_NaN();
++  const double dinf = std::numeric_limits<double>::infinity();
++  const double dminf = -std::numeric_limits<double>::infinity();
++  const float fnan = std::numeric_limits<float>::quiet_NaN();
++  const float finf = std::numeric_limits<float>::infinity();
++  const float fminf = -std::numeric_limits<float>::infinity();
++
++  struct TestFloat {
++    double a;
++    double b;
++    double resd1;
++    double resd2;
++    float c;
++    float d;
++    float resf1;
++    float resf2;
++  };
++
++  TestFloat test;
++  // clang-format off
++  double inputsa[kTableLength] = {
++      5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
++      dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
++  double inputsb[kTableLength] = {
++      4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
++      3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
++  double resd1[kTableLength] = {
++      4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
++      3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
++  double resd2[kTableLength] = {
++      5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
++      3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
++  float inputsc[kTableLength] = {
++      5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
++      fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
++  float inputsd[kTableLength] = {
++      4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
++      3.0, fnan, -0.0, 0.0, fnan, finf, finf, 42.0, finf, fminf, fnan};
++  float resf1[kTableLength] = {
++      4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
++      3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
++  float resf2[kTableLength] = {
++      5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
++      3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
++  // clang-format on
++
++  __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
++  __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c)));
++  __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
++  __ fmina_d(f12, f8, f9);
++  __ fmaxa_d(f13, f8, f9);
++  __ fmina_s(f14, f10, f11);
++  __ fmaxa_s(f15, f10, f11);
++  __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, resd1)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resd2)));
++  __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resf1)));
++  __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, resf2)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++  for (int i = 0; i < kTableLength; i++) {
++    test.a = inputsa[i];
++    test.b = inputsb[i];
++
test.c = inputsc[i]; ++ test.d = inputsd[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ if (i < kTableLength - 1) { ++ CHECK_EQ(test.resd1, resd1[i]); ++ CHECK_EQ(test.resd2, resd2[i]); ++ CHECK_EQ(test.resf1, resf1[i]); ++ CHECK_EQ(test.resf2, resf2[i]); ++ } else { ++ CHECK(std::isnan(test.resd1)); ++ CHECK(std::isnan(test.resd2)); ++ CHECK(std::isnan(test.resf1)); ++ CHECK(std::isnan(test.resf2)); ++ } ++ } ++} ++ ++TEST(FADD) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ double b; ++ double c; ++ float d; ++ float e; ++ float f; ++ }; ++ ++ TestFloat test; ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fadd_d(f10, f8, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ Fld_s(f12, MemOperand(a0, offsetof(TestFloat, e))); ++ __ fadd_s(f13, f11, f12); ++ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, f))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.a = 2.0; ++ test.b = 3.0; ++ test.d = 2.0; ++ test.e = 3.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, 5.0); ++ CHECK_EQ(test.f, 5.0); ++ ++ test.a = std::numeric_limits::max(); ++ test.b = -std::numeric_limits::max(); // lowest() ++ test.d = std::numeric_limits::max(); ++ test.e = -std::numeric_limits::max(); // lowest() ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.c, 0.0); ++ CHECK_EQ(test.f, 0.0); ++ ++ test.a = std::numeric_limits::max(); ++ test.b = std::numeric_limits::max(); ++ test.d = std::numeric_limits::max(); ++ test.e = std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(!std::isfinite(test.c)); ++ CHECK(!std::isfinite(test.f)); ++ ++ test.a = 5.0; ++ test.b = std::numeric_limits::signaling_NaN(); ++ test.d = 5.0; ++ test.e = std::numeric_limits::signaling_NaN(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.c)); ++ CHECK(std::isnan(test.f)); ++} ++ ++TEST(FSUB) { ++ const int kTableLength = 12; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float a; ++ float b; ++ float resultS; ++ double c; ++ double d; ++ double resultD; ++ }; ++ ++ TestFloat test; ++ ++ // clang-format off ++ double inputfs_D[kTableLength] = { ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 ++ }; ++ double inputft_D[kTableLength] = { ++ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, ++ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 ++ }; ++ double outputs_D[kTableLength] = { ++ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, ++ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 ++ }; ++ float inputfs_S[kTableLength] = { ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9, ++ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9 ++ }; ++ float inputft_S[kTableLength] = { ++ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9, ++ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9 ++ }; ++ float outputs_S[kTableLength] = { ++ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8, ++ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0 ++ }; ++ // clang-format on ++ ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_d(f11, 
MemOperand(a0, offsetof(TestFloat, d))); ++ __ fsub_s(f12, f8, f9); ++ __ fsub_d(f13, f10, f11); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputfs_S[i]; ++ test.b = inputft_S[i]; ++ test.c = inputfs_D[i]; ++ test.d = inputft_D[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.resultS, outputs_S[i]); ++ CHECK_EQ(test.resultD, outputs_D[i]); ++ } ++} ++ ++TEST(FMUL) { ++ const int kTableLength = 4; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ float a; ++ float b; ++ float resultS; ++ double c; ++ double d; ++ double resultD; ++ }; ++ ++ TestFloat test; ++ // clang-format off ++ double inputfs_D[kTableLength] = { ++ 5.3, -5.3, 5.3, -2.9 ++ }; ++ double inputft_D[kTableLength] = { ++ 4.8, 4.8, -4.8, -0.29 ++ }; ++ ++ float inputfs_S[kTableLength] = { ++ 5.3, -5.3, 5.3, -2.9 ++ }; ++ float inputft_S[kTableLength] = { ++ 4.8, 4.8, -4.8, -0.29 ++ }; ++ // clang-format on ++ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, d))); ++ __ fmul_s(f12, f8, f9); ++ __ fmul_d(f13, f10, f11); ++ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputfs_S[i]; ++ test.b = inputft_S[i]; ++ test.c = inputfs_D[i]; ++ test.d = inputft_D[i]; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.resultS, inputfs_S[i] * inputft_S[i]); ++ CHECK_EQ(test.resultD, inputfs_D[i] * inputft_D[i]); ++ } ++} ++ ++TEST(FDIV) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct Test { ++ double dOp1; ++ double dOp2; ++ double dRes; ++ float fOp1; ++ float fOp2; ++ float fRes; ++ }; ++ ++ Test test; ++ ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(zero_reg); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(Test, dOp1))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(Test, dOp2))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(Test, fOp1))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(Test, fOp2))); ++ __ fdiv_d(f12, f8, f9); ++ __ fdiv_s(f13, f10, f11); ++ __ Fst_d(f12, MemOperand(a0, offsetof(Test, dRes))); ++ __ Fst_s(f13, MemOperand(a0, offsetof(Test, fRes))); ++ ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(&test, 0, 0, 0, 0); ++ const int test_size = 3; ++ // clang-format off ++ double dOp1[test_size] = { ++ 5.0, DBL_MAX, DBL_MAX}; ++ ++ double dOp2[test_size] = { ++ 2.0, 2.0, -DBL_MAX}; ++ ++ double dRes[test_size] = { ++ 2.5, DBL_MAX / 2.0, -1.0}; ++ 
++ float fOp1[test_size] = { ++ 5.0, FLT_MAX, FLT_MAX}; ++ ++ float fOp2[test_size] = { ++ 2.0, 2.0, -FLT_MAX}; ++ ++ float fRes[test_size] = { ++ 2.5, FLT_MAX / 2.0, -1.0}; ++ // clang-format on ++ ++ for (int i = 0; i < test_size; i++) { ++ test.dOp1 = dOp1[i]; ++ test.dOp2 = dOp2[i]; ++ test.fOp1 = fOp1[i]; ++ test.fOp2 = fOp2[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.dRes, dRes[i]); ++ CHECK_EQ(test.fRes, fRes[i]); ++ } ++ ++ test.dOp1 = DBL_MAX; ++ test.dOp2 = -0.0; ++ test.fOp1 = FLT_MAX; ++ test.fOp2 = -0.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(!std::isfinite(test.dRes)); ++ CHECK(!std::isfinite(test.fRes)); ++ ++ test.dOp1 = 0.0; ++ test.dOp2 = -0.0; ++ test.fOp1 = 0.0; ++ test.fOp2 = -0.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.dRes)); ++ CHECK(std::isnan(test.fRes)); ++ ++ test.dOp1 = std::numeric_limits::quiet_NaN(); ++ test.dOp2 = -5.0; ++ test.fOp1 = std::numeric_limits::quiet_NaN(); ++ test.fOp2 = -5.0; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.dRes)); ++ CHECK(std::isnan(test.fRes)); ++} ++ ++TEST(FABS) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ ++ struct TestFloat { ++ double a; ++ float b; ++ }; ++ ++ TestFloat test; ++ ++ __ movfcsr2gr(a4); ++ __ movgr2fcsr(zero_reg); ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ fabs_d(f10, f8); ++ __ fabs_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, b))); ++ ++ __ movgr2fcsr(a4); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ test.a = -2.0; ++ test.b = -2.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, 2.0); ++ CHECK_EQ(test.b, 2.0); ++ ++ test.a = 2.0; ++ test.b = 2.0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, 2.0); ++ CHECK_EQ(test.b, 2.0); ++ ++ // Testing biggest positive number ++ test.a = std::numeric_limits::max(); ++ test.b = std::numeric_limits::max(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits::max()); ++ CHECK_EQ(test.b, std::numeric_limits::max()); ++ ++ // Testing smallest negative number ++ test.a = -std::numeric_limits::max(); // lowest() ++ test.b = -std::numeric_limits::max(); // lowest() ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits::max()); ++ CHECK_EQ(test.b, std::numeric_limits::max()); ++ ++ // Testing smallest positive number ++ test.a = -std::numeric_limits::min(); ++ test.b = -std::numeric_limits::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits::min()); ++ CHECK_EQ(test.b, std::numeric_limits::min()); ++ ++ // Testing infinity ++ test.a = ++ -std::numeric_limits::max() / std::numeric_limits::min(); ++ test.b = ++ -std::numeric_limits::max() / std::numeric_limits::min(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.a, std::numeric_limits::max() / ++ std::numeric_limits::min()); ++ CHECK_EQ(test.b, std::numeric_limits::max() / ++ std::numeric_limits::min()); ++ ++ test.a = std::numeric_limits::quiet_NaN(); ++ test.b = std::numeric_limits::quiet_NaN(); ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK(std::isnan(test.a)); ++ CHECK(std::isnan(test.b)); ++ ++ test.a = std::numeric_limits::signaling_NaN(); ++ test.b = 
std::numeric_limits<float>::signaling_NaN();
++  f.Call(&test, 0, 0, 0, 0);
++  CHECK(std::isnan(test.a));
++  CHECK(std::isnan(test.b));
++}
++
++template <typename T>
++struct TestCaseMaddMsub {
++  T fj, fk, fa, fd_fmadd, fd_fmsub, fd_fnmadd, fd_fnmsub;
++};
++
++template <typename T, typename F>
++void helper_fmadd_fmsub_fnmadd_fnmsub(F func) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  T x = std::sqrt(static_cast<T>(2.0));
++  T y = std::sqrt(static_cast<T>(3.0));
++  T z = std::sqrt(static_cast<T>(5.0));
++  T x2 = 11.11, y2 = 22.22, z2 = 33.33;
++  // clang-format off
++  TestCaseMaddMsub<T> test_cases[] = {
++      {x, y, z, 0.0, 0.0, 0.0, 0.0},
++      {x, y, -z, 0.0, 0.0, 0.0, 0.0},
++      {x, -y, z, 0.0, 0.0, 0.0, 0.0},
++      {x, -y, -z, 0.0, 0.0, 0.0, 0.0},
++      {-x, y, z, 0.0, 0.0, 0.0, 0.0},
++      {-x, y, -z, 0.0, 0.0, 0.0, 0.0},
++      {-x, -y, z, 0.0, 0.0, 0.0, 0.0},
++      {-x, -y, -z, 0.0, 0.0, 0.0, 0.0},
++      {-3.14, 0.2345, -123.000056, 0.0, 0.0, 0.0, 0.0},
++      {7.3, -23.257, -357.1357, 0.0, 0.0, 0.0, 0.0},
++      {x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
++      {x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
++      {x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
++      {x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
++      {-x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
++      {-x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
++      {-x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
++      {-x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
++  };
++  // clang-format on
++  if (std::is_same<T, float>::value) {
++    __ Fld_s(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj)));
++    __ Fld_s(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk)));
++    __ Fld_s(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa)));
++  } else if (std::is_same<T, double>::value) {
++    __ Fld_d(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj)));
++    __ Fld_d(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk)));
++    __ Fld_d(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa)));
++  } else {
++    UNREACHABLE();
++  }
++
++  func(assm);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++
++  const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
++  TestCaseMaddMsub<T> tc;
++  for (size_t i = 0; i < kTableLength; i++) {
++    tc.fj = test_cases[i].fj;
++    tc.fk = test_cases[i].fk;
++    tc.fa = test_cases[i].fa;
++
++    f.Call(&tc, 0, 0, 0, 0);
++
++    T res_fmadd;
++    T res_fmsub;
++    T res_fnmadd;
++    T res_fnmsub;
++    res_fmadd = std::fma(tc.fj, tc.fk, tc.fa);
++    res_fmsub = std::fma(tc.fj, tc.fk, -tc.fa);
++    res_fnmadd = -std::fma(tc.fj, tc.fk, tc.fa);
++    res_fnmsub = -std::fma(tc.fj, tc.fk, -tc.fa);
++
++    CHECK_EQ(tc.fd_fmadd, res_fmadd);
++    CHECK_EQ(tc.fd_fmsub, res_fmsub);
++    CHECK_EQ(tc.fd_fnmadd, res_fnmadd);
++    CHECK_EQ(tc.fd_fnmsub, res_fnmsub);
++  }
++}
++
++TEST(FMADD_FMSUB_FNMADD_FNMSUB_S) {
++  helper_fmadd_fmsub_fnmadd_fnmsub<float>([](MacroAssembler& assm) {
++    __ fmadd_s(f11, f8, f9, f10);
++    __ Fst_s(f11, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmadd)));
++    __ fmsub_s(f12, f8, f9, f10);
++    __ Fst_s(f12, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmsub)));
++    __ fnmadd_s(f13, f8, f9, f10);
++    __ Fst_s(f13,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmadd)));
++    __ fnmsub_s(f14, f8, f9, f10);
++    __ Fst_s(f14,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmsub)));
++  });
++}
++
++TEST(FMADD_FMSUB_FNMADD_FNMSUB_D) {
++  helper_fmadd_fmsub_fnmadd_fnmsub<double>([](MacroAssembler& assm) {
++    __ fmadd_d(f11, f8, f9, f10);
++    __ Fst_d(f11,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmadd)));
++    __ fmsub_d(f12, f8, f9, f10);
++    __ Fst_d(f12,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmsub)));
++    __ fnmadd_d(f13, f8, f9, f10);
++    __ Fst_d(f13,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmadd)));
++    __ fnmsub_d(f14, f8, f9, f10);
++    __ Fst_d(f14,
++             MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmsub)));
++  });
++}
++
++/*
++TEST(FSQRT_FRSQRT_FRECIP) {
++  const int kTableLength = 4;
++  const double deltaDouble = 2E-15;
++  const float deltaFloat = 2E-7;
++  const float sqrt2_s = sqrt(2);
++  const double sqrt2_d = sqrt(2);
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  struct TestFloat {
++    float a;
++    float resultS1;
++    float resultS2;
++    float resultS3;
++    double b;
++    double resultD1;
++    double resultD2;
++    double resultD3;
++  };
++  TestFloat test;
++  // clang-format off
++  double inputs_D[kTableLength] = {
++      0.0L, 4.0L, 2.0L, 4e-28L
++  };
++
++  double outputs_D[kTableLength] = {
++      0.0L, 2.0L, sqrt2_d, 2e-14L
++  };
++  float inputs_S[kTableLength] = {
++      0.0, 4.0, 2.0, 4e-28
++  };
++
++  float outputs_S[kTableLength] = {
++      0.0, 2.0, sqrt2_s, 2e-14
++  };
++  // clang-format on
++  __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a)));
++  __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
++  __ fsqrt_s(f10, f8);
++  __ fsqrt_d(f11, f9);
++  __ frsqrt_s(f12, f8);
++  __ frsqrt_d(f13, f9);
++  __ frecip_s(f14, f8);
++  __ frecip_d(f15, f9);
++  __ Fst_s(f10, MemOperand(a0, offsetof(TestFloat, resultS1)));
++  __ Fst_d(f11, MemOperand(a0, offsetof(TestFloat, resultD1)));
++  __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS2)));
++  __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD2)));
++  __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resultS3)));
++  __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, resultD3)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  assm.GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++
++  for (int i = 0; i < kTableLength; i++) {
++    float f1;
++    double d1;
++    test.a = inputs_S[i];
++    test.b = inputs_D[i];
++
++    f.Call(&test, 0, 0, 0, 0);
++
++    CHECK_EQ(test.resultS1, outputs_S[i]);
++    CHECK_EQ(test.resultD1, outputs_D[i]);
++
++    if (i != 0) {
++      f1 = test.resultS2 - 1.0F/outputs_S[i];
++      f1 = (f1 < 0) ? f1 : -f1;
++      CHECK(f1 <= deltaFloat);
++      d1 = test.resultD2 - 1.0L/outputs_D[i];
++      d1 = (d1 < 0) ? d1 : -d1;
++      CHECK(d1 <= deltaDouble);
++      f1 = test.resultS3 - 1.0F/inputs_S[i];
++      f1 = (f1 < 0) ? f1 : -f1;
++      CHECK(f1 <= deltaFloat);
++      d1 = test.resultD3 - 1.0L/inputs_D[i];
++      d1 = (d1 < 0) ? d1 : -d1;
++      CHECK(d1 <= deltaDouble);
++    } else {
++      CHECK_EQ(test.resultS2, 1.0F/outputs_S[i]);
++      CHECK_EQ(test.resultD2, 1.0L/outputs_D[i]);
++      CHECK_EQ(test.resultS3, 1.0F/inputs_S[i]);
++      CHECK_EQ(test.resultD3, 1.0L/inputs_D[i]);
++    }
++  }
++}*/
++
++TEST(LA15) {
++  // Test chaining of label usages within instructions (issue 1644).
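++  // Both forward branches below reference &target while it is still unbound;
++  // the assembler chains the unresolved uses and patches the whole chain
++  // once bind(&target) fixes the label position.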
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ Assembler assm(AssemblerOptions{}); ++ ++ Label target; ++ __ beq(a0, a1, &target); ++ __ nop(); ++ __ bne(a0, a1, &target); ++ __ nop(); ++ __ bind(&target); ++ __ nop(); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(1, 1, 0, 0, 0); ++} ++ ++TEST(Trampoline) { ++ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; ++ ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ Label done; ++ size_t nr_calls = kMaxBranchOffset / kInstrSize + 5; ++ ++ __ xor_(a2, a2, a2); ++ __ BranchShort(&done, eq, a0, Operand(a1)); ++ for (size_t i = 0; i < nr_calls; ++i) { ++ __ addi_d(a2, a2, 1); ++ } ++ __ bind(&done); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ ++ int64_t res = reinterpret_cast(f.Call(42, 42, 0, 0, 0)); ++ CHECK_EQ(0, res); ++} ++ ++#undef __ ++ ++} // namespace internal ++} // namespace v8 +diff --git a/deps/v8/test/cctest/test-disasm-loong64.cc b/deps/v8/test/cctest/test-disasm-loong64.cc +new file mode 100644 +index 00000000..8b074659 +--- /dev/null ++++ b/deps/v8/test/cctest/test-disasm-loong64.cc +@@ -0,0 +1,896 @@ ++// Copyright 2012 the V8 project authors. All rights reserved. ++// Redistribution and use in source and binary forms, with or without ++// modification, are permitted provided that the following conditions are ++// met: ++// ++// * Redistributions of source code must retain the above copyright ++// notice, this list of conditions and the following disclaimer. ++// * Redistributions in binary form must reproduce the above ++// copyright notice, this list of conditions and the following ++// disclaimer in the documentation and/or other materials provided ++// with the distribution. ++// * Neither the name of Google Inc. nor the names of its ++// contributors may be used to endorse or promote products derived ++// from this software without specific prior written permission. ++// ++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++//
++
++#include <stdio.h>
++#include <stdlib.h>
++
++#include "src/init/v8.h"
++
++#include "src/codegen/macro-assembler.h"
++#include "src/debug/debug.h"
++#include "src/diagnostics/disasm.h"
++#include "src/diagnostics/disassembler.h"
++#include "src/execution/frames-inl.h"
++#include "test/cctest/cctest.h"
++
++namespace v8 {
++namespace internal {
++
++bool DisassembleAndCompare(byte* pc, const char* compare_string) {
++  disasm::NameConverter converter;
++  disasm::Disassembler disasm(converter);
++  EmbeddedVector<char, 128> disasm_buffer;
++
++  /* if (prev_instr_compact_branch) {
++    disasm.InstructionDecode(disasm_buffer, pc);
++    pc += 4;
++  }*/
++
++  disasm.InstructionDecode(disasm_buffer, pc);
++
++  if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
++    fprintf(stderr,
++            "expected: \n"
++            "%s\n"
++            "disassembled: \n"
++            "%s\n\n",
++            compare_string, disasm_buffer.begin());
++    return false;
++  }
++  return true;
++}
++
++// Set up V8 to a state where we can at least run the assembler and
++// disassembler. Declare the variables and allocate the data structures used
++// in the rest of the macros.
++#define SET_UP()                                             \
++  CcTest::InitializeVM();                                    \
++  Isolate* isolate = CcTest::i_isolate();                    \
++  HandleScope scope(isolate);                                \
++  byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024));  \
++  Assembler assm(AssemblerOptions{},                         \
++                 ExternalAssemblerBuffer(buffer, 4 * 1024)); \
++  bool failure = false;
++
++// This macro assembles one instruction using the preallocated assembler and
++// disassembles the generated instruction, comparing the output to the expected
++// value. If the comparison fails an error message is printed, but the test
++// continues to run until the end.
++#define COMPARE(asm_, compare_string)                                        \
++  {                                                                          \
++    int pc_offset = assm.pc_offset();                                        \
++    byte* progcounter = &buffer[pc_offset];                                  \
++    assm.asm_;                                                               \
++    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
++  }
++
++// Verify that all invocations of the COMPARE macro passed successfully.
++// Exit with a failure if at least one of the tests failed.
++#define VERIFY_RUN()                                  \
++  if (failure) {                                      \
++    FATAL("LOONG64 Disassembler tests failed.\n");    \
++  }
++
++#define COMPARE_PC_REL(asm_, compare_string, offset)                          \
++  {                                                                           \
++    int pc_offset = assm.pc_offset();                                         \
++    byte* progcounter = &buffer[pc_offset];                                   \
++    char str_with_address[100];                                               \
++    printf("%p\n", static_cast<void*>(progcounter));                          \
++    snprintf(str_with_address, sizeof(str_with_address), "%s -> %p",          \
++             compare_string, static_cast<void*>(progcounter + (offset * 4))); \
++    assm.asm_;                                                                \
++    if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
++  }
++
++TEST(TypeOp6) {
++  SET_UP();
++
++  COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0");
++  COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767");
++  COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768");
++
++  VERIFY_RUN();
++}
++
++TEST(TypeOp6PC) {
++  SET_UP();
++
++  COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575",
++                 1048575);
++  COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576",
++                 -1048576);
++  COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0);
++
++  COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575",
++                 1048575);
++  COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576",
++                 -1048576);
++  COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0);
++
++  COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575",
++                 1048575);
++  COMPARE_PC_REL(bceqz(FCC0, -1048576),
++                 "48000010 bceqz fcc0, -1048576", -1048576);
++  COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0);
++
++  COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575",
++                 1048575);
++  COMPARE_PC_REL(bcnez(FCC0, -1048576),
++                 "48000110 bcnez fcc0, -1048576", -1048576);
++  COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0);
++
++  COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431);
++  COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432);
++  COMPARE_PC_REL(b(0), "50000000 b 0", 0);
++
++  COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767",
++                 32767);
++  COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768",
++                 -32768);
++  COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0);
++
++  COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767",
++                 32767);
++  COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768",
++                 -32768);
++  COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0);
++
++  COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767",
++                 32767);
++  COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768",
++                 -32768);
++  COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0);
++
++  COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767",
++                 32767);
++  COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768",
++                 -32768);
++  COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0);
++
++  COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767",
++                 32767);
++  COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768",
++                 -32768);
++  COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0);
++
++  COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767",
++                 32767);
++  COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768",
++                 -32768);
++  COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0);
++
++  VERIFY_RUN();
++}
++
++TEST(TypeOp7) {
++  SET_UP();
++
++  COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287");
++  COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288");
++  COMPARE(lu12i_w(a6, 0),
"1400000a lu12i.w a6, 0"); ++ ++ COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287"); ++ COMPARE(lu32i_d(t0, 524288), "1700000c lu32i.d t0, -524288"); ++ COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0"); ++ ++ COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1"); ++ COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287"); ++ COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288"); ++ COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0"); ++ ++ COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287"); ++ COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288"); ++ COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0"); ++ ++ COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287"); ++ COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288"); ++ COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0"); ++ ++ COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287"); ++ COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288"); ++ COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp8) { ++ SET_UP(); ++ ++ COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764"); ++ COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768"); ++ COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0"); ++ ++ COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764"); ++ COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768"); ++ COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0"); ++ ++ COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764"); ++ COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768"); ++ COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0"); ++ ++ COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764"); ++ COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768"); ++ COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0"); ++ ++ COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764"); ++ COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768"); ++ COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0"); ++ ++ COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764"); ++ COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768"); ++ COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0"); ++ ++ COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764"); ++ COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768"); ++ COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0"); ++ ++ COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764"); ++ COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768"); ++ COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp10) { ++ SET_UP(); ++ ++ COMPARE(bstrins_w(a4, a5, 31, 16), ++ "007f4128 bstrins.w a4, a5, 31, 16"); ++ COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0"); ++ ++ COMPARE(bstrins_d(a3, zero_reg, 17, 0), ++ "00910007 bstrins.d a3, zero_reg, 17, 0"); ++ COMPARE(bstrins_d(t1, zero_reg, 17, 0), ++ "0091000d bstrins.d t1, zero_reg, 17, 0"); ++ ++ COMPARE(bstrpick_w(t0, t1, 31, 29), ++ "007ff5ac bstrpick.w t0, t1, 31, 29"); ++ COMPARE(bstrpick_w(a4, a5, 16, 0), ++ "00708128 bstrpick.w a4, a5, 16, 0"); ++ ++ COMPARE(bstrpick_d(a5, a5, 31, 0), ++ "00df0129 bstrpick.d a5, a5, 31, 0"); ++ COMPARE(bstrpick_d(a4, a4, 25, 2), ++ "00d90908 bstrpick.d a4, a4, 25, 2"); ++ ++ COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047"); ++ COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048"); ++ ++ 
COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047"); ++ COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048"); ++ ++ COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047"); ++ COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048"); ++ ++ COMPARE(addi_d(a0, zero_reg, 2047), ++ "02dffc04 addi.d a0, zero_reg, 2047"); ++ COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048"); ++ ++ COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047"); ++ COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048"); ++ ++ COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff"); ++ COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0"); ++ ++ COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff"); ++ COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0"); ++ ++ COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff"); ++ COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0"); ++ ++ COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047"); ++ COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048"); ++ ++ COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047"); ++ COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048"); ++ ++ COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047"); ++ COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048"); ++ ++ COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047"); ++ COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048"); ++ COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0"); ++ ++ COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047"); ++ COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048"); ++ ++ COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047"); ++ COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048"); ++ ++ COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047"); ++ COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048"); ++ ++ COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047"); ++ COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048"); ++ ++ COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047"); ++ COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048"); ++ ++ COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047"); ++ COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048"); ++ ++ COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047"); ++ COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048"); ++ ++ COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047"); ++ COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048"); ++ ++ COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047"); ++ COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048"); ++ ++ COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047"); ++ COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048"); ++ ++ COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047"); ++ COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp12) { ++ SET_UP(); ++ ++ COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3"); ++ COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7"); ++ ++ COMPARE(fmadd_d(f8, f9, f10, f11), ++ "0825a928 fmadd.d f8, f9, f10, f11"); ++ COMPARE(fmadd_d(f12, f13, f14, f15), ++ "0827b9ac fmadd.d f12, f13, f14, f15"); ++ ++ COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3"); ++ COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7"); ++ ++ COMPARE(fmsub_d(f8, f9, f10, f11), ++ "0865a928 fmsub.d f8, f9, f10, f11"); ++ 
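++
++  // Illustrative decode of the 4R floating-point layout used by the fused
++  // multiply-add family above: opcode[31:20] fa[19:15] fk[14:10] fj[9:5]
++  // fd[4:0]. For 0x08118820 ("fmadd.s f0, f1, f2, f3"), opcode = 0x081,
++  // fa = f3, fk = f2, fj = f1, fd = f0.
++  static_assert(((0x08118820 >> 15) & 0x1F) == 3, "fa field is f3");
++  static_assert(((0x08118820 >> 10) & 0x1F) == 2, "fk field is f2");
++  static_assert(((0x08118820 >> 5) & 0x1F) == 1, "fj field is f1");
++  static_assert((0x08118820 & 0x1F) == 0, "fd field is f0");
++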
COMPARE(fmsub_d(f12, f13, f14, f15), ++ "0867b9ac fmsub.d f12, f13, f14, f15"); ++ ++ COMPARE(fnmadd_s(f0, f1, f2, f3), ++ "08918820 fnmadd.s f0, f1, f2, f3"); ++ COMPARE(fnmadd_s(f4, f5, f6, f7), ++ "089398a4 fnmadd.s f4, f5, f6, f7"); ++ ++ COMPARE(fnmadd_d(f8, f9, f10, f11), ++ "08a5a928 fnmadd.d f8, f9, f10, f11"); ++ COMPARE(fnmadd_d(f12, f13, f14, f15), ++ "08a7b9ac fnmadd.d f12, f13, f14, f15"); ++ ++ COMPARE(fnmsub_s(f0, f1, f2, f3), ++ "08d18820 fnmsub.s f0, f1, f2, f3"); ++ COMPARE(fnmsub_s(f4, f5, f6, f7), ++ "08d398a4 fnmsub.s f4, f5, f6, f7"); ++ ++ COMPARE(fnmsub_d(f8, f9, f10, f11), ++ "08e5a928 fnmsub.d f8, f9, f10, f11"); ++ COMPARE(fnmsub_d(f12, f13, f14, f15), ++ "08e7b9ac fnmsub.d f12, f13, f14, f15"); ++ ++ COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0), ++ "0c100820 fcmp.caf.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0), ++ "0c1418a0 fcmp.cun.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0), ++ "0c122920 fcmp.ceq.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0), ++ "0c1639a0 fcmp.cueq.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0), ++ "0c110820 fcmp.clt.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0), ++ "0c1518a0 fcmp.cult.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0), ++ "0c132920 fcmp.cle.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0), ++ "0c1739a0 fcmp.cule.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0), ++ "0c180820 fcmp.cne.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(COR, f5, f6, FCC0), ++ "0c1a18a0 fcmp.cor.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0), ++ "0c1c2920 fcmp.cune.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0), ++ "0c10b9a0 fcmp.saf.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0), ++ "0c148820 fcmp.sun.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0), ++ "0c1298a0 fcmp.seq.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0), ++ "0c16a920 fcmp.sueq.s fcc0, f9, f10"); ++ // COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0), ++ // "0c11b9a0 fcmp.slt.s fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0), ++ "0c158820 fcmp.sult.s fcc0, f1, f2"); ++ COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0), ++ "0c1398a0 fcmp.sle.s fcc0, f5, f6"); ++ COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0), ++ "0c17a920 fcmp.sule.s fcc0, f9, f10"); ++ COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0), ++ "0c18b9a0 fcmp.sne.s fcc0, f13, f14"); ++ COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0), ++ "0c1ab9a0 fcmp.sor.s fcc0, f13, f14"); ++ COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0), ++ "0c1c8820 fcmp.sune.s fcc0, f1, f2"); ++ ++ COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0), ++ "0c200820 fcmp.caf.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0), ++ "0c2418a0 fcmp.cun.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0), ++ "0c222920 fcmp.ceq.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0), ++ "0c2639a0 fcmp.cueq.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0), ++ "0c210820 fcmp.clt.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0), ++ "0c2518a0 fcmp.cult.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0), ++ "0c232920 fcmp.cle.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0), ++ "0c2739a0 fcmp.cule.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0), ++ "0c280820 fcmp.cne.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(COR, f5, f6, FCC0), ++ "0c2a18a0 fcmp.cor.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0), ++ "0c2c2920 
fcmp.cune.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0), ++ "0c20b9a0 fcmp.saf.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0), ++ "0c248820 fcmp.sun.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0), ++ "0c2298a0 fcmp.seq.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0), ++ "0c26a920 fcmp.sueq.d fcc0, f9, f10"); ++ // COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0), ++ // "0c21b9a0 fcmp.slt.d fcc0, f13, f14"); ++ ++ COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0), ++ "0c258820 fcmp.sult.d fcc0, f1, f2"); ++ COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0), ++ "0c2398a0 fcmp.sle.d fcc0, f5, f6"); ++ COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0), ++ "0c27a920 fcmp.sule.d fcc0, f9, f10"); ++ COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0), ++ "0c28b9a0 fcmp.sne.d fcc0, f13, f14"); ++ COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0), ++ "0c2ab9a0 fcmp.sor.d fcc0, f13, f14"); ++ COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0), ++ "0c2c8820 fcmp.sune.d fcc0, f1, f2"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp14) { ++ SET_UP(); ++ ++ COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1"); ++ COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3"); ++ COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4"); ++ ++ COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1"); ++ COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3"); ++ COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4"); ++ ++ COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1"); ++ COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3"); ++ COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4"); ++ ++ COMPARE(bytepick_w(t4, t5, t6, 0), ++ "00084a30 bytepick.w t4, t5, t6, 0"); ++ COMPARE(bytepick_w(a0, a1, a2, 3), ++ "000998a4 bytepick.w a0, a1, a2, 3"); ++ ++ COMPARE(bytepick_d(a6, a7, t0, 0), ++ "000c316a bytepick.d a6, a7, t0, 0"); ++ COMPARE(bytepick_d(t4, t5, t6, 7), ++ "000fca30 bytepick.d t4, t5, t6, 7"); ++ ++ COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31"); ++ COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1"); ++ ++ COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63"); ++ COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1"); ++ ++ COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31"); ++ COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1"); ++ ++ COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63"); ++ COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1"); ++ ++ COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63"); ++ COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1"); ++ ++ COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31"); ++ COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1"); ++ ++ COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp17) { ++ SET_UP(); ++ ++ COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4"); ++ COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4"); ++ ++ COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6"); ++ COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3"); ++ ++ COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1"); ++ COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1"); ++ ++ COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2"); ++ COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3"); ++ ++ COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3"); ++ COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2"); ++ ++ COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6"); ++ 
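++
++  // Illustrative decode of the 3R layout shared by the integer ops in this
++  // test: opcode[31:15] rk[14:10] rj[9:5] rd[4:0]. For 0x00122929
++  // ("slt a5, a5, a6"), rk = 10 (a6), rj = 9 (a5), rd = 9 (a5).
++  static_assert(((0x00122929 >> 10) & 0x1F) == 10, "rk field is a6 (r10)");
++  static_assert(((0x00122929 >> 5) & 0x1F) == 9, "rj field is a5 (r9)");
++  static_assert((0x00122929 & 0x1F) == 9, "rd field is a5 (r9)");
++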
COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4"); ++ ++ COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3"); ++ COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5"); ++ ++ COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0"); ++ COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3"); ++ ++ COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg"); ++ COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg"); ++ ++ COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6"); ++ COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7"); ++ ++ COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7"); ++ COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6"); ++ ++ COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2"); ++ COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5"); ++ ++ COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0"); ++ COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3"); ++ ++ COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6"); ++ COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2"); ++ ++ COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7"); ++ COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3"); ++ ++ COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3"); ++ COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4"); ++ ++ COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4"); ++ COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6"); ++ ++ COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3"); ++ COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0"); ++ ++ COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0"); ++ COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0"); ++ ++ COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5"); ++ COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0"); ++ ++ COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3"); ++ COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6"); ++ ++ COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2"); ++ COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5"); ++ ++ COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7"); ++ COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6"); ++ ++ COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7"); ++ COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2"); ++ ++ COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0"); ++ COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3"); ++ ++ COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1"); ++ COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5"); ++ ++ COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5"); ++ COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0"); ++ ++ COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3"); ++ COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6"); ++ ++ COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2"); ++ COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5"); ++ ++ COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0"); ++ COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3"); ++ ++ COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3"); ++ COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6"); ++ ++ COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6"); ++ COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3"); ++ ++ COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3"); ++ COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6"); ++ ++ COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2"); ++ COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, 
a4, a5"); ++ ++ COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6"); ++ COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5"); ++ ++ COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0"); ++ COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3"); ++ ++ COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6"); ++ COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2"); ++ ++ COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5"); ++ COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0"); ++ ++ COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5"); ++ COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8"); ++ ++ COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0"); ++ COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2"); ++ ++ COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11"); ++ COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14"); ++ ++ COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30"); ++ COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1"); ++ ++ COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17"); ++ COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20"); ++ ++ COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1"); ++ COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0"); ++ ++ COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2"); ++ COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5"); ++ ++ COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1"); ++ COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0"); ++ ++ COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11"); ++ COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8"); ++ ++ COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0"); ++ COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0"); ++ ++ COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14"); ++ COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17"); ++ ++ COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20"); ++ COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2"); ++ ++ COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2"); ++ COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5"); ++ COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0"); ++ ++ COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3"); ++ COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6"); ++ COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2"); ++ ++ COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5"); ++ COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0"); ++ COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3"); ++ ++ COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6"); ++ COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6"); ++ ++ COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6"); ++ COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6"); ++ ++ COMPARE(dbar(0), "38720000 dbar 0x0(0)"); ++ COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)"); ++ ++ COMPARE(break_(0), "002a0000 break code: 0x0(0)"); ++ COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)"); ++ ++ COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5"); ++ COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0"); ++ ++ COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3"); ++ COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6"); ++ ++ COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6"); ++ COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1"); ++ ++ COMPARE(amadd_w(t2, t3, t4), "38613e0e 
amadd.w t2, t3, t4"); ++ COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0"); ++ ++ COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3"); ++ COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6"); ++ ++ COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1"); ++ COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4"); ++ ++ COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0"); ++ COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3"); ++ ++ COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6"); ++ COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1"); ++ ++ COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4"); ++ COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0"); ++ ++ COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3"); ++ COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6"); ++ ++ COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1"); ++ COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4"); ++ ++ COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2"); ++ COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5"); ++ ++ COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0"); ++ COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3"); ++ ++ COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6"); ++ COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2"); ++ ++ COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5"); ++ COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0"); ++ ++ COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2"); ++ COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5"); ++ ++ COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8"); ++ COMPARE(fcopysign_d(f9, f10, f12), ++ "01133149 fcopysign.d f9, f10, f12"); ++ ++ VERIFY_RUN(); ++} ++ ++TEST(TypeOp22) { ++ SET_UP(); ++ ++ COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0"); ++ COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1"); ++ COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3"); ++ COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5"); ++ ++ COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1"); ++ COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3"); ++ COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5"); ++ COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7"); ++ ++ COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7"); ++ COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1"); ++ COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3"); ++ COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5"); ++ ++ COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1"); ++ COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3"); ++ ++ COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5"); ++ COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7"); ++ COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1"); ++ COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3"); ++ ++ COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5"); ++ COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1"); ++ ++ COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3"); ++ COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0"); ++ ++ COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1"); ++ COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0"); ++ ++ COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5"); ++ COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0"); ++ ++ COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7"); ++ COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1"); ++ COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0"); 
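++
++  // Illustrative decode of the 2R layout used by the move ops here:
++  // opcode[31:10] fj[9:5] fd[4:0]. For 0x01149801 ("fmov.d f1, f0"),
++  // fj = f0 and fd = f1, which is why swapping the operands only changes
++  // the low ten bits of the word (compare 0x01149820 above).
++  static_assert(((0x01149801 >> 5) & 0x1F) == 0, "fj field is f0");
++  static_assert((0x01149801 & 0x1F) == 1, "fd field is f1");
++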
++ ++ COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6"); ++ COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6"); ++ ++ COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3"); ++ COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0"); ++ ++ COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6"); ++ COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3"); ++ ++ COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30"); ++ ++ COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30"); ++ COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30"); ++ ++ COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0"); ++ COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0"); ++ ++ COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2"); ++ COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr"); ++ ++ COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0"); ++ COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1"); ++ ++ COMPARE(movgr2cf(FCC2, a0), "0114d882 movgr2cf fcc2, a0"); ++ COMPARE(movcf2gr(a1, FCC3), "0114dc65 movcf2gr a1, fcc3"); ++ ++ COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0"); ++ COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0"); ++ ++ COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9"); ++ COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11"); ++ COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13"); ++ COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15"); ++ ++ COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17"); ++ COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19"); ++ COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21"); ++ COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1"); ++ ++ COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4"); ++ COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4"); ++ COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0"); ++ COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30"); ++ ++ COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3"); ++ COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5"); ++ COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7"); ++ COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9"); ++ ++ COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11"); ++ COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13"); ++ COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15"); ++ COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17"); ++ ++ COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19"); ++ COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21"); ++ COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1"); ++ COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3"); ++ ++ COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5"); ++ COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7"); ++ ++ COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9"); ++ COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11"); ++ ++ COMPARE(frsqrt_s(f12, f13), "011465ac frsqrt.s f12, f13"); ++ COMPARE(frsqrt_d(f14, f15), "011469ee frsqrt.d f14, f15"); ++ ++ COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17"); ++ COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19"); ++ ++ COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21"); ++ COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1"); ++ ++ VERIFY_RUN(); ++} ++ ++} // namespace internal ++} // namespace v8 +diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc 
b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+new file mode 100644
+index 00000000..ef536b86
+--- /dev/null
++++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
+@@ -0,0 +1,2894 @@
++// Copyright 2013 the V8 project authors. All rights reserved.
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++//     * Redistributions of source code must retain the above copyright
++//       notice, this list of conditions and the following disclaimer.
++//     * Redistributions in binary form must reproduce the above
++//       copyright notice, this list of conditions and the following
++//       disclaimer in the documentation and/or other materials provided
++//       with the distribution.
++//     * Neither the name of Google Inc. nor the names of its
++//       contributors may be used to endorse or promote products derived
++//       from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++#include <stdlib.h>
++
++#include <iostream>  // NOLINT(readability/streams)
++
++#include "src/base/utils/random-number-generator.h"
++#include "src/codegen/macro-assembler.h"
++#include "src/execution/simulator.h"
++#include "src/init/v8.h"
++#include "src/objects/heap-number.h"
++#include "src/objects/objects-inl.h"
++#include "src/utils/ostreams.h"
++#include "test/cctest/cctest.h"
++
++namespace v8 {
++namespace internal {
++
++// TODO(mips64): Refine these signatures per test case.
++using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
++using F1 = void*(int x, int p1, int p2, int p3, int p4);
++using F2 = void*(int x, int y, int p2, int p3, int p4);
++using F3 = void*(void* p, int p1, int p2, int p3, int p4);
++using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
++
++#define __ masm->
++
++TEST(BYTESWAP) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  struct T {
++    uint64_t s8;
++    uint64_t s4;
++    uint64_t s2;
++    uint64_t u4;
++    uint64_t u2;
++  };
++
++  T t;
++  // clang-format off
++  uint64_t test_values[] = {0x5612FFCD9D327ACC,
++                            0x781A15C3,
++                            0xFCDE,
++                            0x9F,
++                            0xC81A15C3,
++                            0x8000000000000000,
++                            0xFFFFFFFFFFFFFFFF,
++                            0x0000000080000000,
++                            0x0000000000008000};
++  // clang-format on
++  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
++
++  MacroAssembler* masm = &assembler;
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, s8)));
++  __ ByteSwapSigned(a4, a4, 8);
++  __ St_d(a4, MemOperand(a0, offsetof(T, s8)));
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, s4)));
++  __ ByteSwapSigned(a4, a4, 4);
++  __ St_d(a4, MemOperand(a0, offsetof(T, s4)));
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, s2)));
++  __ ByteSwapSigned(a4, a4, 2);
++  __ St_d(a4, MemOperand(a0, offsetof(T, s2)));
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, u4)));
++  __ ByteSwapSigned(a4, a4, 4);
++  __ St_d(a4, MemOperand(a0, offsetof(T, u4)));
++
++  __ Ld_d(a4, MemOperand(a0, offsetof(T, u2)));
++  __ ByteSwapSigned(a4, a4, 2);
++  __ St_d(a4, MemOperand(a0, offsetof(T, u2)));
++
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  masm->GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++
++  for (size_t i = 0; i < arraysize(test_values); i++) {
++    int32_t in_s4 = static_cast<int32_t>(test_values[i]);
++    int16_t in_s2 = static_cast<int16_t>(test_values[i]);
++    uint32_t in_u4 = static_cast<uint32_t>(test_values[i]);
++    uint16_t in_u2 = static_cast<uint16_t>(test_values[i]);
++
++    t.s8 = test_values[i];
++    t.s4 = static_cast<uint64_t>(in_s4);
++    t.s2 = static_cast<uint64_t>(in_s2);
++    t.u4 = static_cast<uint64_t>(in_u4);
++    t.u2 = static_cast<uint64_t>(in_u2);
++
++    f.Call(&t, 0, 0, 0, 0);
++
++    CHECK_EQ(ByteReverse(test_values[i]), t.s8);
++    CHECK_EQ(ByteReverse(in_s4), static_cast<int32_t>(t.s4));
++    CHECK_EQ(ByteReverse(in_s2), static_cast<int16_t>(t.s2));
++    CHECK_EQ(ByteReverse(in_u4), static_cast<uint32_t>(t.u4));
++    CHECK_EQ(ByteReverse(in_u2), static_cast<uint16_t>(t.u2));
++  }
++}
++
++TEST(LoadConstants) {
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope handles(isolate);
++
++  int64_t refConstants[64];
++  int64_t result[64];
++
++  int64_t mask = 1;
++  for (int i = 0; i < 64; i++) {
++    refConstants[i] = ~(mask << i);
++  }
++
++  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
++  MacroAssembler* masm = &assembler;
++
++  __ or_(a4, a0, zero_reg);
++  for (int i = 0; i < 64; i++) {
++    // Load constant.
++    __ li(a5, Operand(refConstants[i]));
++    __ St_d(a5, MemOperand(a4, zero_reg));
++    __ Add_d(a4, a4, Operand(kPointerSize));
++  }
++
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  masm->GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++
++  auto f = GeneratedCode<FV>::FromCode(*code);
++  (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
++  // Check results.
++ for (int i = 0; i < 64; i++) { ++ CHECK(refConstants[i] == result[i]); ++ } ++} ++ ++TEST(LoadAddress) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope handles(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ Label to_jump, skip; ++ __ mov(a4, a0); ++ ++ __ Branch(&skip); ++ __ bind(&to_jump); ++ __ nop(); ++ __ nop(); ++ __ jirl(zero_reg, ra, 0); ++ __ bind(&skip); ++ __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD); ++ int check_size = masm->InstructionsGeneratedSince(&skip); ++ CHECK_EQ(3, check_size); ++ __ jirl(zero_reg, a4, 0); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ __ stop(); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ (void)f.Call(0, 0, 0, 0, 0); ++ // Check results. ++} ++ ++TEST(jump_tables4) { ++ // Similar to test-assembler-mips jump_tables1, with extra test for branch ++ // trampoline required before emission of the dd table (where trampolines are ++ // blocked), and proper transition to long-branch mode. ++ // Regression test for v8:4294. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ const int kNumCases = 512; ++ int values[kNumCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kNumCases]; ++ Label near_start, end, done; ++ ++ __ Push(ra); ++ __ xor_(a2, a2, a2); ++ ++ __ Branch(&end); ++ __ bind(&near_start); ++ ++ for (int i = 0; i < 32768 - 256; ++i) { ++ __ Add_d(a2, a2, 1); ++ } ++ ++ __ GenerateSwitchTable(a0, kNumCases, ++ [&labels](size_t i) { return labels + i; }); ++ ++ for (int i = 0; i < kNumCases; ++i) { ++ __ bind(&labels[i]); ++ __ li(a2, values[i]); ++ __ Branch(&done); ++ } ++ ++ __ bind(&done); ++ __ Pop(ra); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ __ bind(&end); ++ __ Branch(&near_start); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kNumCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++TEST(jump_tables6) { ++ // Similar to test-assembler-mips jump_tables1, with extra test for branch ++ // trampoline required after emission of the dd table (where trampolines are ++ // blocked). 
This test checks if number of really generated instructions is ++ // greater than number of counted instructions from code, as we are expecting ++ // generation of trampoline in this case ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ const int kSwitchTableCases = 40; ++ ++ const int kMaxBranchOffset = (1 << (18 - 1)) - 1; ++ const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize; ++ const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize; ++ ++ const int kMaxOffsetForTrampolineStart = ++ kMaxBranchOffset - 16 * kTrampolineSlotsSize; ++ const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) - ++ (kSwitchTablePrologueSize + 2 * kSwitchTableCases) - ++ 20; ++ ++ int values[kSwitchTableCases]; ++ isolate->random_number_generator()->NextBytes(values, sizeof(values)); ++ Label labels[kSwitchTableCases]; ++ Label near_start, end, done; ++ ++ __ Push(ra); ++ __ xor_(a2, a2, a2); ++ ++ int offs1 = masm->pc_offset(); ++ int gen_insn = 0; ++ ++ __ Branch(&end); ++ gen_insn += 1; ++ __ bind(&near_start); ++ ++ for (int i = 0; i < kFillInstr; ++i) { ++ __ Add_d(a2, a2, 1); ++ } ++ gen_insn += kFillInstr; ++ ++ __ GenerateSwitchTable(a0, kSwitchTableCases, ++ [&labels](size_t i) { return labels + i; }); ++ gen_insn += (kSwitchTablePrologueSize + 2 * kSwitchTableCases); ++ ++ for (int i = 0; i < kSwitchTableCases; ++i) { ++ __ bind(&labels[i]); ++ __ li(a2, values[i]); ++ __ Branch(&done); ++ } ++ gen_insn += 3 * kSwitchTableCases; ++ ++ // If offset from here to first branch instr is greater than max allowed ++ // offset for trampoline ... ++ CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1); ++ // ... 
number of generated instructions must be greater then "gen_insn", ++ // as we are expecting trampoline generation ++ CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / kInstrSize); ++ ++ __ bind(&done); ++ __ Pop(ra); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ __ bind(&end); ++ __ Branch(&near_start); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++#ifdef OBJECT_PRINT ++ code->Print(std::cout); ++#endif ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kSwitchTableCases; ++i) { ++ int64_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); ++ ::printf("f(%d) = %" PRId64 "\n", i, res); ++ CHECK_EQ(values[i], res); ++ } ++} ++ ++static uint64_t run_alsl_w(uint32_t rj, uint32_t rk, int8_t sa) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ Alsl_w(a2, a0, a1, sa); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assembler.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(ALSL_W) { ++ CcTest::InitializeVM(); ++ struct TestCaseAlsl { ++ int32_t rj; ++ int32_t rk; ++ uint8_t sa; ++ uint64_t expected_res; ++ }; ++ // clang-format off ++ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res ++ {0x1, 0x4, 1, 0x6}, ++ {0x1, 0x4, 2, 0x8}, ++ {0x1, 0x4, 3, 0xC}, ++ {0x1, 0x4, 4, 0x14}, ++ {0x1, 0x4, 5, 0x24}, ++ {0x1, 0x0, 1, 0x2}, ++ {0x1, 0x0, 2, 0x4}, ++ {0x1, 0x0, 3, 0x8}, ++ {0x1, 0x0, 4, 0x10}, ++ {0x1, 0x0, 5, 0x20}, ++ {0x0, 0x4, 1, 0x4}, ++ {0x0, 0x4, 2, 0x4}, ++ {0x0, 0x4, 3, 0x4}, ++ {0x0, 0x4, 4, 0x4}, ++ {0x0, 0x4, 5, 0x4}, ++ ++ // Shift overflow. ++ {INT32_MAX, 0x4, 1, 0x2}, ++ {INT32_MAX >> 1, 0x4, 2, 0x0}, ++ {INT32_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC}, ++ {INT32_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4}, ++ {INT32_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4}, ++ ++ // Signed addition overflow. ++ {0x1, INT32_MAX - 1, 1, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 3, 2, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 7, 3, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 15, 4, 0xFFFFFFFF80000000}, ++ {0x1, INT32_MAX - 31, 5, 0xFFFFFFFF80000000}, ++ ++ // Addition overflow. ++ {0x1, -2, 1, 0x0}, ++ {0x1, -4, 2, 0x0}, ++ {0x1, -8, 3, 0x0}, ++ {0x1, -16, 4, 0x0}, ++ {0x1, -32, 5, 0x0}}; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_alsl_w(tc[i].rj, tc[i].rk, tc[i].sa); ++ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Alsl_w(a0, %x, %x, %hhu)\n", ++ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++ ++static uint64_t run_alsl_d(uint64_t rj, uint64_t rk, int8_t sa) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ Alsl_d(a2, a0, a1, sa); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assembler.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ uint64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ ++ return res; ++} ++ ++TEST(ALSL_D) { ++ CcTest::InitializeVM(); ++ struct TestCaseAlsl { ++ int64_t rj; ++ int64_t rk; ++ uint8_t sa; ++ uint64_t expected_res; ++ }; ++ // clang-format off ++ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res ++ {0x1, 0x4, 1, 0x6}, ++ {0x1, 0x4, 2, 0x8}, ++ {0x1, 0x4, 3, 0xC}, ++ {0x1, 0x4, 4, 0x14}, ++ {0x1, 0x4, 5, 0x24}, ++ {0x1, 0x0, 1, 0x2}, ++ {0x1, 0x0, 2, 0x4}, ++ {0x1, 0x0, 3, 0x8}, ++ {0x1, 0x0, 4, 0x10}, ++ {0x1, 0x0, 5, 0x20}, ++ {0x0, 0x4, 1, 0x4}, ++ {0x0, 0x4, 2, 0x4}, ++ {0x0, 0x4, 3, 0x4}, ++ {0x0, 0x4, 4, 0x4}, ++ {0x0, 0x4, 5, 0x4}, ++ ++ // Shift overflow. ++ {INT64_MAX, 0x4, 1, 0x2}, ++ {INT64_MAX >> 1, 0x4, 2, 0x0}, ++ {INT64_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC}, ++ {INT64_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4}, ++ {INT64_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4}, ++ ++ // Signed addition overflow. ++ {0x1, INT64_MAX - 1, 1, 0x8000000000000000}, ++ {0x1, INT64_MAX - 3, 2, 0x8000000000000000}, ++ {0x1, INT64_MAX - 7, 3, 0x8000000000000000}, ++ {0x1, INT64_MAX - 15, 4, 0x8000000000000000}, ++ {0x1, INT64_MAX - 31, 5, 0x8000000000000000}, ++ ++ // Addition overflow. ++ {0x1, -2, 1, 0x0}, ++ {0x1, -4, 2, 0x0}, ++ {0x1, -8, 3, 0x0}, ++ {0x1, -16, 4, 0x0}, ++ {0x1, -32, 5, 0x0}}; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t res = run_alsl_d(tc[i].rj, tc[i].rk, tc[i].sa); ++ PrintF("0x%" PRIx64 " =? 
0x%" PRIx64 " == Dlsa(v0, %" PRIx64 ", %" PRIx64 ++ ", %hhu)\n", ++ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa); ++ CHECK_EQ(tc[i].expected_res, res); ++ } ++} ++// clang-format off ++static const std::vector ffint_ftintrz_uint32_test_values() { ++ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00, ++ 0x7FFFFFFF, 0x80000000, 0x80000001, ++ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_int32_test_values() { ++ static const int32_t kValues[] = { ++ static_cast(0x00000000), static_cast(0x00000001), ++ static_cast(0x00FFFF00), static_cast(0x7FFFFFFF), ++ static_cast(0x80000000), static_cast(0x80000001), ++ static_cast(0x80FFFF00), static_cast(0x8FFFFFFF), ++ static_cast(0xFFFFFFFF)}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_uint64_test_values() { ++ static const uint64_t kValues[] = { ++ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000, ++ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001, ++ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector ffint_ftintrz_int64_test_values() { ++ static const int64_t kValues[] = {static_cast(0x0000000000000000), ++ static_cast(0x0000000000000001), ++ static_cast(0x0000FFFFFFFF0000), ++ static_cast(0x7FFFFFFFFFFFFFFF), ++ static_cast(0x8000000000000000), ++ static_cast(0x8000000000000001), ++ static_cast(0x8000FFFFFFFF0000), ++ static_cast(0x8FFFFFFFFFFFFFFF), ++ static_cast(0xFFFFFFFFFFFFFFFF)}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ // clang-off on ++ ++// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... 
} ++#define FOR_INPUTS(ctype, itype, var, test_vector) \ ++ std::vector var##_vec = test_vector(); \ ++ for (std::vector::iterator var = var##_vec.begin(); \ ++ var != var##_vec.end(); ++var) ++ ++#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \ ++ std::vector var##_vec = test_vector(); \ ++ std::vector::iterator var; \ ++ std::vector::reverse_iterator var2; \ ++ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \ ++ var != var##_vec.end(); ++var, ++var2) ++ ++#define FOR_ENUM_INPUTS(var, type, test_vector) \ ++ FOR_INPUTS(enum type, type, var, test_vector) ++#define FOR_STRUCT_INPUTS(var, type, test_vector) \ ++ FOR_INPUTS(struct type, type, var, test_vector) ++#define FOR_INT32_INPUTS(var, test_vector) \ ++ FOR_INPUTS(int32_t, int32, var, test_vector) ++#define FOR_INT32_INPUTS2(var, var2, test_vector) \ ++ FOR_INPUTS2(int32_t, int32, var, var2, test_vector) ++#define FOR_INT64_INPUTS(var, test_vector) \ ++ FOR_INPUTS(int64_t, int64, var, test_vector) ++#define FOR_UINT32_INPUTS(var, test_vector) \ ++ FOR_INPUTS(uint32_t, uint32, var, test_vector) ++#define FOR_UINT64_INPUTS(var, test_vector) \ ++ FOR_INPUTS(uint64_t, uint64, var, test_vector) ++ ++template ++RET_TYPE run_CVT(IN_TYPE x, Func GenerateConvertInstructionFunc) { ++ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ ++ GenerateConvertInstructionFunc(masm); ++ __ movfr2gr_d(a2, f9); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ return reinterpret_cast(f.Call(x, 0, 0, 0, 0)); ++} ++ ++TEST(Ffint_s_uw_Ftintrz_uw_s) { ++ CcTest::InitializeVM(); ++ FOR_UINT32_INPUTS(i, ffint_ftintrz_uint32_test_values) { ++ ++ uint32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_s_uw(f8, a0); ++ __ movgr2frh_w(f9, zero_reg); ++ __ Ftintrz_uw_s(f9, f8, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_s_ul_Ftintrz_ul_s) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_s_ul(f8, a0); ++ __ Ftintrz_ul_s(f9, f8, f10, a2); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_uw_Ftintrz_uw_d) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_d_uw(f8, a0); ++ __ movgr2frh_w(f9, zero_reg); ++ __ Ftintrz_uw_d(f9, f8, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_ul_Ftintrz_ul_d) { ++ CcTest::InitializeVM(); ++ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) { ++ uint64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ Ffint_d_ul(f8, a0); ++ __ Ftintrz_ul_d(f9, f8, f10, a2); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(Ffint_d_l_Ftintrz_l_ud) { ++ CcTest::InitializeVM(); ++ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { ++ int64_t input = *i; ++ uint64_t abs_input = (input < 0) ? 
-input : input; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_d(f8, a0); ++ __ ffint_d_l(f10, f8); ++ __ Ftintrz_l_ud(f9, f10, f11); ++ }; ++ CHECK_EQ(static_cast(abs_input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(ffint_d_l_Ftint_l_d) { ++ CcTest::InitializeVM(); ++ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) { ++ int64_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_d(f8, a0); ++ __ ffint_d_l(f10, f8); ++ __ Ftintrz_l_d(f9, f10); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++TEST(ffint_d_w_Ftint_w_d) { ++ CcTest::InitializeVM(); ++ FOR_INT32_INPUTS(i, ffint_ftintrz_int32_test_values) { ++ int32_t input = *i; ++ auto fn = [](MacroAssembler* masm) { ++ __ movgr2fr_w(f8, a0); ++ __ ffint_d_w(f10, f8); ++ __ Ftintrz_w_d(f9, f10); ++ __ movfr2gr_s(a4, f9); ++ __ movgr2fr_d(f9, a4); ++ }; ++ CHECK_EQ(static_cast(input), run_CVT(input, fn)); ++ } ++} ++ ++ ++static const std::vector overflow_int64_test_values() { ++ // clang-format off ++ static const int64_t kValues[] = {static_cast(0xF000000000000000), ++ static_cast(0x0000000000000001), ++ static_cast(0xFF00000000000000), ++ static_cast(0x0000F00111111110), ++ static_cast(0x0F00001000000000), ++ static_cast(0x991234AB12A96731), ++ static_cast(0xB0FFFF0F0F0F0F01), ++ static_cast(0x00006FFFFFFFFFFF), ++ static_cast(0xFFFFFFFFFFFFFFFF)}; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++TEST(OverflowInstructions) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope handles(isolate); ++ ++ struct T { ++ int64_t lhs; ++ int64_t rhs; ++ int64_t output_add1; ++ int64_t output_add2; ++ int64_t output_sub1; ++ int64_t output_sub2; ++ int64_t output_mul1; ++ int64_t output_mul2; ++ int64_t overflow_add1; ++ int64_t overflow_add2; ++ int64_t overflow_sub1; ++ int64_t overflow_sub2; ++ int64_t overflow_mul1; ++ int64_t overflow_mul2; ++ }; ++ T t; ++ ++ FOR_INT64_INPUTS(i, overflow_int64_test_values) { ++ FOR_INT64_INPUTS(j, overflow_int64_test_values) { ++ int64_t ii = *i; ++ int64_t jj = *j; ++ int64_t expected_add, expected_sub; ++ int32_t ii32 = static_cast(ii); ++ int32_t jj32 = static_cast(jj); ++ int32_t expected_mul; ++ int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf; ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ ++ __ AdddOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_add1)); ++ __ st_d(t3, a0, offsetof(T, overflow_add1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ AdddOverflow(t0, t0, Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_add2)); ++ __ st_d(t3, a0, offsetof(T, overflow_add2)); ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ ++ __ SubdOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_sub1)); ++ __ st_d(t3, a0, offsetof(T, overflow_sub1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ SubdOverflow(t0, t0, Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_sub2)); ++ __ st_d(t3, a0, offsetof(T, overflow_sub2)); ++ ++ __ ld_d(t0, a0, offsetof(T, lhs)); ++ __ ld_d(t1, a0, offsetof(T, rhs)); ++ __ slli_w(t0, t0, 0); ++ __ slli_w(t1, t1, 0); ++ ++ __ MulOverflow(t2, t0, Operand(t1), t3); ++ __ st_d(t2, a0, offsetof(T, output_mul1)); ++ __ st_d(t3, a0, offsetof(T, overflow_mul1)); ++ __ or_(t3, zero_reg, zero_reg); ++ __ MulOverflow(t0, t0, 
Operand(t1), t3); ++ __ st_d(t0, a0, offsetof(T, output_mul2)); ++ __ st_d(t3, a0, offsetof(T, overflow_mul2)); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = ++ Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.lhs = ii; ++ t.rhs = jj; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ expected_add_ovf = base::bits::SignedAddOverflow64(ii, jj, &expected_add); ++ expected_sub_ovf = base::bits::SignedSubOverflow64(ii, jj, &expected_sub); ++ expected_mul_ovf = ++ base::bits::SignedMulOverflow32(ii32, jj32, &expected_mul); ++ ++ CHECK_EQ(expected_add_ovf, t.overflow_add1 < 0); ++ CHECK_EQ(expected_sub_ovf, t.overflow_sub1 < 0); ++ CHECK_EQ(expected_mul_ovf, t.overflow_mul1 != 0); ++ ++ CHECK_EQ(t.overflow_add1, t.overflow_add2); ++ CHECK_EQ(t.overflow_sub1, t.overflow_sub2); ++ CHECK_EQ(t.overflow_mul1, t.overflow_mul2); ++ ++ CHECK_EQ(expected_add, t.output_add1); ++ CHECK_EQ(expected_add, t.output_add2); ++ CHECK_EQ(expected_sub, t.output_sub1); ++ CHECK_EQ(expected_sub, t.output_sub2); ++ if (!expected_mul_ovf) { ++ CHECK_EQ(expected_mul, t.output_mul1); ++ CHECK_EQ(expected_mul, t.output_mul2); ++ } ++ } ++ } ++} ++ ++TEST(min_max_nan) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct TestFloat { ++ double a; ++ double b; ++ double c; ++ double d; ++ float e; ++ float f; ++ float g; ++ float h; ++ }; ++ ++ TestFloat test; ++ const double dnan = std::numeric_limits::quiet_NaN(); ++ const double dinf = std::numeric_limits::infinity(); ++ const double dminf = -std::numeric_limits::infinity(); ++ const float fnan = std::numeric_limits::quiet_NaN(); ++ const float finf = std::numeric_limits::infinity(); ++ const float fminf = -std::numeric_limits::infinity(); ++ const int kTableLength = 13; ++ ++ // clang-format off ++ double inputsa[kTableLength] = {dnan, 3.0, -0.0, 0.0, 42.0, dinf, dminf, ++ dinf, dnan, 3.0, dinf, dnan, dnan}; ++ double inputsb[kTableLength] = {dnan, 2.0, 0.0, -0.0, dinf, 42.0, dinf, ++ dminf, 3.0, dnan, dnan, dinf, dnan}; ++ double outputsdmin[kTableLength] = {dnan, 2.0, -0.0, -0.0, 42.0, ++ 42.0, dminf, dminf, dnan, dnan, ++ dnan, dnan, dnan}; ++ double outputsdmax[kTableLength] = {dnan, 3.0, 0.0, 0.0, dinf, dinf, dinf, ++ dinf, dnan, dnan, dnan, dnan, dnan}; ++ ++ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf, ++ finf, fnan, 3.0, finf, fnan, fnan}; ++ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf, ++ fminf, 3.0, fnan, fnan, finf, fnan}; ++ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf, ++ fminf, fnan, fnan, fnan, fnan, fnan}; ++ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf, ++ finf, fnan, fnan, fnan, fnan, fnan}; ++ ++ // clang-format on ++ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) { ++ __ bind(nan); ++ __ LoadRoot(t8, RootIndex::kNanValue); ++ __ Fld_d(dst, FieldMemOperand(t8, HeapNumber::kValueOffset)); ++ __ Branch(back); ++ }; ++ ++ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) { ++ __ bind(nan); ++ __ Move(dst, fnan); ++ __ Branch(back); ++ }; ++ ++ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan; ++ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan; ++ ++ __ push(s6); ++ __ InitializeRootRegister(); 
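++  // Note: s6 appears to serve as the root register on this port (it is
++  // saved above and reloaded via InitializeRootRegister); the out-of-line
++  // NaN handlers below depend on it to LoadRoot(kNanValue).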
++ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a))); ++ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b))); ++ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, e))); ++ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, f))); ++ __ Float64Min(f12, f8, f9, &handle_mind_nan); ++ __ bind(&back_mind_nan); ++ __ Float64Max(f13, f8, f9, &handle_maxd_nan); ++ __ bind(&back_maxd_nan); ++ __ Float32Min(f14, f10, f11, &handle_mins_nan); ++ __ bind(&back_mins_nan); ++ __ Float32Max(f15, f10, f11, &handle_maxs_nan); ++ __ bind(&back_maxs_nan); ++ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, c))); ++ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, d))); ++ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g))); ++ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h))); ++ __ pop(s6); ++ __ jirl(zero_reg, ra, 0); ++ ++ handle_dnan(f12, &handle_mind_nan, &back_mind_nan); ++ handle_dnan(f13, &handle_maxd_nan, &back_maxd_nan); ++ handle_snan(f14, &handle_mins_nan, &back_mins_nan); ++ handle_snan(f15, &handle_maxs_nan, &back_maxs_nan); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputsa[i]; ++ test.b = inputsb[i]; ++ test.e = inputse[i]; ++ test.f = inputsf[i]; ++ ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); ++ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); ++ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); ++ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); ++ } ++} ++ ++template ++bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset, ++ IN_TYPE value, Func GenerateUnalignedInstructionFunc) { ++ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ IN_TYPE res; ++ ++ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ ++ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE)); ++ f.Call(memory_buffer, 0, 0, 0, 0); ++ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE)); ++ ++ return res == value; ++} ++ ++static const std::vector unsigned_test_values() { ++ // clang-format off ++ static const uint64_t kValues[] = { ++ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0, ++ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF, ++ }; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector unsigned_test_offset() { ++ static const int32_t kValues[] = {// value, offset ++ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++static const std::vector unsigned_test_offset_increment() { ++ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++TEST(Ld_b) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, 
unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_b(a2, MemOperand(a0, in_offset)); ++ __ St_b(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_b(a0, MemOperand(a0, in_offset)); ++ __ St_b(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_bu(a0, MemOperand(a0, in_offset)); ++ __ St_b(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_bu(a2, MemOperand(a0, in_offset)); ++ __ St_b(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_b_bitextension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_b(t0, MemOperand(a0, in_offset)); ++ __ Ld_bu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_w(t0, t0, 7); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_w(t1, t1, 7); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_w(t0, t0, 7); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_b(t0, MemOperand(a0, in_offset)); ++ __ St_b(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_b(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_h) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + 
*k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_h(a2, MemOperand(a0, in_offset)); ++ __ St_h(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_h(a0, MemOperand(a0, in_offset)); ++ __ St_h(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_hu(a0, MemOperand(a0, in_offset)); ++ __ St_h(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_hu(a2, MemOperand(a0, in_offset)); ++ __ St_h(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_h_bitextension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint16_t value = static_cast(*i & 0xFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_h(t0, MemOperand(a0, in_offset)); ++ __ Ld_hu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_w(t0, t0, 15); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_w(t1, t1, 15); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_w(t0, t0, 15); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_h(t0, MemOperand(a0, in_offset)); ++ __ St_h(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_h(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_w) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint32_t value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_w(a2, MemOperand(a0, in_offset)); ++ __ St_w(a2, MemOperand(a0, 
out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_w(a0, MemOperand(a0, in_offset)); ++ __ St_w(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_2)); ++ ++ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_wu(a2, MemOperand(a0, in_offset)); ++ __ St_w(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_3)); ++ ++ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ mov(t0, a0); ++ __ Ld_wu(a0, MemOperand(a0, in_offset)); ++ __ St_w(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_4)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_w_extension) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint32_t value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ Label success, fail, end, different; ++ __ Ld_w(t0, MemOperand(a0, in_offset)); ++ __ Ld_wu(t1, MemOperand(a0, in_offset)); ++ __ Branch(&different, ne, t0, Operand(t1)); ++ ++ // If signed and unsigned values are same, check ++ // the upper bits to see if they are zero ++ __ srai_d(t0, t0, 31); ++ __ Branch(&success, eq, t0, Operand(zero_reg)); ++ __ Branch(&fail); ++ ++ // If signed and unsigned values are different, ++ // check that the upper bits are complementary ++ __ bind(&different); ++ __ srai_d(t1, t1, 31); ++ __ Branch(&fail, ne, t1, Operand(1)); ++ __ srai_d(t0, t0, 31); ++ __ addi_d(t0, t0, 1); ++ __ Branch(&fail, ne, t0, Operand(zero_reg)); ++ // Fall through to success ++ ++ __ bind(&success); ++ __ Ld_w(t0, MemOperand(a0, in_offset)); ++ __ St_w(t0, MemOperand(a0, out_offset)); ++ __ Branch(&end); ++ __ bind(&fail); ++ __ St_w(zero_reg, MemOperand(a0, out_offset)); ++ __ bind(&end); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Ld_d) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ uint64_t value = *i; ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Ld_d(a2, MemOperand(a0, in_offset)); ++ __ St_d(a2, MemOperand(a0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t 
out_offset) { ++ __ mov(t0, a0); ++ __ Ld_d(a0, MemOperand(a0, in_offset)); ++ __ St_d(a0, MemOperand(t0, out_offset)); ++ __ or_(a0, a2, zero_reg); ++ }; ++ CHECK_EQ(true, ++ run_Unaligned(buffer_middle, in_offset, out_offset, ++ (uint32_t)value, fn_2)); ++ } ++ } ++ } ++} ++ ++TEST(Fld_s) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ float value = static_cast(*i & 0xFFFFFFFF); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Fld_s(f0, MemOperand(a0, in_offset)); ++ __ Fst_s(f0, MemOperand(a0, out_offset)); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++TEST(Fld_d) { ++ CcTest::InitializeVM(); ++ ++ static const int kBufferSize = 300 * KB; ++ char memory_buffer[kBufferSize]; ++ char* buffer_middle = memory_buffer + (kBufferSize / 2); ++ ++ FOR_UINT64_INPUTS(i, unsigned_test_values) { ++ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { ++ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { ++ double value = static_cast(*i); ++ int32_t in_offset = *j1 + *k1; ++ int32_t out_offset = *j2 + *k2; ++ ++ auto fn = [](MacroAssembler* masm, int32_t in_offset, ++ int32_t out_offset) { ++ __ Fld_d(f0, MemOperand(a0, in_offset)); ++ __ Fst_d(f0, MemOperand(a0, out_offset)); ++ }; ++ CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, ++ out_offset, value, fn)); ++ } ++ } ++ } ++} ++ ++static const std::vector sltu_test_values() { ++ // clang-format off ++ static const uint64_t kValues[] = { ++ 0, ++ 1, ++ 0x7FE, ++ 0x7FF, ++ 0x800, ++ 0x801, ++ 0xFFE, ++ 0xFFF, ++ 0xFFFFFFFFFFFFF7FE, ++ 0xFFFFFFFFFFFFF7FF, ++ 0xFFFFFFFFFFFFF800, ++ 0xFFFFFFFFFFFFF801, ++ 0xFFFFFFFFFFFFFFFE, ++ 0xFFFFFFFFFFFFFFFF, ++ }; ++ // clang-format on ++ return std::vector(&kValues[0], &kValues[arraysize(kValues)]); ++} ++ ++template ++bool run_Sltu(uint64_t rj, uint64_t rk, Func GenerateSltuInstructionFunc) { ++ using F_CVT = int64_t(uint64_t x0, uint64_t x1, int x2, int x3, int x4); ++ ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assm; ++ ++ GenerateSltuInstructionFunc(masm, rk); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ assm.GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(rj, rk, 0, 0, 0)); ++ return res == 1; ++} ++ ++TEST(Sltu) { ++ CcTest::InitializeVM(); ++ ++ FOR_UINT64_INPUTS(i, sltu_test_values) { ++ FOR_UINT64_INPUTS(j, sltu_test_values) { ++ uint64_t rj = *i; ++ uint64_t rk = *j; ++ ++ auto fn_1 = [](MacroAssembler* masm, uint64_t imm) { ++ __ Sltu(a2, a0, Operand(imm)); ++ }; ++ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_1)); ++ ++ auto fn_2 = [](MacroAssembler* masm, uint64_t imm) { ++ __ Sltu(a2, a0, a1); ++ }; ++ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_2)); ++ } ++ } ++} ++ ++template ++static GeneratedCode GenerateMacroFloat32MinMax(MacroAssembler* masm) { ++ T a = T::from_code(8); // f8 ++ T b = T::from_code(9); // f9 ++ T c = 
T::from_code(10); // f10 ++ ++ Label ool_min_abc, ool_min_aab, ool_min_aba; ++ Label ool_max_abc, ool_max_aab, ool_max_aba; ++ ++ Label done_min_abc, done_min_aab, done_min_aba; ++ Label done_max_abc, done_max_aab, done_max_aba; ++ ++#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ ++ __ Fld_s(x, MemOperand(a0, offsetof(Inputs, src1_))); \ ++ __ Fld_s(y, MemOperand(a0, offsetof(Inputs, src2_))); \ ++ __ fminmax(res, x, y, &ool); \ ++ __ bind(&done); \ ++ __ Fst_s(a, MemOperand(a1, offsetof(Results, res_field))) ++ ++ // a = min(b, c); ++ FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); ++ // a = min(a, b); ++ FLOAT_MIN_MAX(Float32Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); ++ // a = min(b, a); ++ FLOAT_MIN_MAX(Float32Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); ++ ++ // a = max(b, c); ++ FLOAT_MIN_MAX(Float32Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); ++ // a = max(a, b); ++ FLOAT_MIN_MAX(Float32Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); ++ // a = max(b, a); ++ FLOAT_MIN_MAX(Float32Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); ++ ++#undef FLOAT_MIN_MAX ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ // Generate out-of-line cases. ++ __ bind(&ool_min_abc); ++ __ Float32MinOutOfLine(a, b, c); ++ __ Branch(&done_min_abc); ++ ++ __ bind(&ool_min_aab); ++ __ Float32MinOutOfLine(a, a, b); ++ __ Branch(&done_min_aab); ++ ++ __ bind(&ool_min_aba); ++ __ Float32MinOutOfLine(a, b, a); ++ __ Branch(&done_min_aba); ++ ++ __ bind(&ool_max_abc); ++ __ Float32MaxOutOfLine(a, b, c); ++ __ Branch(&done_max_abc); ++ ++ __ bind(&ool_max_aab); ++ __ Float32MaxOutOfLine(a, a, b); ++ __ Branch(&done_max_aab); ++ ++ __ bind(&ool_max_aba); ++ __ Float32MaxOutOfLine(a, b, a); ++ __ Branch(&done_max_aba); ++ ++ CodeDesc desc; ++ masm->GetCode(masm->isolate(), &desc); ++ Handle code = ++ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); ++#ifdef DEBUG ++ StdoutStream os; ++ code->Print(os); ++#endif ++ return GeneratedCode::FromCode(*code); ++} ++ ++TEST(macro_float_minmax_f32) { ++ // Test the Float32Min and Float32Max macros. ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct Inputs { ++ float src1_; ++ float src2_; ++ }; ++ ++ struct Results { ++ // Check all register aliasing possibilities in order to exercise all ++ // code-paths in the macro assembler. ++ float min_abc_; ++ float min_aab_; ++ float min_aba_; ++ float max_abc_; ++ float max_aab_; ++ float max_aba_; ++ }; ++ ++ GeneratedCode f = ++ GenerateMacroFloat32MinMax(masm); ++ ++#define CHECK_MINMAX(src1, src2, min, max) \ ++ do { \ ++ Inputs inputs = {src1, src2}; \ ++ Results results; \ ++ f.Call(&inputs, &results, 0, 0, 0); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_abc_)); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_aab_)); \ ++ CHECK_EQ(bit_cast(min), bit_cast(results.min_aba_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_abc_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_aab_)); \ ++ CHECK_EQ(bit_cast(max), bit_cast(results.max_aba_)); \ ++ /* Use a bit_cast to correctly identify -0.0 and NaNs. 
*/ \ ++ } while (0) ++ ++ float nan_a = std::numeric_limits::quiet_NaN(); ++ float nan_b = std::numeric_limits::quiet_NaN(); ++ ++ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f); ++ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f); ++ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f); ++ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f); ++ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f); ++ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f); ++ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f); ++ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f); ++ ++ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f); ++ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f); ++ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f); ++ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f); ++ ++ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a); ++ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); ++ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); ++ ++#undef CHECK_MINMAX ++} ++ ++template ++static GeneratedCode GenerateMacroFloat64MinMax(MacroAssembler* masm) { ++ T a = T::from_code(8); // f8 ++ T b = T::from_code(9); // f9 ++ T c = T::from_code(10); // f10 ++ ++ Label ool_min_abc, ool_min_aab, ool_min_aba; ++ Label ool_max_abc, ool_max_aab, ool_max_aba; ++ ++ Label done_min_abc, done_min_aab, done_min_aba; ++ Label done_max_abc, done_max_aab, done_max_aba; ++ ++#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ ++ __ Fld_d(x, MemOperand(a0, offsetof(Inputs, src1_))); \ ++ __ Fld_d(y, MemOperand(a0, offsetof(Inputs, src2_))); \ ++ __ fminmax(res, x, y, &ool); \ ++ __ bind(&done); \ ++ __ Fst_d(a, MemOperand(a1, offsetof(Results, res_field))) ++ ++ // a = min(b, c); ++ FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); ++ // a = min(a, b); ++ FLOAT_MIN_MAX(Float64Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); ++ // a = min(b, a); ++ FLOAT_MIN_MAX(Float64Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); ++ ++ // a = max(b, c); ++ FLOAT_MIN_MAX(Float64Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); ++ // a = max(a, b); ++ FLOAT_MIN_MAX(Float64Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); ++ // a = max(b, a); ++ FLOAT_MIN_MAX(Float64Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); ++ ++#undef FLOAT_MIN_MAX ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ // Generate out-of-line cases. ++ __ bind(&ool_min_abc); ++ __ Float64MinOutOfLine(a, b, c); ++ __ Branch(&done_min_abc); ++ ++ __ bind(&ool_min_aab); ++ __ Float64MinOutOfLine(a, a, b); ++ __ Branch(&done_min_aab); ++ ++ __ bind(&ool_min_aba); ++ __ Float64MinOutOfLine(a, b, a); ++ __ Branch(&done_min_aba); ++ ++ __ bind(&ool_max_abc); ++ __ Float64MaxOutOfLine(a, b, c); ++ __ Branch(&done_max_abc); ++ ++ __ bind(&ool_max_aab); ++ __ Float64MaxOutOfLine(a, a, b); ++ __ Branch(&done_max_aab); ++ ++ __ bind(&ool_max_aba); ++ __ Float64MaxOutOfLine(a, b, a); ++ __ Branch(&done_max_aba); ++ ++ CodeDesc desc; ++ masm->GetCode(masm->isolate(), &desc); ++ Handle code = ++ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build(); ++#ifdef DEBUG ++ StdoutStream os; ++ code->Print(os); ++#endif ++ return GeneratedCode::FromCode(*code); ++} ++ ++TEST(macro_float_minmax_f64) { ++ // Test the Float64Min and Float64Max macros. 
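++  // As with the f32 variant above, every register-aliasing pattern
++  // (abc, aab, aba) is exercised so that both the inline macro and its
++  // out-of-line fallback are covered.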
++  CcTest::InitializeVM();
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
++  MacroAssembler* masm = &assembler;
++
++  struct Inputs {
++    double src1_;
++    double src2_;
++  };
++
++  struct Results {
++    // Check all register aliasing possibilities in order to exercise all
++    // code-paths in the macro assembler.
++    double min_abc_;
++    double min_aab_;
++    double min_aba_;
++    double max_abc_;
++    double max_aab_;
++    double max_aba_;
++  };
++
++  GeneratedCode<F4> f =
++      GenerateMacroFloat64MinMax<FPURegister, Inputs, Results>(masm);
++
++#define CHECK_MINMAX(src1, src2, min, max)                                   \
++  do {                                                                       \
++    Inputs inputs = {src1, src2};                                            \
++    Results results;                                                         \
++    f.Call(&inputs, &results, 0, 0, 0);                                      \
++    CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
++    CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
++    CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
++    CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \
++    CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \
++    CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
++    /* Use a bit_cast to correctly identify -0.0 and NaNs. */                \
++  } while (0)
++
++  double nan_a = std::numeric_limits<double>::quiet_NaN();
++  double nan_b = std::numeric_limits<double>::quiet_NaN();
++
++  CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
++  CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
++  CHECK_MINMAX(0.0, -1.0, -1.0, 0.0);
++  CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0);
++  CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0);
++  CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0);
++  CHECK_MINMAX(0.0, 1.0, 0.0, 1.0);
++  CHECK_MINMAX(1.0, 0.0, 0.0, 1.0);
++
++  CHECK_MINMAX(0.0, 0.0, 0.0, 0.0);
++  CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0);
++  CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0);
++  CHECK_MINMAX(0.0, -0.0, -0.0, 0.0);
++
++  CHECK_MINMAX(0.0, nan_a, nan_a, nan_a);
++  CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a);
++  CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
++  CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
++
++#undef CHECK_MINMAX
++}
++
++uint64_t run_Sub_w(uint64_t imm, int32_t num_instr) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
++  MacroAssembler* masm = &assembler;
++
++  Label code_start;
++  __ bind(&code_start);
++  __ Sub_w(a2, zero_reg, Operand(imm));
++  CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  masm->GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++#ifdef OBJECT_PRINT
++  code->Print(std::cout);
++#endif
++  auto f = GeneratedCode<FV>::FromCode(*code);
++
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(SUB_W) {
++  CcTest::InitializeVM();
++
++  // Test Sub_w macro-instruction for min_int12 and max_int12 border cases.
++  // For subtracting int12 immediate values we use addi_w.
++
++  struct TestCaseSub {
++    uint64_t imm;
++    uint64_t expected_res;
++    int32_t num_instr;
++  };
++
++  // We call Sub_w(v0, zero_reg, imm) to test cases listed below.
++  // 0 - imm = expected_res
++  // clang-format off
++  struct TestCaseSub tc[] = {
++      // imm, expected_res, num_instr
++      {0xFFFFFFFFFFFFF800, 0x800, 2},  // min_int12
++      // The test case above generates ori + add_w instruction sequence.
++      // We can't have just addi_w because -min_int12 > max_int12 so use
++      // register. We can load min_int12 to at register with addi_w and then
++      // subtract at with sub_w, but now we use ori + add_w because -min_int12
++      // can be loaded using ori.
++      {0x800, 0xFFFFFFFFFFFFF800, 1},  // max_int12 + 1
++      // Generates addi_w
++      // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_w.
++      {0xFFFFFFFFFFFFF7FF, 0x801, 2},  // min_int12 - 1
++      // Generates ori + add_w
++      // To load this value to at we need two instructions and another one to
++      // subtract, lu12i_w + ori + sub_w. But we can load -value to at using
++      // just ori and then add at register with add_w.
++      {0x801, 0xFFFFFFFFFFFFF7FF, 2},  // max_int12 + 2
++      // Generates ori + sub_w
++      // Not int12 but is uint12, load value to at with ori and subtract with
++      // sub_w.
++      {0x00010000, 0xFFFFFFFFFFFF0000, 2},
++      // Generates lu12i_w + sub_w
++      // Load value using lu12i_w to at and subtract with sub_w.
++      {0x00010001, 0xFFFFFFFFFFFEFFFF, 3},
++      // Generates lu12i_w + ori + sub_w
++      // We have to generate three instructions in this case.
++      {0x7FFFFFFF, 0xFFFFFFFF80000001, 3},  // max_int32
++      // Generates lu12i_w + ori + sub_w
++      {0xFFFFFFFF80000000, 0xFFFFFFFF80000000, 2},  // min_int32
++      // The test case above generates lu12i_w + sub_w instruction sequence.
++      // The result of 0 - min_int32 equals max_int32 + 1, which wraps around
++      // to min_int32 again.
++  };
++  // clang-format on
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    CHECK_EQ(tc[i].expected_res, run_Sub_w(tc[i].imm, tc[i].num_instr));
++  }
++}
++
++uint64_t run_Sub_d(uint64_t imm, int32_t num_instr) {
++  Isolate* isolate = CcTest::i_isolate();
++  HandleScope scope(isolate);
++
++  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
++  MacroAssembler* masm = &assembler;
++
++  Label code_start;
++  __ bind(&code_start);
++  __ Sub_d(a2, zero_reg, Operand(imm));
++  CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr);
++  __ or_(a0, a2, zero_reg);
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  masm->GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++#ifdef OBJECT_PRINT
++  code->Print(std::cout);
++#endif
++  auto f = GeneratedCode<FV>::FromCode(*code);
++
++  uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
++
++  return res;
++}
++
++TEST(SUB_D) {
++  CcTest::InitializeVM();
++
++  // Test Sub_d macro-instruction for min_int12 and max_int12 border cases.
++  // For subtracting int12 immediate values we use addi_d.
++
++  struct TestCaseSub {
++    uint64_t imm;
++    uint64_t expected_res;
++    int32_t num_instr;
++  };
++  // We call Sub_d(v0, zero_reg, imm) to test cases listed below.
++  // 0 - imm = expected_res
++  // clang-format off
++  struct TestCaseSub tc[] = {
++      // imm, expected_res, num_instr
++      {0xFFFFFFFFFFFFF800, 0x800, 2},  // min_int12
++      // The test case above generates addi_d + sub_d instruction sequence:
++      // min_int12 is an int12 value, so it can be loaded to at with a single
++      // addi_d and then subtracted with sub_d.
++      { 0x800, 0xFFFFFFFFFFFFF800, 1},  // max_int12 + 1
++      // Generates addi_d
++      // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_d.
++ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1 ++ // Generates ori + add_d ++ { 0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2 ++ // Generates ori + add_d ++ { 0x00001000, 0xFFFFFFFFFFFFF000, 2}, // max_uint12 + 1 ++ // Generates lu12i_w + sub_d ++ { 0x00001001, 0xFFFFFFFFFFFFEFFF, 3}, // max_uint12 + 2 ++ // Generates lu12i_w + ori + sub_d ++ {0x00000000FFFFFFFF, 0xFFFFFFFF00000001, 3}, // max_uint32 ++ // Generates addi_w + li32i_d + sub_d ++ {0x00000000FFFFFFFE, 0xFFFFFFFF00000002, 3}, // max_uint32 - 1 ++ // Generates addi_w + li32i_d + sub_d ++ {0xFFFFFFFF80000000, 0x80000000, 2}, // min_int32 ++ // Generates lu12i_w + sub_d ++ {0x0000000080000000, 0xFFFFFFFF80000000, 2}, // max_int32 + 1 ++ // Generates lu12i_w + add_d ++ {0xFFFF0000FFFF8765, 0x0000FFFF0000789B, 4}, ++ // Generates lu12i_w + ori + lu32i_d + sub ++ {0x1234ABCD87654321, 0xEDCB5432789ABCDF, 5}, ++ // Generates lu12i_w + ori + lu32i_d + lu52i_d + sub ++ {0xFFFF789100000000, 0x876F00000000, 3}, ++ // Generates xor + lu32i_d + sub ++ {0xF12F789100000000, 0xED0876F00000000, 4}, ++ // Generates xor + lu32i_d + lu52i_d + sub ++ {0xF120000000000800, 0xEDFFFFFFFFFF800, 3}, ++ // Generates ori + lu52i_d + sub ++ {0xFFF0000000000000, 0x10000000000000, 2} ++ // Generates lu52i_d + sub ++ }; ++ // clang-format on ++ ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ CHECK_EQ(tc[i].expected_res, run_Sub_d(tc[i].imm, tc[i].num_instr)); ++ } ++} ++ ++TEST(Move) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ float a; ++ float b; ++ float result_a; ++ float result_b; ++ double c; ++ double d; ++ double e; ++ double result_c; ++ double result_d; ++ double result_e; ++ }; ++ T t; ++ __ li(a4, static_cast(0x80000000)); ++ __ St_w(a4, MemOperand(a0, offsetof(T, a))); ++ __ li(a5, static_cast(0x12345678)); ++ __ St_w(a5, MemOperand(a0, offsetof(T, b))); ++ __ li(a6, static_cast(0x8877665544332211)); ++ __ St_d(a6, MemOperand(a0, offsetof(T, c))); ++ __ li(a7, static_cast(0x1122334455667788)); ++ __ St_d(a7, MemOperand(a0, offsetof(T, d))); ++ __ li(t0, static_cast(0)); ++ __ St_d(t0, MemOperand(a0, offsetof(T, e))); ++ ++ __ Move(f8, static_cast(0x80000000)); ++ __ Move(f9, static_cast(0x12345678)); ++ __ Move(f10, static_cast(0x8877665544332211)); ++ __ Move(f11, static_cast(0x1122334455667788)); ++ __ Move(f12, static_cast(0)); ++ __ Fst_s(f8, MemOperand(a0, offsetof(T, result_a))); ++ __ Fst_s(f9, MemOperand(a0, offsetof(T, result_b))); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_c))); ++ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_d))); ++ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_e))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(t.a, t.result_a); ++ CHECK_EQ(t.b, t.result_b); ++ CHECK_EQ(t.c, t.result_c); ++ CHECK_EQ(t.d, t.result_d); ++ CHECK_EQ(t.e, t.result_e); ++} ++ ++TEST(Movz_Movn) { ++ const int kTableLength = 4; ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct Test { ++ int64_t rt; ++ 
int64_t a; ++ int64_t b; ++ int64_t bold; ++ int64_t b1; ++ int64_t bold1; ++ int32_t c; ++ int32_t d; ++ int32_t dold; ++ int32_t d1; ++ int32_t dold1; ++ }; ++ ++ Test test; ++ // clang-format off ++ int64_t inputs_D[kTableLength] = { ++ 7, 8, -9, -10 ++ }; ++ int32_t inputs_W[kTableLength] = { ++ 3, 4, -5, -6 ++ }; ++ ++ int32_t outputs_W[kTableLength] = { ++ 3, 4, -5, -6 ++ }; ++ int64_t outputs_D[kTableLength] = { ++ 7, 8, -9, -10 ++ }; ++ // clang-format on ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(Test, a))); ++ __ Ld_w(a5, MemOperand(a0, offsetof(Test, c))); ++ __ Ld_d(a6, MemOperand(a0, offsetof(Test, rt))); ++ __ li(t0, 1); ++ __ li(t1, 1); ++ __ li(t2, 1); ++ __ li(t3, 1); ++ __ St_d(t0, MemOperand(a0, offsetof(Test, bold))); ++ __ St_d(t1, MemOperand(a0, offsetof(Test, bold1))); ++ __ St_w(t2, MemOperand(a0, offsetof(Test, dold))); ++ __ St_w(t3, MemOperand(a0, offsetof(Test, dold1))); ++ __ Movz(t0, a4, a6); ++ __ Movn(t1, a4, a6); ++ __ Movz(t2, a5, a6); ++ __ Movn(t3, a5, a6); ++ __ St_d(t0, MemOperand(a0, offsetof(Test, b))); ++ __ St_d(t1, MemOperand(a0, offsetof(Test, b1))); ++ __ St_w(t2, MemOperand(a0, offsetof(Test, d))); ++ __ St_w(t3, MemOperand(a0, offsetof(Test, d1))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ test.a = inputs_D[i]; ++ test.c = inputs_W[i]; ++ ++ test.rt = 1; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.b, test.bold); ++ CHECK_EQ(test.d, test.dold); ++ CHECK_EQ(test.b1, outputs_D[i]); ++ CHECK_EQ(test.d1, outputs_W[i]); ++ ++ test.rt = 0; ++ f.Call(&test, 0, 0, 0, 0); ++ CHECK_EQ(test.b, outputs_D[i]); ++ CHECK_EQ(test.d, outputs_W[i]); ++ CHECK_EQ(test.b1, test.bold1); ++ CHECK_EQ(test.d1, test.dold1); ++ } ++} ++ ++TEST(macro_instructions1) { ++ // Test 32bit calculate instructions macros. 
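++  // Each macro result is cross-checked against the corresponding plain
++  // instruction (e.g. Add_w vs. add_w) and against its immediate-operand
++  // form, or against a precomputed constant where no plain form applies.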
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x00000004); ++ __ li(a5, 0x00001234); ++ __ li(a6, 0x12345678); ++ __ li(a7, 0x7FFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFC)); ++ __ li(t1, static_cast(0xFFFFEDCC)); ++ __ li(t2, static_cast(0xEDCBA988)); ++ __ li(t3, static_cast(0x80000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ add_w(a2, a7, t1); ++ __ Add_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Add_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ __ addi_w(a2, a6, 0x800); ++ __ Add_w(a3, a6, 0xFFFFF800); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ mul_w(a2, t1, a7); ++ __ Mul_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Mul_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ mulh_w(a2, t1, a7); ++ __ Mulh_w(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Mulh_w(t4, t1, static_cast(0x7FFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mulh_wu(a2, a4, static_cast(0xFFFFEDCC)); ++ __ Branch(&error, ne, a2, Operand(0x3)); ++ __ Mulh_wu(a3, a4, t1); ++ __ Branch(&error, ne, a3, Operand(0x3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ div_w(a2, a7, t2); ++ __ Div_w(a3, a7, t2); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Div_w(t4, a7, static_cast(0xEDCBA988)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_wu(a2, a7, a5); ++ __ Branch(&error, ne, a2, Operand(0x70821)); ++ __ Div_wu(a3, t0, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xE1042)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_w(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ Mod_w(a3, t2, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFF258)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_wu(a2, a6, a5); ++ __ Branch(&error, ne, a2, Operand(0xDA8)); ++ __ Mod_wu(a3, t2, static_cast(0x00001234)); ++ __ Branch(&error, ne, a3, Operand(0xF0)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(macro_instructions2) { ++ // Test 64bit calculate instructions macros. 
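++  // The 64-bit Add_d is cross-checked against plain add_d; the remaining
++  // Mul/Div/Mod macros are checked against precomputed constants.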
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ add_d(a2, a7, t1); ++ __ Add_d(a3, t1, a7); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ __ Add_d(t4, t1, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a2, Operand(t4)); ++ __ addi_d(a2, a6, 0x800); ++ __ Add_d(a3, a6, Operand(0xFFFFFFFFFFFFF800)); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mul_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0)); ++ __ Mul_d(a3, t0, Operand(0xDF6B8F35A10E205C)); ++ __ Branch(&error, ne, a3, Operand(0x57ad69f40f870584)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mulh_d(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467)); ++ __ Mulh_d(a3, t0, Operand(0xDF6B8F35A10E205C)); ++ __ Branch(&error, ne, a3, Operand(0x15d)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_d(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Div_d(a3, t1, Operand(0x17312)); ++ __ Branch(&error, ne, a3, Operand(0xffffe985f631e6d9)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Div_du(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Div_du(a3, t1, 0x17312); ++ __ Branch(&error, ne, a3, Operand(0x9a22ffd3973d)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_d(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ Mod_d(a3, t2, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(0xfffffffffffffb0a)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Mod_du(a2, a6, a4); ++ __ Branch(&error, ne, a2, Operand(0x13558)); ++ __ Mod_du(a3, t2, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841)); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(macro_instructions3) { ++ // Test 64bit calculate instructions macros. 
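++  // Covers the logical and compare macros: And/Or/Xor/Nor/Andn/Orn, Neg,
++  // and the Slt/Sle/Sge/Sgt families including their unsigned variants.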
++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ Label exit, error; ++ ++ __ li(a4, 0x17312); ++ __ li(a5, 0x1012131415161718); ++ __ li(a6, 0x51F4B764A26E7412); ++ __ li(a7, 0x7FFFFFFFFFFFFFFF); ++ __ li(t0, static_cast(0xFFFFFFFFFFFFF547)); ++ __ li(t1, static_cast(0xDF6B8F35A10E205C)); ++ __ li(t2, static_cast(0x81F25A87C4236841)); ++ __ li(t3, static_cast(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ And(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x1310)); ++ __ And(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(0x51F4B764A26E7412)); ++ __ andi(a2, a6, 0xDCB); ++ __ And(a3, a6, Operand(0xDCB)); ++ __ Branch(&error, ne, a3, Operand(a2)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Or(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f)); ++ __ Or(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841)); ++ __ ori(a2, a5, 0xDCB); ++ __ Or(a3, a5, Operand(0xDCB)); ++ __ Branch(&error, ne, a2, Operand(a3)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ Orn(a3, t2, Operand(0x81F25A87C4236841)); ++ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Xor(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b)); ++ __ Xor(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0x1f25a87c4236841)); ++ __ Xor(a2, t2, Operand(0xDCB)); ++ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Nor(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5)); ++ __ Nor(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(0x8000000000000000)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Andn(a2, a4, a5); ++ __ Branch(&error, ne, a2, Operand(0x16002)); ++ __ Andn(a3, a6, Operand(0x7FFFFFFFFFFFFFFF)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Orn(a2, t0, t1); ++ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7)); ++ __ Orn(a3, t2, Operand(0x8000000000000000)); ++ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Neg(a2, a7); ++ __ Branch(&error, ne, a2, Operand(0x8000000000000001)); ++ __ Neg(a3, t0); ++ __ Branch(&error, ne, a3, Operand(0xAB9)); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Slt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Slt(a3, a7, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Slt(a3, a4, 0x800); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sle(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Sle(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sle(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ or_(a2, 
zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sleu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(0x1)); ++ __ Sleu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sleu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sge(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sge(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sge(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgeu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgeu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0x1))); ++ __ Sgeu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgt(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgt(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Sgt(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0x1))); ++ ++ __ or_(a2, zero_reg, zero_reg); ++ __ or_(a3, zero_reg, zero_reg); ++ __ Sgtu(a2, a5, a6); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ __ Sgtu(a3, t0, Operand(0xFFFFFFFFFFFFF547)); ++ __ Branch(&error, ne, a3, Operand(static_cast(0))); ++ __ Sgtu(a2, a7, t0); ++ __ Branch(&error, ne, a2, Operand(static_cast(0))); ++ ++ __ li(a2, 0x31415926); ++ __ b(&exit); ++ ++ __ bind(&error); ++ __ li(a2, 0x666); ++ ++ __ bind(&exit); ++ __ or_(a0, a2, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ int64_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); ++ ++ CHECK_EQ(0x31415926L, res); ++} ++ ++TEST(Rotr_w) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ int32_t input; ++ int32_t result_rotr_0; ++ int32_t result_rotr_4; ++ int32_t result_rotr_8; ++ int32_t result_rotr_12; ++ int32_t result_rotr_16; ++ int32_t result_rotr_20; ++ int32_t result_rotr_24; ++ int32_t result_rotr_28; ++ int32_t result_rotr_32; ++ int32_t result_rotri_0; ++ int32_t result_rotri_4; ++ int32_t result_rotri_8; ++ int32_t result_rotri_12; ++ int32_t result_rotri_16; ++ int32_t result_rotri_20; ++ int32_t result_rotri_24; ++ int32_t result_rotri_28; ++ int32_t result_rotri_32; ++ }; ++ T t; ++ ++ __ Ld_w(a4, MemOperand(a0, offsetof(T, input))); ++ ++ __ Rotr_w(a5, a4, 0); ++ __ Rotr_w(a6, a4, 0x04); ++ __ Rotr_w(a7, a4, 0x08); ++ __ Rotr_w(t0, a4, 0x0C); ++ __ Rotr_w(t1, a4, 0x10); ++ __ Rotr_w(t2, a4, -0x0C); ++ __ Rotr_w(t3, a4, -0x08); ++ __ Rotr_w(t4, a4, -0x04); ++ __ Rotr_w(t5, a4, 0x20); ++ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotr_0))); ++ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotr_4))); ++ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotr_8))); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_12))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_16))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotr_20))); ++ __ St_w(t3, 
MemOperand(a0, offsetof(T, result_rotr_24))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotr_28))); ++ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotr_32))); ++ ++ __ li(t5, 0); ++ __ Rotr_w(a5, a4, t5); ++ __ li(t5, 0x04); ++ __ Rotr_w(a6, a4, t5); ++ __ li(t5, 0x08); ++ __ Rotr_w(a7, a4, t5); ++ __ li(t5, 0x0C); ++ __ Rotr_w(t0, a4, t5); ++ __ li(t5, 0x10); ++ __ Rotr_w(t1, a4, t5); ++ __ li(t5, -0x0C); ++ __ Rotr_w(t2, a4, t5); ++ __ li(t5, -0x08); ++ __ Rotr_w(t3, a4, t5); ++ __ li(t5, -0x04); ++ __ Rotr_w(t4, a4, t5); ++ __ li(t5, 0x20); ++ __ Rotr_w(t5, a4, t5); ++ ++ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotri_0))); ++ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotri_4))); ++ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotri_8))); ++ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_12))); ++ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_16))); ++ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_20))); ++ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_24))); ++ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotri_28))); ++ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotri_32))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x12345678; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_0); ++ CHECK_EQ(static_cast(0x81234567), t.result_rotr_4); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotr_8); ++ CHECK_EQ(static_cast(0x67812345), t.result_rotr_12); ++ CHECK_EQ(static_cast(0x56781234), t.result_rotr_16); ++ CHECK_EQ(static_cast(0x45678123), t.result_rotr_20); ++ CHECK_EQ(static_cast(0x34567812), t.result_rotr_24); ++ CHECK_EQ(static_cast(0x23456781), t.result_rotr_28); ++ CHECK_EQ(static_cast(0x12345678), t.result_rotr_32); ++ ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_0); ++ CHECK_EQ(static_cast(0x81234567), t.result_rotri_4); ++ CHECK_EQ(static_cast(0x78123456), t.result_rotri_8); ++ CHECK_EQ(static_cast(0x67812345), t.result_rotri_12); ++ CHECK_EQ(static_cast(0x56781234), t.result_rotri_16); ++ CHECK_EQ(static_cast(0x45678123), t.result_rotri_20); ++ CHECK_EQ(static_cast(0x34567812), t.result_rotri_24); ++ CHECK_EQ(static_cast(0x23456781), t.result_rotri_28); ++ CHECK_EQ(static_cast(0x12345678), t.result_rotri_32); ++} ++ ++TEST(Rotr_d) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ int64_t input; ++ int64_t result_rotr_0; ++ int64_t result_rotr_8; ++ int64_t result_rotr_16; ++ int64_t result_rotr_24; ++ int64_t result_rotr_32; ++ int64_t result_rotr_40; ++ int64_t result_rotr_48; ++ int64_t result_rotr_56; ++ int64_t result_rotr_64; ++ int64_t result_rotri_0; ++ int64_t result_rotri_8; ++ int64_t result_rotri_16; ++ int64_t result_rotri_24; ++ int64_t result_rotri_32; ++ int64_t result_rotri_40; ++ int64_t result_rotri_48; ++ int64_t result_rotri_56; ++ int64_t result_rotri_64; ++ }; ++ T t; ++ ++ __ Ld_d(a4, MemOperand(a0, offsetof(T, input))); ++ ++ __ Rotr_d(a5, a4, 0); ++ __ Rotr_d(a6, a4, 0x08); ++ __ Rotr_d(a7, a4, 0x10); ++ __ Rotr_d(t0, a4, 0x18); ++ __ Rotr_d(t1, a4, 0x20); ++ __ Rotr_d(t2, a4, -0x18); ++ __ Rotr_d(t3, a4, -0x10); ++ __ Rotr_d(t4, a4, -0x08); ++ __ Rotr_d(t5, a4, 0x40); ++ __ St_d(a5, MemOperand(a0, offsetof(T, 
result_rotr_0))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotr_8))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotr_16))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_24))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_32))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotr_40))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotr_48))); ++ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotr_56))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotr_64))); ++ ++ __ li(t5, 0); ++ __ Rotr_d(a5, a4, t5); ++ __ li(t5, 0x08); ++ __ Rotr_d(a6, a4, t5); ++ __ li(t5, 0x10); ++ __ Rotr_d(a7, a4, t5); ++ __ li(t5, 0x18); ++ __ Rotr_d(t0, a4, t5); ++ __ li(t5, 0x20); ++ __ Rotr_d(t1, a4, t5); ++ __ li(t5, -0x18); ++ __ Rotr_d(t2, a4, t5); ++ __ li(t5, -0x10); ++ __ Rotr_d(t3, a4, t5); ++ __ li(t5, -0x08); ++ __ Rotr_d(t4, a4, t5); ++ __ li(t5, 0x40); ++ __ Rotr_d(t5, a4, t5); ++ ++ __ St_d(a5, MemOperand(a0, offsetof(T, result_rotri_0))); ++ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotri_8))); ++ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotri_16))); ++ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_24))); ++ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_32))); ++ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_40))); ++ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_48))); ++ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotri_56))); ++ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotri_64))); ++ ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ t.input = 0x0123456789ABCDEF; ++ f.Call(&t, 0, 0, 0, 0); ++ ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotr_0); ++ CHECK_EQ(static_cast(0xEF0123456789ABCD), t.result_rotr_8); ++ CHECK_EQ(static_cast(0xCDEF0123456789AB), t.result_rotr_16); ++ CHECK_EQ(static_cast(0xABCDEF0123456789), t.result_rotr_24); ++ CHECK_EQ(static_cast(0x89ABCDEF01234567), t.result_rotr_32); ++ CHECK_EQ(static_cast(0x6789ABCDEF012345), t.result_rotr_40); ++ CHECK_EQ(static_cast(0x456789ABCDEF0123), t.result_rotr_48); ++ CHECK_EQ(static_cast(0x23456789ABCDEF01), t.result_rotr_56); ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotr_64); ++ ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotri_0); ++ CHECK_EQ(static_cast(0xEF0123456789ABCD), t.result_rotri_8); ++ CHECK_EQ(static_cast(0xCDEF0123456789AB), t.result_rotri_16); ++ CHECK_EQ(static_cast(0xABCDEF0123456789), t.result_rotri_24); ++ CHECK_EQ(static_cast(0x89ABCDEF01234567), t.result_rotri_32); ++ CHECK_EQ(static_cast(0x6789ABCDEF012345), t.result_rotri_40); ++ CHECK_EQ(static_cast(0x456789ABCDEF0123), t.result_rotri_48); ++ CHECK_EQ(static_cast(0x23456789ABCDEF01), t.result_rotri_56); ++ CHECK_EQ(static_cast(0x0123456789ABCDEF), t.result_rotri_64); ++} ++ ++TEST(macro_instructions4) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct T { ++ double a; ++ float b; ++ double result_floor_a; ++ float result_floor_b; ++ double result_ceil_a; ++ float result_ceil_b; ++ double result_trunc_a; ++ float result_trunc_b; ++ double result_round_a; ++ float result_round_b; ++ }; ++ T t; ++ ++ const int kTableLength = 16; ++ ++ // clang-format off ++ double inputs_d[kTableLength] = { ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ 
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307, ++ std::numeric_limits::max() - 0.1, ++ std::numeric_limits::infinity() ++ }; ++ float inputs_s[kTableLength] = { ++ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, ++ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, ++ 1.7976931348623157E+38, 6.27463370218383111104242366943E-37, ++ std::numeric_limits::lowest() + 0.6, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_round_s[kTableLength] = { ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 1.7976931348623157E+38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_round_d[kTableLength] = { ++ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, ++ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, ++ 1.7976931348623157E+308, 0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_trunc_s[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E+38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_trunc_d[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E+308, 0, ++ std::numeric_limits::max() - 1, ++ std::numeric_limits::infinity() ++ }; ++ float outputs_ceil_s[kTableLength] = { ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E38, 1, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_ceil_d[kTableLength] = { ++ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, ++ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, ++ 1.7976931348623157E308, 1, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ float outputs_floor_s[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 1.7976931348623157E38, 0, ++ std::numeric_limits::lowest() + 1, ++ std::numeric_limits::infinity() ++ }; ++ double outputs_floor_d[kTableLength] = { ++ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, ++ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, ++ 1.7976931348623157E308, 0, ++ std::numeric_limits::max(), ++ std::numeric_limits::infinity() ++ }; ++ // clang-format on ++ ++ __ Fld_d(f8, MemOperand(a0, offsetof(T, a))); ++ __ Fld_s(f9, MemOperand(a0, offsetof(T, b))); ++ __ Floor_d(f10, f8); ++ __ Floor_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_floor_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_floor_b))); ++ __ Ceil_d(f10, f8); ++ __ Ceil_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_ceil_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_ceil_b))); ++ __ Trunc_d(f10, f8); ++ __ Trunc_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_trunc_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_trunc_b))); ++ __ Round_d(f10, f8); ++ __ Round_s(f11, f9); ++ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_round_a))); ++ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_round_b))); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ for (int i = 0; i < kTableLength; i++) { ++ t.a = inputs_d[i]; ++ t.b = inputs_s[i]; ++ f.Call(&t, 0, 0, 0, 0); ++ CHECK_EQ(t.result_floor_a, outputs_floor_d[i]); ++ CHECK_EQ(t.result_floor_b, outputs_floor_s[i]); ++ CHECK_EQ(t.result_ceil_a, outputs_ceil_d[i]); ++ CHECK_EQ(t.result_ceil_b, outputs_ceil_s[i]); ++ 
CHECK_EQ(t.result_trunc_a, outputs_trunc_d[i]); ++ CHECK_EQ(t.result_trunc_b, outputs_trunc_s[i]); ++ CHECK_EQ(t.result_round_a, outputs_round_d[i]); ++ CHECK_EQ(t.result_round_b, outputs_round_s[i]); ++ } ++} ++ ++uint64_t run_ExtractBits(uint64_t source, int pos, int size, bool sign_extend) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ if (sign_extend) { ++ __ ExtractBits(t0, a0, a1, size, true); ++ } else { ++ __ ExtractBits(t0, a0, a1, size); ++ } ++ __ or_(a0, t0, zero_reg); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(source, pos, 0, 0, 0)); ++ return res; ++} ++ ++TEST(ExtractBits) { ++ CcTest::InitializeVM(); ++ ++ struct TestCase { ++ uint64_t source; ++ int pos; ++ int size; ++ bool sign_extend; ++ uint64_t res; ++ }; ++ ++ // clang-format off ++ struct TestCase tc[] = { ++ //source, pos, size, sign_extend, res; ++ {0x800, 4, 8, false, 0x80}, ++ {0x800, 4, 8, true, 0xFFFFFFFFFFFFFF80}, ++ {0x800, 5, 8, true, 0x40}, ++ {0x40000, 3, 16, false, 0x8000}, ++ {0x40000, 3, 16, true, 0xFFFFFFFFFFFF8000}, ++ {0x40000, 4, 16, true, 0x4000}, ++ {0x200000000, 2, 32, false, 0x80000000}, ++ {0x200000000, 2, 32, true, 0xFFFFFFFF80000000}, ++ {0x200000000, 3, 32, true, 0x40000000}, ++ }; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t result = ++ run_ExtractBits(tc[i].source, tc[i].pos, tc[i].size, tc[i].sign_extend); ++ CHECK_EQ(tc[i].res, result); ++ } ++} ++ ++uint64_t run_InsertBits(uint64_t dest, uint64_t source, int pos, int size) { ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ __ InsertBits(a0, a1, a2, size); ++ __ jirl(zero_reg, ra, 0); ++ ++ CodeDesc desc; ++ masm->GetCode(isolate, &desc); ++ Handle code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); ++ auto f = GeneratedCode::FromCode(*code); ++ uint64_t res = reinterpret_cast(f.Call(dest, source, pos, 0, 0)); ++ return res; ++} ++ ++TEST(InsertBits) { ++ CcTest::InitializeVM(); ++ ++ struct TestCase { ++ uint64_t dest; ++ uint64_t source; ++ int pos; ++ int size; ++ uint64_t res; ++ }; ++ ++ // clang-format off ++ struct TestCase tc[] = { ++ //dest source, pos, size, res; ++ {0x11111111, 0x1234, 32, 16, 0x123411111111}, ++ {0x111111111111, 0xFFFFF, 24, 10, 0x1113FF111111}, ++ {0x1111111111111111, 0xFEDCBA, 16, 4, 0x11111111111A1111}, ++ }; ++ // clang-format on ++ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase); ++ for (size_t i = 0; i < nr_test_cases; ++i) { ++ uint64_t result = ++ run_InsertBits(tc[i].dest, tc[i].source, tc[i].pos, tc[i].size); ++ CHECK_EQ(tc[i].res, result); ++ } ++} ++ ++TEST(Popcnt) { ++ CcTest::InitializeVM(); ++ Isolate* isolate = CcTest::i_isolate(); ++ HandleScope scope(isolate); ++ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); ++ MacroAssembler* masm = &assembler; ++ ++ struct TestCase { ++ uint32_t a; ++ uint64_t b; ++ int expected_a; ++ int expected_b; ++ int result_a; ++ int result_b; ++ }; ++ // clang-format off ++ struct TestCase tc[] = { ++ { 0x12345678, 0x1122334455667788, 
++    { 0x1234,     0x123456,            5,  9, 0, 0},
++    { 0xFFF00000, 0xFFFF000000000000, 12, 16, 0, 0},
++    { 0xFF000012, 0xFFFF000000001234, 10, 21, 0, 0}
++  };
++  // clang-format on
++
++  __ Ld_w(t0, MemOperand(a0, offsetof(TestCase, a)));
++  __ Ld_d(t1, MemOperand(a0, offsetof(TestCase, b)));
++  __ Popcnt_w(t2, t0);
++  __ Popcnt_d(t3, t1);
++  __ St_w(t2, MemOperand(a0, offsetof(TestCase, result_a)));
++  __ St_w(t3, MemOperand(a0, offsetof(TestCase, result_b)));
++  __ jirl(zero_reg, ra, 0);
++
++  CodeDesc desc;
++  masm->GetCode(isolate, &desc);
++  Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
++  auto f = GeneratedCode<F3>::FromCode(*code);
++
++  size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
++  for (size_t i = 0; i < nr_test_cases; ++i) {
++    f.Call(&tc[i], 0, 0, 0, 0);
++    CHECK_EQ(tc[i].expected_a, tc[i].result_a);
++    CHECK_EQ(tc[i].expected_b, tc[i].result_b);
++  }
++}
++
++#undef __
++
++}  // namespace internal
++}  // namespace v8
+diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
+index 9d5cbf05..e4ff98c0 100755
+--- a/deps/v8/tools/dev/gm.py
++++ b/deps/v8/tools/dev/gm.py
+@@ -39,7 +39,7 @@ BUILD_TARGETS_ALL = ["all"]
+ 
+ # All arches that this script understands.
+ ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+-          "s390", "s390x", "android_arm", "android_arm64"]
++          "s390", "s390x", "android_arm", "android_arm64", "loong64"]
+ # Arches that get built/run when you don't specify any.
+ DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
+ # Modes that this script understands.
+@@ -246,7 +246,7 @@ class Config(object):
+     if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
+     if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
+     if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+-                     "s390", "s390x"):
++                     "s390", "s390x", "loong64"):
+       return "\nv8_target_cpu = \"%s\"" % self.arch
+     return ""
+ 
+diff --git a/doc/api/process.md b/doc/api/process.md
+index 601fc892..2b61a4e2 100644
+--- a/doc/api/process.md
++++ b/doc/api/process.md
+@@ -636,8 +636,8 @@ added: v0.5.0
+ * {string}
+ 
+ The operating system CPU architecture for which the Node.js binary was compiled.
+-Possible values are: `'arm'`, `'arm64'`, `'ia32'`, `'mips'`,`'mipsel'`, `'ppc'`,
+-`'ppc64'`, `'s390'`, `'s390x'`, `'x32'`, and `'x64'`.
++Possible values are: `'arm'`, `'arm64'`, `'ia32'`, `'mips'`,`'mipsel'`, `'loong64'`,
++`'ppc'`, `'ppc64'`, `'s390'`, `'s390x'`, `'x32'`, and `'x64'`.
+ 
+ ```js
+ console.log(`This processor architecture is ${process.arch}`);
+diff --git a/test/fixtures/wasi/subdir/input_link.txt b/test/fixtures/wasi/subdir/input_link.txt
+new file mode 100644
+index 00000000..4c380537
+--- /dev/null
++++ b/test/fixtures/wasi/subdir/input_link.txt
+@@ -0,0 +1 @@
++hello from input.txt
+diff --git a/test/fixtures/wasi/subdir/outside.txt b/test/fixtures/wasi/subdir/outside.txt
+new file mode 100644
+index 00000000..044c4b96
+--- /dev/null
++++ b/test/fixtures/wasi/subdir/outside.txt
+@@ -0,0 +1,2 @@
++this file is part of the WASI tests. it exists outside of the sandbox, and
++should be inaccessible from the WASI tests.
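
The ExtractBits and InsertBits test tables above are easiest to sanity-check against a portable reference implementation of the same bit-field semantics: extract `size` bits starting at `pos` (optionally sign-extending from bit `size - 1`), and overwrite `size` bits of `dest` at `pos` with the low bits of `source`. The following standalone C++ sketch mirrors those semantics and spot-checks a few rows of the tables; the *Ref names are illustrative helpers made up here, not V8 APIs.

    #include <cassert>
    #include <cstdint>

    // Reference semantics for the macro-assembler helpers exercised above.
    uint64_t ExtractBitsRef(uint64_t source, int pos, int size, bool sign_extend) {
      uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
      uint64_t field = (source >> pos) & mask;
      // Replicate the field's top bit into the upper bits when sign-extending.
      if (sign_extend && ((field >> (size - 1)) & 1)) field |= ~mask;
      return field;
    }

    uint64_t InsertBitsRef(uint64_t dest, uint64_t source, int pos, int size) {
      uint64_t mask = ((size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1)) << pos;
      return (dest & ~mask) | ((source << pos) & mask);
    }

    int main() {
      // Rows taken from the TestCase tables in the tests above.
      assert(ExtractBitsRef(0x800, 4, 8, false) == 0x80);
      assert(ExtractBitsRef(0x800, 4, 8, true) == 0xFFFFFFFFFFFFFF80);
      assert(ExtractBitsRef(0x200000000, 2, 32, true) == 0xFFFFFFFF80000000);
      assert(InsertBitsRef(0x11111111, 0x1234, 32, 16) == 0x123411111111);
      assert(InsertBitsRef(0x111111111111, 0xFFFFF, 24, 10) == 0x1113FF111111);
      return 0;
    }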
+diff --git a/tools/inspector_protocol/lib/Maybe_h.template b/tools/inspector_protocol/lib/Maybe_h.template
+index 22cfac6b..d1c19bed 100644
+--- a/tools/inspector_protocol/lib/Maybe_h.template
++++ b/tools/inspector_protocol/lib/Maybe_h.template
+@@ -32,10 +32,14 @@
+ #define IP_TARGET_ARCH_MIPS 1
+ #endif
+ 
++#if defined(__loongarch64)
++#define IP_TARGET_ARCH_LOONG64 1
++#endif
++
+ // Allowing the use of noexcept by removing the keyword on older compilers that
+ // do not support adding noexcept to default members.
+ #if ((IP_GNUC_PREREQ(4, 9, 0) && !defined(IP_TARGET_ARCH_MIPS) && \
+-    !defined(IP_TARGET_ARCH_MIPS64)) || \
++    !defined(IP_TARGET_ARCH_MIPS64) && !defined(IP_TARGET_ARCH_LOONG64)) || \
+     (defined(__clang__) && __cplusplus > 201300L))
+ #define IP_NOEXCEPT noexcept
+ #else
+@@ -134,6 +138,7 @@ public:
+ #undef IP_GNUC_PREREQ
+ #undef IP_TARGET_ARCH_MIPS64
+ #undef IP_TARGET_ARCH_MIPS
++#undef IP_TARGET_ARCH_LOONG64
+ #undef IP_NOEXCEPT
+ 
+ #endif // !defined({{"_".join(config.protocol.namespace)}}_Maybe_h)
+diff --git a/tools/v8_gypfiles/toolchain.gypi b/tools/v8_gypfiles/toolchain.gypi
+index d4bad70d..53cabb67 100644
+--- a/tools/v8_gypfiles/toolchain.gypi
++++ b/tools/v8_gypfiles/toolchain.gypi
+@@ -933,6 +933,69 @@
+         }], #'_toolset=="host"
+       ],
+     }], # v8_target_arch=="mips64el"
++    ['v8_target_arch=="loong64"', {
++      'defines': [
++        'V8_TARGET_ARCH_LOONG64',
++      ],
++      'conditions': [
++        [ 'v8_can_use_fpu_instructions=="true"', {
++          'defines': [
++            'CAN_USE_FPU_INSTRUCTIONS',
++          ],
++        }],
++      ],
++      'target_conditions': [
++        ['_toolset=="target"', {
++          'conditions': [
++            ['v8_target_arch==target_arch', {
++              # Target built with a LoongArch CXX compiler.
++              'variables': {
++                'ldso_path%': '
-Requires: npm >= %{npm_epoch}:%{npm_version}-%{npm_release}%{?dist}
+Requires: npm = %{npm_epoch}:%{npm_version}-%{npm_release}%{anolis_release}%{?dist}
 %else
-Recommends: npm >= %{npm_epoch}:%{npm_version}-%{npm_release}%{?dist}
+Recommends: npm = %{npm_epoch}:%{npm_version}-%{npm_release}%{anolis_release}%{?dist}
 %endif
 
 %description
@@ -296,7 +300,7 @@ real-time applications that run across distributed devices.
 %package devel
 Summary: JavaScript runtime - development headers
 Group: Development/Languages
-Requires: %{name}%{?_isa} = %{epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
+Requires: %{name}%{?_isa} = %{epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
 Requires: openssl-devel%{?_isa}
 Requires: zlib-devel%{?_isa}
 Requires: brotli-devel%{?_isa}
@@ -312,7 +316,7 @@ Development headers for the Node.js JavaScript runtime.
 
 %package full-i18n
 Summary: Non-English locale data for Node.js
-Requires: %{name}%{?_isa} = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
+Requires: %{name}%{?_isa} = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
 
 %description full-i18n
 Optional data files to provide full-icu support for Node.js. Remove this
@@ -323,16 +327,16 @@ package to save space if non-English locales are not needed.
 Summary: Node.js Package Manager
 Epoch: %{npm_epoch}
 Version: %{npm_version}
-Release: %{npm_release}%{?dist}
+Release: %{npm_release}%{anolis_release}%{?dist}
 
 # We used to ship npm separately, but it is so tightly integrated with Node.js
 # (and expected to be present on all Node.js systems) that we ship it bundled
 # now.
 Obsoletes: npm < 0:3.5.4-6
 Provides: npm = %{npm_epoch}:%{npm_version}
-Requires: nodejs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
+Requires: nodejs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
 
 %if 0%{?fedora} || 0%{?rhel} >= 8
-Recommends: nodejs-docs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
+Recommends: nodejs-docs = %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
 %endif
 
 # Do not add epoch to the virtual NPM provides or it will break
@@ -352,8 +356,8 @@ BuildArch: noarch
 # We don't require that the main package be installed to
 # use the docs, but if it is installed, make sure the
 # version always matches
-Conflicts: %{name} > %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
-Conflicts: %{name} < %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{?dist}
+Conflicts: %{name} > %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
+Conflicts: %{name} < %{nodejs_epoch}:%{nodejs_version}-%{nodejs_release}%{anolis_release}%{?dist}
 
 %description docs
 The API documentation for the Node.js JavaScript runtime.
@@ -696,6 +700,9 @@ end
 
 %changelog
+* Thu Apr 13 2023 Shi Pujin - 1:14.21.3-1.0.1
+- add LoongArch support
+
 * Mon Mar 06 2023 Jan Staněk - 1:14.21.3-1
 - Rebase to 14.21.3
   Resolves: rhbz#2153712
-- 
Gitee

From 536c1304704e78a85846db4a73b210cb5d09a787 Mon Sep 17 00:00:00 2001
From: Shi Pujin
Date: Mon, 31 Oct 2022 17:32:44 +0800
Subject: [PATCH 3/3] fix build

---
 0001-add-LoongArch-support.patch | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/0001-add-LoongArch-support.patch b/0001-add-LoongArch-support.patch
index 828b05d..df51ac8 100644
--- a/0001-add-LoongArch-support.patch
+++ b/0001-add-LoongArch-support.patch
@@ -1,4 +1,4 @@
-From 5bdab65c2e83c01c048915d0870c83efa4b51294 Mon Sep 17 00:00:00 2001
+From 998f865384dc7d927f79ac8c68dd7fe12d5e8c5a Mon Sep 17 00:00:00 2001
 From: Shi Pujin
 Date: Wed, 26 Oct 2022 15:07:55 +0800
 Subject: [PATCH] add LoongArch support
@@ -41410,7 +41410,7 @@ index 00000000..701b4b8e
 +#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
 diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
 new file mode 100644
-index 00000000..9eae614b
+index 00000000..085cc34b
 --- /dev/null
 +++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
 @@ -0,0 +1,1849 @@
@@ -41816,7 +41816,7 @@ index 00000000..9eae614b
 +void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
 +                                      ValueType type) {
 +  DCHECK_NE(dst_offset, src_offset);
-+  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
++  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
 +  Fill(reg, src_offset, type);
 +  Spill(dst_offset, reg, type);
 +}
@@ -41866,13 +41866,13 @@ index 00000000..9eae614b
 +  MemOperand dst = liftoff::GetStackSlot(offset);
 +  switch (value.type().kind()) {
 +    case ValueType::kI32: {
-+      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
++      LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
 +      TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
 +      St_w(tmp.gp(), dst);
 +      break;
 +    }
 +    case ValueType::kI64: {
-+      LiftoffRegister tmp = GetUnusedRegister(kGpReg);
++      LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
 +      TurboAssembler::li(tmp.gp(), value.to_i64());
 +      St_d(tmp.gp(), dst);
 +      break;
@@ -43085,7 +43085,7 @@ index 00000000..9eae614b
 +}
 +
 +void LiftoffAssembler::CallTrapCallbackForTesting() {
-+  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
++  PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
 +  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
 +}
 +
-- 
Gitee
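
As a final cross-check of the Popcnt test vectors earlier in the series, the expected bit counts can be reproduced on the host with std::bitset. This is a minimal standalone sketch for reviewers, not part of the patch series; the PopcntRef names are made up here.

    #include <bitset>
    #include <cassert>
    #include <cstdint>

    // Host-side reference for the Popcnt_w/Popcnt_d expectations in TEST(Popcnt).
    int PopcntRef32(uint32_t v) { return static_cast<int>(std::bitset<32>(v).count()); }
    int PopcntRef64(uint64_t v) { return static_cast<int>(std::bitset<64>(v).count()); }

    int main() {
      assert(PopcntRef32(0x12345678) == 13);
      assert(PopcntRef64(0x1122334455667788) == 26);
      assert(PopcntRef32(0xFFF00000) == 12);
      assert(PopcntRef64(0xFFFF000000000000) == 16);
      assert(PopcntRef32(0xFF000012) == 10);
      assert(PopcntRef64(0xFFFF000000001234) == 21);
      return 0;
    }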