From 3e176cc9c947ec01c51376300c5b1246c1f70d29 Mon Sep 17 00:00:00 2001 From: Guoqi Chen Date: Fri, 16 Dec 2022 11:28:02 +0800 Subject: [PATCH] Add loongarch64 base support patches. Signed-off-by: Guoqi Chen --- apply-patches | 13 + golang.spec | 26 +- loongarch64.conf | 82 + ...md-internal-sys-declare-loong64-arch.patch | 57 + ...-fix-placement-of-loong64-definition.patch | 87 + ...rnal-add-loong64-constant-definition.patch | 66 + ...configure-go-tool-workflow-for-loong.patch | 39 + ...ster-loong64.Init-function-for-compi.patch | 34 + ...rnal-loong64-implement-Init-function.patch | 984 ++ ...rnal-ssa-config-lower-pass-function-.patch | 36 + ...rnal-ssa-increase-the-bit-width-of-B.patch | 27 + ...rnal-ssa-gen-define-rules-and-operat.patch | 12601 ++++++++++++++++ ...rnal-ssa-inline-memmove-with-known-s.patch | 27 + ...rnal-ssa-add-support-on-loong64-for-.patch | 37 + ...rnal-ssagen-enable-intrinsic-operati.patch | 172 + ...e-internal-fix-test-error-on-loong64.patch | 44 + ...-instructions-and-registers-for-loon.patch | 3323 ++++ ...-helper-function-and-end-to-end-test.patch | 812 + ...abi-cmd-link-support-linker-for-linu.patch | 750 + ...p-for-linux-loong64-and-implement-ru.patch | 989 ++ ...-load-save-TLS-variable-g-on-loong64.patch | 68 + ...e-implement-signal-for-linux-loong64.patch | 454 + ...ntime-support-vdso-for-linux-loong64.patch | 114 + ...t-duffzero-duffcopy-for-linux-loong6.patch | 979 ++ ...ement-asyncPreempt-for-linux-loong64.patch | 231 + ...ort-memclr-memmove-for-linux-loong64.patch | 178 + ...t-syscalls-for-runtime-bootstrap-on-.patch | 626 + ...d-tag-for-common-support-on-linux-lo.patch | 61 + ...e-fix-runtime-test-error-for-loong64.patch | 26 + ...ernal-add-atomic-support-for-loong64.patch | 414 + ...d-cgo-configure-cgo-tool-for-loong64.patch | 47 + ...cgo-function-call-support-for-loong6.patch | 250 + ...mp-cmd-pprof-disassembly-is-not-supp.patch | 55 + ...d-dist-support-dist-tool-for-loong64.patch | 88 + 
...e-vendored-golang.org-x-sys-to-suppo.patch | 2740 ++++ ...e-vendored-golang.org-x-tools-to-sup.patch | 52 + ...-support-basic-byte-operation-on-loo.patch | 297 + ...-reflect-vendor-support-standard-lib.patch | 390 + ...dd-syscall-support-for-linux-loong64.patch | 5006 ++++++ ...all-unix-loong64-use-generic-syscall.patch | 45 + ...misc-test-fix-test-error-for-loong64.patch | 93 + ...-copyright-add-Loongson-into-AUTHORS.patch | 25 + ...040-api-fix-check-errors-for-loong64.patch | 126 + ...nc-breakpoint-implementation-on-loon.patch | 30 + ...r-golang.org-x-sys-for-byteorder-fix.patch | 26 + ...ve-atomic-Cas-Xchg-and-Xadd-intrinsi.patch | 68 + ...cPreempt-implementation-for-errors-o.patch | 101 + ...-add-FuncInfo-SPWRITE-flag-for-linux.patch | 50 + ...ing-TOPFRAME-NOFRAME-flag-for-linux-.patch | 48 + ...loong64-constant-folding-in-division.patch | 148 + ...x-the-vDSO-symbol-version-on-loong64.patch | 39 + ...pu-fix-cpu-cacheLineSize-for-loong64.patch | 36 + ...internal-syscall-always-zero-the-hig.patch | 68 + ...up-unused-function-gosave-on-loong64.patch | 39 + ...d-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch | 85 + ...seless-constant-definitions-SiginfoM.patch | 29 + ...ve-the-resultInArg0-register-checks-.patch | 31 + ...he-fake-mstart-caller-in-systemstack.patch | 39 + ...or-refactoring-of-_rt0_loong64_linux.patch | 39 + ...iteBarrier-frame-size-on-loong64-mip.patch | 55 + ...-loong64-remove-invalid-branch-delay.patch | 48 + ...internal-obj-rename-loong64-instruct.patch | 208 + ...plement-Sqrt-in-assembly-for-loong64.patch | 74 + ...ME-L-H-.W-RDTIME.D-support-for-loong.patch | 114 + ...leCounter-implement-cputicks-on-linu.patch | 82 + ...-add-new-style-LoongArch-reloc-types.patch | 213 + ...ze-the-new-R_LARCH_32_PCREL-type-on-.patch | 39 + ...-fix-runtime.usleep-on-linux-loong64.patch | 55 + ...j-remove-redundant-cnames-on-loong64.patch | 30 + ...ch-g-register-during-VDSO-on-loong64.patch | 113 + ...tore-callee-saved-registers-in-loong.patch | 222 + 
...me-add-comment-for-sys_linux_loong64.patch | 211 + ...ort-for-buildmode-c-shared-on-loong6.patch | 142 + ...support-for-buildmode-c-shared-on-lo.patch | 899 ++ ...-loong64-cmd-internal-objabi-add-c-s.patch | 323 + ...port-for-buildmode-c-shared-on-loong.patch | 264 + ...s-enable-c-shared-feature-on-loong64.patch | 27 + ...o-testcshared-enable-c-shared-test-o.patch | 56 + ...-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch | 97 + ...-loong64-add-ROTR-ROTRV-instructions.patch | 157 + ...-compile-intrinsify-Add64-on-loong64.patch | 277 + ...-compile-intrinsify-Sub64-on-loong64.patch | 186 + ...tect-glibc-vs-musl-ldso-at-link-time.patch | 265 + ...estack_noctxt-SPWRITE-for-linux-loon.patch | 36 + ...dist-cmd-go-enable-pie-buildmode-for.patch | 57 + 85 files changed, 37693 insertions(+), 4 deletions(-) create mode 100644 apply-patches create mode 100644 loongarch64.conf create mode 100644 loongarch64/0001-cmd-internal-sys-declare-loong64-arch.patch create mode 100644 loongarch64/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch create mode 100644 loongarch64/0003-internal-add-loong64-constant-definition.patch create mode 100644 loongarch64/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch create mode 100644 loongarch64/0005-cmd-compile-register-loong64.Init-function-for-compi.patch create mode 100644 loongarch64/0006-cmd-compile-internal-loong64-implement-Init-function.patch create mode 100644 loongarch64/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch create mode 100644 loongarch64/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch create mode 100644 loongarch64/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch create mode 100644 loongarch64/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch create mode 100644 loongarch64/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch create mode 100644 loongarch64/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch create mode 
100644 loongarch64/0013-cmd-compile-internal-fix-test-error-on-loong64.patch create mode 100644 loongarch64/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch create mode 100644 loongarch64/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch create mode 100644 loongarch64/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch create mode 100644 loongarch64/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch create mode 100644 loongarch64/0018-runtime-load-save-TLS-variable-g-on-loong64.patch create mode 100644 loongarch64/0019-runtime-implement-signal-for-linux-loong64.patch create mode 100644 loongarch64/0020-runtime-support-vdso-for-linux-loong64.patch create mode 100644 loongarch64/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch create mode 100644 loongarch64/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch create mode 100644 loongarch64/0023-runtime-support-memclr-memmove-for-linux-loong64.patch create mode 100644 loongarch64/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch create mode 100644 loongarch64/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch create mode 100644 loongarch64/0026-runtime-fix-runtime-test-error-for-loong64.patch create mode 100644 loongarch64/0027-runtime-internal-add-atomic-support-for-loong64.patch create mode 100644 loongarch64/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch create mode 100644 loongarch64/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch create mode 100644 loongarch64/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch create mode 100644 loongarch64/0031-cmd-dist-support-dist-tool-for-loong64.patch create mode 100644 loongarch64/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch create mode 100644 loongarch64/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch create mode 100644 loongarch64/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch create mode 
100644 loongarch64/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch create mode 100644 loongarch64/0036-syscall-add-syscall-support-for-linux-loong64.patch create mode 100644 loongarch64/0037-internal-syscall-unix-loong64-use-generic-syscall.patch create mode 100644 loongarch64/0038-misc-test-fix-test-error-for-loong64.patch create mode 100644 loongarch64/0039-copyright-add-Loongson-into-AUTHORS.patch create mode 100644 loongarch64/0040-api-fix-check-errors-for-loong64.patch create mode 100644 loongarch64/0041-runtime-fixed-func-breakpoint-implementation-on-loon.patch create mode 100644 loongarch64/0042-update-vendor-golang.org-x-sys-for-byteorder-fix.patch create mode 100644 loongarch64/0043-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch create mode 100644 loongarch64/0044-runtime-fix-asyncPreempt-implementation-for-errors-o.patch create mode 100644 loongarch64/0045-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch create mode 100644 loongarch64/0046-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch create mode 100644 loongarch64/0047-cmd-compile-fix-loong64-constant-folding-in-division.patch create mode 100644 loongarch64/0048-runtime-fix-the-vDSO-symbol-version-on-loong64.patch create mode 100644 loongarch64/0049-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch create mode 100644 loongarch64/0050-syscall-runtime-internal-syscall-always-zero-the-hig.patch create mode 100644 loongarch64/0051-runtime-clean-up-unused-function-gosave-on-loong64.patch create mode 100644 loongarch64/0052-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch create mode 100644 loongarch64/0053-runtime-delete-useless-constant-definitions-SiginfoM.patch create mode 100644 loongarch64/0054-cmd-compile-remove-the-resultInArg0-register-checks-.patch create mode 100644 loongarch64/0055-runtime-remove-the-fake-mstart-caller-in-systemstack.patch create mode 100644 loongarch64/0056-runtime-minor-refactoring-of-_rt0_loong64_linux.patch create mode 
100644 loongarch64/0057-runtime-fix-gcWriteBarrier-frame-size-on-loong64-mip.patch create mode 100644 loongarch64/0058-cmd-internal-obj-loong64-remove-invalid-branch-delay.patch create mode 100644 loongarch64/0059-cmd-compile-cmd-internal-obj-rename-loong64-instruct.patch create mode 100644 loongarch64/0060-math-implement-Sqrt-in-assembly-for-loong64.patch create mode 100644 loongarch64/0061-cmd-asm-add-RDTIME-L-H-.W-RDTIME.D-support-for-loong.patch create mode 100644 loongarch64/0062-runtime-use-StableCounter-implement-cputicks-on-linu.patch create mode 100644 loongarch64/0063-debug-elf-add-new-style-LoongArch-reloc-types.patch create mode 100644 loongarch64/0064-cmd-link-recognize-the-new-R_LARCH_32_PCREL-type-on-.patch create mode 100644 loongarch64/0065-runtime-fix-runtime.usleep-on-linux-loong64.patch create mode 100644 loongarch64/0066-cmd-internal-obj-remove-redundant-cnames-on-loong64.patch create mode 100644 loongarch64/0067-runtime-save-fetch-g-register-during-VDSO-on-loong64.patch create mode 100644 loongarch64/0068-runtime-save-restore-callee-saved-registers-in-loong.patch create mode 100644 loongarch64/0069-runtime-add-comment-for-sys_linux_loong64.patch create mode 100644 loongarch64/0070-runtime-add-support-for-buildmode-c-shared-on-loong6.patch create mode 100644 loongarch64/0071-cmd-compile-add-support-for-buildmode-c-shared-on-lo.patch create mode 100644 loongarch64/0072-cmd-internal-obj-loong64-cmd-internal-objabi-add-c-s.patch create mode 100644 loongarch64/0073-cmd-link-add-support-for-buildmode-c-shared-on-loong.patch create mode 100644 loongarch64/0074-cmd-internal-sys-enable-c-shared-feature-on-loong64.patch create mode 100644 loongarch64/0075-cmd-dist-misc-cgo-testcshared-enable-c-shared-test-o.patch create mode 100644 loongarch64/0076-cmd-internal-obj-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch create mode 100644 loongarch64/0077-cmd-internal-obj-loong64-add-ROTR-ROTRV-instructions.patch create mode 100644 
loongarch64/0078-cmd-compile-intrinsify-Add64-on-loong64.patch create mode 100644 loongarch64/0079-cmd-compile-intrinsify-Sub64-on-loong64.patch create mode 100644 loongarch64/0080-cmd-link-detect-glibc-vs-musl-ldso-at-link-time.patch create mode 100644 loongarch64/0081-runtime-mark-morestack_noctxt-SPWRITE-for-linux-loon.patch create mode 100644 loongarch64/0082-cmd-compile-cmd-dist-cmd-go-enable-pie-buildmode-for.patch diff --git a/apply-patches b/apply-patches new file mode 100644 index 0000000..21931cc --- /dev/null +++ b/apply-patches @@ -0,0 +1,13 @@ +#!/bin/bash + +set -ex + +if [ ! -d loongarch64 ]; then + tar -xf loongarch64.tar.gz +fi + +for p in $(cat loongarch64.conf); do + patch -p1 -s -i loongarch64/$p +done + +rm -rf $0 loongarch64 loongarch64.tar.gz loongarch64.conf diff --git a/golang.spec b/golang.spec index 62640cc..1fab441 100644 --- a/golang.spec +++ b/golang.spec @@ -12,19 +12,19 @@ %define __find_requires %{nil} %bcond_with bootstrap -%ifarch x86_64 aarch64 riscv64 +%ifarch x86_64 aarch64 riscv64 loongarch64 %bcond_without ignore_tests %else %bcond_with ignore_tests %endif -%ifarch x86_64 aarch64 riscv64 +%ifarch x86_64 aarch64 riscv64 loongarch64 %global external_linker 1 %else %global external_linker 0 %endif -%ifarch x86_64 aarch64 riscv64 +%ifarch x86_64 aarch64 riscv64 loongarch64 %global cgo_enabled 1 %else %global cgo_enabled 0 @@ -59,15 +59,23 @@ %ifarch riscv64 %global gohostarch riscv64 %endif +%ifarch loongarch64 +%global gohostarch loong64 +%endif Name: golang Version: 1.17.3 -Release: 12 +Release: 13 Summary: The Go Programming Language License: BSD and Public Domain URL: https://golang.org/ Source0: https://dl.google.com/go/go1.17.3.src.tar.gz +%ifarch loongarch64 +Source1: loongarch64.tar.gz +Source2: loongarch64.conf +Source3: apply-patches +%endif %if !%{golang_bootstrap} BuildRequires: gcc-go >= 5 @@ -219,6 +227,13 @@ end %prep %autosetup -n go -p1 +%ifarch loongarch64 +cp %{SOURCE1} . +cp %{SOURCE2} . +cp %{SOURCE3} . 
+sh ./apply-patches +%endif + %build uname -a cat /proc/cpuinfo @@ -411,6 +426,9 @@ fi %files devel -f go-tests.list -f go-misc.list -f go-src.list %changelog +* Thu Dec 15 2022 chenguoqi - 1.17.3-13 +- Add loongarch64 base support + * Fri Oct 11 2022 hanchao - 1.17.3-12 - Type:CVE - CVE:CVE-2022-41716 diff --git a/loongarch64.conf b/loongarch64.conf new file mode 100644 index 0000000..bd53f85 --- /dev/null +++ b/loongarch64.conf @@ -0,0 +1,82 @@ +0001-cmd-internal-sys-declare-loong64-arch.patch +0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch +0003-internal-add-loong64-constant-definition.patch +0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch +0005-cmd-compile-register-loong64.Init-function-for-compi.patch +0006-cmd-compile-internal-loong64-implement-Init-function.patch +0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch +0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch +0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch +0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch +0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch +0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch +0013-cmd-compile-internal-fix-test-error-on-loong64.patch +0014-cmd-internal-obj-instructions-and-registers-for-loon.patch +0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch +0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch +0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch +0018-runtime-load-save-TLS-variable-g-on-loong64.patch +0019-runtime-implement-signal-for-linux-loong64.patch +0020-runtime-support-vdso-for-linux-loong64.patch +0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch +0022-runtime-implement-asyncPreempt-for-linux-loong64.patch +0023-runtime-support-memclr-memmove-for-linux-loong64.patch +0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch 
+0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch +0026-runtime-fix-runtime-test-error-for-loong64.patch +0027-runtime-internal-add-atomic-support-for-loong64.patch +0028-cmd-cgo-configure-cgo-tool-for-loong64.patch +0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch +0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch +0031-cmd-dist-support-dist-tool-for-loong64.patch +0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch +0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch +0034-internal-bytealg-support-basic-byte-operation-on-loo.patch +0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch +0036-syscall-add-syscall-support-for-linux-loong64.patch +0037-internal-syscall-unix-loong64-use-generic-syscall.patch +0038-misc-test-fix-test-error-for-loong64.patch +0039-copyright-add-Loongson-into-AUTHORS.patch +0040-api-fix-check-errors-for-loong64.patch +0041-runtime-fixed-func-breakpoint-implementation-on-loon.patch +0042-update-vendor-golang.org-x-sys-for-byteorder-fix.patch +0043-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch +0044-runtime-fix-asyncPreempt-implementation-for-errors-o.patch +0045-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch +0046-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch +0047-cmd-compile-fix-loong64-constant-folding-in-division.patch +0048-runtime-fix-the-vDSO-symbol-version-on-loong64.patch +0049-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch +0050-syscall-runtime-internal-syscall-always-zero-the-hig.patch +0051-runtime-clean-up-unused-function-gosave-on-loong64.patch +0052-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch +0053-runtime-delete-useless-constant-definitions-SiginfoM.patch +0054-cmd-compile-remove-the-resultInArg0-register-checks-.patch +0055-runtime-remove-the-fake-mstart-caller-in-systemstack.patch +0056-runtime-minor-refactoring-of-_rt0_loong64_linux.patch 
+0057-runtime-fix-gcWriteBarrier-frame-size-on-loong64-mip.patch +0058-cmd-internal-obj-loong64-remove-invalid-branch-delay.patch +0059-cmd-compile-cmd-internal-obj-rename-loong64-instruct.patch +0060-math-implement-Sqrt-in-assembly-for-loong64.patch +0061-cmd-asm-add-RDTIME-L-H-.W-RDTIME.D-support-for-loong.patch +0062-runtime-use-StableCounter-implement-cputicks-on-linu.patch +0063-debug-elf-add-new-style-LoongArch-reloc-types.patch +0064-cmd-link-recognize-the-new-R_LARCH_32_PCREL-type-on-.patch +0065-runtime-fix-runtime.usleep-on-linux-loong64.patch +0066-cmd-internal-obj-remove-redundant-cnames-on-loong64.patch +0067-runtime-save-fetch-g-register-during-VDSO-on-loong64.patch +0068-runtime-save-restore-callee-saved-registers-in-loong.patch +0069-runtime-add-comment-for-sys_linux_loong64.patch +0070-runtime-add-support-for-buildmode-c-shared-on-loong6.patch +0071-cmd-compile-add-support-for-buildmode-c-shared-on-lo.patch +0072-cmd-internal-obj-loong64-cmd-internal-objabi-add-c-s.patch +0073-cmd-link-add-support-for-buildmode-c-shared-on-loong.patch +0074-cmd-internal-sys-enable-c-shared-feature-on-loong64.patch +0075-cmd-dist-misc-cgo-testcshared-enable-c-shared-test-o.patch +0076-cmd-internal-obj-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch +0077-cmd-internal-obj-loong64-add-ROTR-ROTRV-instructions.patch +0078-cmd-compile-intrinsify-Add64-on-loong64.patch +0079-cmd-compile-intrinsify-Sub64-on-loong64.patch +0080-cmd-link-detect-glibc-vs-musl-ldso-at-link-time.patch +0081-runtime-mark-morestack_noctxt-SPWRITE-for-linux-loon.patch +0082-cmd-compile-cmd-dist-cmd-go-enable-pie-buildmode-for.patch diff --git a/loongarch64/0001-cmd-internal-sys-declare-loong64-arch.patch b/loongarch64/0001-cmd-internal-sys-declare-loong64-arch.patch new file mode 100644 index 0000000..ea1e421 --- /dev/null +++ b/loongarch64/0001-cmd-internal-sys-declare-loong64-arch.patch @@ -0,0 +1,57 @@ +From 24c67e919a28cd2e8874fdd18737d640fdb0f2f1 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: 
Sun, 20 Jun 2021 12:59:45 +0800 +Subject: [PATCH 01/82] cmd/internal/sys: declare loong64 arch + +Updates #46229 + +Change-Id: Icb736f2440443e9245872b091d13e5bdfb6cb01a +Reviewed-on: https://go-review.googlesource.com/c/go/+/339009 +Reviewed-by: Meng Zhuo +Reviewed-by: Cherry Mui +Trust: Meng Zhuo +Trust: Michael Knyszek +Run-TryBot: Meng Zhuo +TryBot-Result: Go Bot +--- + src/cmd/internal/sys/arch.go | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/src/cmd/internal/sys/arch.go b/src/cmd/internal/sys/arch.go +index a3e39768b6..e7063fde89 100644 +--- a/src/cmd/internal/sys/arch.go ++++ b/src/cmd/internal/sys/arch.go +@@ -22,6 +22,7 @@ const ( + RISCV64 + S390X + Wasm ++ Loong64 + ) + + // Arch represents an individual architecture. +@@ -189,6 +190,16 @@ var ArchWasm = &Arch{ + Alignment: 1, + } + ++var ArchLoong64 = &Arch{ ++ Name: "loong64", ++ Family: Loong64, ++ ByteOrder: binary.LittleEndian, ++ PtrSize: 8, ++ RegSize: 8, ++ MinLC: 4, ++ Alignment: 8, // Unaligned accesses are not guaranteed to be fast ++} ++ + var Archs = [...]*Arch{ + Arch386, + ArchAMD64, +@@ -203,4 +214,5 @@ var Archs = [...]*Arch{ + ArchRISCV64, + ArchS390X, + ArchWasm, ++ ArchLoong64, + } +-- +2.38.0 + diff --git a/loongarch64/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch b/loongarch64/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch new file mode 100644 index 0000000..9ec4103 --- /dev/null +++ b/loongarch64/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch @@ -0,0 +1,87 @@ +From 67ae3a354f9eee281c46369973373d9738babdf0 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Mon, 6 Sep 2021 15:17:11 +0800 +Subject: [PATCH 02/82] cmd/internal/sys: fix placement of loong64 definition + +In rebasing the patch series up to CL 339015, the branches were messed +up by me, and changes from v3 to v4 of CL 339009 was lost. Fix the +ordering to restore alphabetical order per original review. 
+ +Change-Id: I8e57c96e996c4f962cab684a9d305a8dbdeea43b +Reviewed-on: https://go-review.googlesource.com/c/go/+/347731 +Run-TryBot: Tobias Klauser +TryBot-Result: Go Bot +Reviewed-by: Ian Lance Taylor +Trust: Meng Zhuo +--- + src/cmd/internal/sys/arch.go | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/src/cmd/internal/sys/arch.go b/src/cmd/internal/sys/arch.go +index e7063fde89..4b2b4c38a0 100644 +--- a/src/cmd/internal/sys/arch.go ++++ b/src/cmd/internal/sys/arch.go +@@ -16,13 +16,13 @@ const ( + ARM + ARM64 + I386 ++ Loong64 + MIPS + MIPS64 + PPC64 + RISCV64 + S390X + Wasm +- Loong64 + ) + + // Arch represents an individual architecture. +@@ -100,6 +100,16 @@ var ArchARM64 = &Arch{ + Alignment: 1, + } + ++var ArchLoong64 = &Arch{ ++ Name: "loong64", ++ Family: Loong64, ++ ByteOrder: binary.LittleEndian, ++ PtrSize: 8, ++ RegSize: 8, ++ MinLC: 4, ++ Alignment: 8, // Unaligned accesses are not guaranteed to be fast ++} ++ + var ArchMIPS = &Arch{ + Name: "mips", + Family: MIPS, +@@ -190,21 +200,12 @@ var ArchWasm = &Arch{ + Alignment: 1, + } + +-var ArchLoong64 = &Arch{ +- Name: "loong64", +- Family: Loong64, +- ByteOrder: binary.LittleEndian, +- PtrSize: 8, +- RegSize: 8, +- MinLC: 4, +- Alignment: 8, // Unaligned accesses are not guaranteed to be fast +-} +- + var Archs = [...]*Arch{ + Arch386, + ArchAMD64, + ArchARM, + ArchARM64, ++ ArchLoong64, + ArchMIPS, + ArchMIPSLE, + ArchMIPS64, +@@ -214,5 +215,4 @@ var Archs = [...]*Arch{ + ArchRISCV64, + ArchS390X, + ArchWasm, +- ArchLoong64, + } +-- +2.38.0 + diff --git a/loongarch64/0003-internal-add-loong64-constant-definition.patch b/loongarch64/0003-internal-add-loong64-constant-definition.patch new file mode 100644 index 0000000..ac78291 --- /dev/null +++ b/loongarch64/0003-internal-add-loong64-constant-definition.patch @@ -0,0 +1,66 @@ +From ac6fe96cdc2768e046a3cde422cd8a1969ffcfc1 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 15 Nov 2021 20:53:47 +0800 
+Subject: [PATCH 03/82] internal: add loong64 constant definition + +Change-Id: I39d42e5959391e47bf621b3bdd3d95de72f023cc +--- + src/internal/cpu/cpu_loong64.go | 12 ++++++++++++ + src/runtime/internal/sys/arch.go | 1 + + src/runtime/internal/sys/arch_loong64.go | 13 +++++++++++++ + 3 files changed, 26 insertions(+) + create mode 100644 src/internal/cpu/cpu_loong64.go + create mode 100644 src/runtime/internal/sys/arch_loong64.go + +diff --git a/src/internal/cpu/cpu_loong64.go b/src/internal/cpu/cpu_loong64.go +new file mode 100644 +index 0000000000..d0ff93455f +--- /dev/null ++++ b/src/internal/cpu/cpu_loong64.go +@@ -0,0 +1,12 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package cpu ++ ++const CacheLinePadSize = 32 ++ ++func doinit() {} +diff --git a/src/runtime/internal/sys/arch.go b/src/runtime/internal/sys/arch.go +index 3c99a2f7da..154673b0b3 100644 +--- a/src/runtime/internal/sys/arch.go ++++ b/src/runtime/internal/sys/arch.go +@@ -10,6 +10,7 @@ const ( + AMD64 ArchFamilyType = iota + ARM + ARM64 ++ LOONG64 + I386 + MIPS + MIPS64 +diff --git a/src/runtime/internal/sys/arch_loong64.go b/src/runtime/internal/sys/arch_loong64.go +new file mode 100644 +index 0000000000..4ed338800a +--- /dev/null ++++ b/src/runtime/internal/sys/arch_loong64.go +@@ -0,0 +1,13 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package sys ++ ++const ( ++ _ArchFamily = LOONG64 ++ _DefaultPhysPageSize = 16384 ++ _PCQuantum = 4 ++ _MinFrameSize = 8 ++ _StackAlign = PtrSize ++) +-- +2.38.0 + diff --git a/loongarch64/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch b/loongarch64/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch new file mode 100644 index 0000000..e112a84 --- /dev/null +++ b/loongarch64/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch @@ -0,0 +1,39 @@ +From 0912d552120d93cf4f6c429bc0a37e035a6da238 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:57:43 +0800 +Subject: [PATCH 04/82] cmd/go/internal: configure go tool workflow for loong64 + +Change-Id: I6b537a7d842b0683586917fe7ea7cd4d70d888de +--- + src/cmd/go/internal/imports/build.go | 1 + + src/cmd/go/internal/work/exec.go | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go +index ff6bea6777..5d4580c4a5 100644 +--- a/src/cmd/go/internal/imports/build.go ++++ b/src/cmd/go/internal/imports/build.go +@@ -326,6 +326,7 @@ var KnownArch = map[string]bool{ + "mips64le": true, + "mips64p32": true, + "mips64p32le": true, ++ "loong64": true, + "ppc": true, + "riscv": true, + "riscv64": true, +diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go +index 5a225fb9f1..83d4161c6c 100644 +--- a/src/cmd/go/internal/work/exec.go ++++ b/src/cmd/go/internal/work/exec.go +@@ -2648,6 +2648,8 @@ func (b *Builder) gccArchArgs() []string { + } else if cfg.GOMIPS == "softfloat" { + return append(args, "-msoft-float") + } ++ case "loong64": ++ return []string{"-mabi=lp64d"} + case "ppc64": + if cfg.Goos == "aix" { + return []string{"-maix64"} +-- +2.38.0 + diff --git a/loongarch64/0005-cmd-compile-register-loong64.Init-function-for-compi.patch b/loongarch64/0005-cmd-compile-register-loong64.Init-function-for-compi.patch new file mode 100644 index 0000000..7b43217 --- /dev/null 
+++ b/loongarch64/0005-cmd-compile-register-loong64.Init-function-for-compi.patch @@ -0,0 +1,34 @@ +From 7bd5760e25b36d3139669605a76775f90fc8e2f9 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 16:41:33 +0800 +Subject: [PATCH 05/82] cmd/compile: register loong64.Init function for + compiler + +Change-Id: Ia3cb07af626e3422e43e3834baf15b7c8fad2326 +--- + src/cmd/compile/main.go | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go +index 3af1e1fafd..7d38bea7fa 100644 +--- a/src/cmd/compile/main.go ++++ b/src/cmd/compile/main.go +@@ -10,6 +10,7 @@ import ( + "cmd/compile/internal/arm64" + "cmd/compile/internal/base" + "cmd/compile/internal/gc" ++ "cmd/compile/internal/loong64" + "cmd/compile/internal/mips" + "cmd/compile/internal/mips64" + "cmd/compile/internal/ppc64" +@@ -29,6 +30,7 @@ var archInits = map[string]func(*ssagen.ArchInfo){ + "amd64": amd64.Init, + "arm": arm.Init, + "arm64": arm64.Init, ++ "loong64": loong64.Init, + "mips": mips.Init, + "mipsle": mips.Init, + "mips64": mips64.Init, +-- +2.38.0 + diff --git a/loongarch64/0006-cmd-compile-internal-loong64-implement-Init-function.patch b/loongarch64/0006-cmd-compile-internal-loong64-implement-Init-function.patch new file mode 100644 index 0000000..ca397ba --- /dev/null +++ b/loongarch64/0006-cmd-compile-internal-loong64-implement-Init-function.patch @@ -0,0 +1,984 @@ +From 0a4d27939f703b4f00f6488204329c826877dc7b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 16:47:08 +0800 +Subject: [PATCH 06/82] cmd/compile/internal/loong64: implement Init function + for loong64 + +Change-Id: Iab3f13b70a41f31f412b59801def3106f5eb7c11 +--- + src/cmd/compile/internal/loong64/galign.go | 24 + + src/cmd/compile/internal/loong64/ggen.go | 59 ++ + src/cmd/compile/internal/loong64/ssa.go | 864 +++++++++++++++++++++ + 3 files changed, 947 insertions(+) + create mode 100644 src/cmd/compile/internal/loong64/galign.go + create mode 100644 
src/cmd/compile/internal/loong64/ggen.go + create mode 100644 src/cmd/compile/internal/loong64/ssa.go + +diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go +new file mode 100644 +index 0000000000..ac8b5dbf23 +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/galign.go +@@ -0,0 +1,24 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/compile/internal/ssa" ++ "cmd/compile/internal/ssagen" ++ "cmd/internal/obj/loong64" ++) ++ ++func Init(arch *ssagen.ArchInfo) { ++ arch.LinkArch = &loong64.Linkloong64 ++ arch.REGSP = loong64.REGSP ++ arch.MAXWIDTH = 1 << 50 ++ arch.ZeroRange = zerorange ++ arch.Ginsnop = ginsnop ++ arch.Ginsnopdefer = ginsnop ++ ++ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} ++ arch.SSAGenValue = ssaGenValue ++ arch.SSAGenBlock = ssaGenBlock ++} +diff --git a/src/cmd/compile/internal/loong64/ggen.go b/src/cmd/compile/internal/loong64/ggen.go +new file mode 100644 +index 0000000000..c3649cf152 +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/ggen.go +@@ -0,0 +1,59 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++import ( ++ "cmd/compile/internal/ir" ++ "cmd/compile/internal/objw" ++ "cmd/compile/internal/types" ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { ++ if cnt == 0 { ++ return p ++ } ++ if cnt < int64(4*types.PtrSize) { ++ for i := int64(0); i < cnt; i += int64(types.PtrSize) { ++ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, 8+off+i) ++ } ++ } else if cnt <= int64(128*types.PtrSize) { ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0) ++ p.Reg = loong64.REGSP ++ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ir.Syms.Duffzero ++ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) ++ } else { ++ // ADDV $(8+frame+lo-8), SP, r1 ++ // ADDV $cnt, r1, r2 ++ // loop: ++ // MOVV R0, (Widthptr)r1 ++ // ADDV $Widthptr, r1 ++ // BNE r1, r2, loop ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0) ++ p.Reg = loong64.REGSP ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0) ++ p.Reg = loong64.REGRT1 ++ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, int64(types.PtrSize)) ++ p1 := p ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0) ++ p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) ++ p.Reg = loong64.REGRT2 ++ p.To.SetTarget(p1) ++ } ++ ++ return p ++} ++ ++func ginsnop(pp *objw.Progs) *obj.Prog { ++ p := pp.Prog(loong64.ANOR) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REG_R0 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R0 ++ return p ++} +diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go +new file mode 
100644 +index 0000000000..4f3aa6858f +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/ssa.go +@@ -0,0 +1,864 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "math" ++ ++ "cmd/compile/internal/base" ++ "cmd/compile/internal/ir" ++ "cmd/compile/internal/logopt" ++ "cmd/compile/internal/ssa" ++ "cmd/compile/internal/ssagen" ++ "cmd/compile/internal/types" ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++// isFPreg reports whether r is an FP register ++func isFPreg(r int16) bool { ++ return loong64.REG_F0 <= r && r <= loong64.REG_F31 ++} ++ ++// loadByType returns the load instruction of the given type. ++func loadByType(t *types.Type, r int16) obj.As { ++ if isFPreg(r) { ++ if t.Size() == 4 { ++ return loong64.AMOVF ++ } else { ++ return loong64.AMOVD ++ } ++ } else { ++ switch t.Size() { ++ case 1: ++ if t.IsSigned() { ++ return loong64.AMOVB ++ } else { ++ return loong64.AMOVBU ++ } ++ case 2: ++ if t.IsSigned() { ++ return loong64.AMOVH ++ } else { ++ return loong64.AMOVHU ++ } ++ case 4: ++ if t.IsSigned() { ++ return loong64.AMOVW ++ } else { ++ return loong64.AMOVWU ++ } ++ case 8: ++ return loong64.AMOVV ++ } ++ } ++ panic("bad load type") ++} ++ ++// storeByType returns the store instruction of the given type. 
++func storeByType(t *types.Type, r int16) obj.As { ++ if isFPreg(r) { ++ if t.Size() == 4 { ++ return loong64.AMOVF ++ } else { ++ return loong64.AMOVD ++ } ++ } else { ++ switch t.Size() { ++ case 1: ++ return loong64.AMOVB ++ case 2: ++ return loong64.AMOVH ++ case 4: ++ return loong64.AMOVW ++ case 8: ++ return loong64.AMOVV ++ } ++ } ++ panic("bad store type") ++} ++ ++func ssaGenValue(s *ssagen.State, v *ssa.Value) { ++ switch v.Op { ++ case ssa.OpCopy, ssa.OpLOONG64MOVVreg: ++ if v.Type.IsMemory() { ++ return ++ } ++ x := v.Args[0].Reg() ++ y := v.Reg() ++ if x == y { ++ return ++ } ++ as := loong64.AMOVV ++ if isFPreg(x) && isFPreg(y) { ++ as = loong64.AMOVD ++ } ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = x ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = y ++ case ssa.OpLOONG64MOVVnop: ++ if v.Reg() != v.Args[0].Reg() { ++ v.Fatalf("input[0] and output not in same register %s", v.LongString()) ++ } ++ // nothing to do ++ case ssa.OpLoadReg: ++ if v.Type.IsFlags() { ++ v.Fatalf("load flags not implemented: %v", v.LongString()) ++ return ++ } ++ r := v.Reg() ++ p := s.Prog(loadByType(v.Type, r)) ++ ssagen.AddrAuto(&p.From, v.Args[0]) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ case ssa.OpStoreReg: ++ if v.Type.IsFlags() { ++ v.Fatalf("store flags not implemented: %v", v.LongString()) ++ return ++ } ++ r := v.Args[0].Reg() ++ p := s.Prog(storeByType(v.Type, r)) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = r ++ ssagen.AddrAuto(&p.To, v) ++ case ssa.OpLOONG64ADDV, ++ ssa.OpLOONG64SUBV, ++ ssa.OpLOONG64AND, ++ ssa.OpLOONG64OR, ++ ssa.OpLOONG64XOR, ++ ssa.OpLOONG64NOR, ++ ssa.OpLOONG64SLLV, ++ ssa.OpLOONG64SRLV, ++ ssa.OpLOONG64SRAV, ++ ssa.OpLOONG64ADDF, ++ ssa.OpLOONG64ADDD, ++ ssa.OpLOONG64SUBF, ++ ssa.OpLOONG64SUBD, ++ ssa.OpLOONG64MULF, ++ ssa.OpLOONG64MULD, ++ ssa.OpLOONG64DIVF, ++ ssa.OpLOONG64DIVD: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = 
obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64SGT, ++ ssa.OpLOONG64SGTU: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64ADDVconst, ++ ssa.OpLOONG64SUBVconst, ++ ssa.OpLOONG64ANDconst, ++ ssa.OpLOONG64ORconst, ++ ssa.OpLOONG64XORconst, ++ ssa.OpLOONG64NORconst, ++ ssa.OpLOONG64SLLVconst, ++ ssa.OpLOONG64SRLVconst, ++ ssa.OpLOONG64SRAVconst, ++ ssa.OpLOONG64SGTconst, ++ ssa.OpLOONG64SGTUconst: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = v.AuxInt ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MULV: ++ p := s.Prog(loong64.AMULV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AMULHV) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64MULVU: ++ p := s.Prog(loong64.AMULV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AMULHVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64DIVV: ++ p := s.Prog(loong64.ADIVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AREMV) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64DIVVU: ++ p := s.Prog(loong64.ADIVVU) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = 
obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AREMVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64MOVVconst: ++ r := v.Reg() ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = v.AuxInt ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ if isFPreg(r) { ++ // cannot move into FP or special registers, use TMP as intermediate ++ p.To.Reg = loong64.REGTMP ++ p = s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGTMP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ } ++ case ssa.OpLOONG64MOVFconst, ++ ssa.OpLOONG64MOVDconst: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_FCONST ++ p.From.Val = math.Float64frombits(uint64(v.AuxInt)) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64CMPEQF, ++ ssa.OpLOONG64CMPEQD, ++ ssa.OpLOONG64CMPGEF, ++ ssa.OpLOONG64CMPGED, ++ ssa.OpLOONG64CMPGTF, ++ ssa.OpLOONG64CMPGTD: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = v.Args[1].Reg() ++ case ssa.OpLOONG64MOVVaddr: ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_ADDR ++ p.From.Reg = v.Args[0].Reg() ++ var wantreg string ++ // MOVV $sym+off(base), R ++ // the assembler expands it as the following: ++ // - base is SP: add constant offset to SP (R3) ++ // when constant is large, tmp register (R30) may be used ++ // - base is SB: load external address with relocation ++ switch v.Aux.(type) { ++ default: ++ v.Fatalf("aux is of unknown type %T", v.Aux) ++ case *obj.LSym: ++ wantreg = "SB" ++ ssagen.AddAux(&p.From, v) ++ case *ir.Name: ++ wantreg = "SP" ++ ssagen.AddAux(&p.From, v) ++ case nil: ++ // No sym, just MOVV $off(SP), R ++ wantreg = "SP" ++ p.From.Offset = v.AuxInt ++ } ++ if reg := v.Args[0].RegName(); reg != wantreg { ++ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) ++ } ++ p.To.Type = 
obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MOVBload, ++ ssa.OpLOONG64MOVBUload, ++ ssa.OpLOONG64MOVHload, ++ ssa.OpLOONG64MOVHUload, ++ ssa.OpLOONG64MOVWload, ++ ssa.OpLOONG64MOVWUload, ++ ssa.OpLOONG64MOVVload, ++ ssa.OpLOONG64MOVFload, ++ ssa.OpLOONG64MOVDload: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.From, v) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MOVBstore, ++ ssa.OpLOONG64MOVHstore, ++ ssa.OpLOONG64MOVWstore, ++ ssa.OpLOONG64MOVVstore, ++ ssa.OpLOONG64MOVFstore, ++ ssa.OpLOONG64MOVDstore: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.To, v) ++ case ssa.OpLOONG64MOVBstorezero, ++ ssa.OpLOONG64MOVHstorezero, ++ ssa.OpLOONG64MOVWstorezero, ++ ssa.OpLOONG64MOVVstorezero: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.To, v) ++ case ssa.OpLOONG64MOVBreg, ++ ssa.OpLOONG64MOVBUreg, ++ ssa.OpLOONG64MOVHreg, ++ ssa.OpLOONG64MOVHUreg, ++ ssa.OpLOONG64MOVWreg, ++ ssa.OpLOONG64MOVWUreg: ++ a := v.Args[0] ++ for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg { ++ a = a.Args[0] ++ } ++ if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 { ++ // LoadReg from a narrower type does an extension, except loading ++ // to a floating point register. So only eliminate the extension ++ // if it is loaded to an integer register. 
++ ++ t := a.Type ++ switch { ++ case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned(): ++ // arg is a proper-typed load, already zero/sign-extended, don't extend again ++ if v.Reg() == v.Args[0].Reg() { ++ return ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ return ++ default: ++ } ++ } ++ fallthrough ++ case ssa.OpLOONG64MOVWF, ++ ssa.OpLOONG64MOVWD, ++ ssa.OpLOONG64TRUNCFW, ++ ssa.OpLOONG64TRUNCDW, ++ ssa.OpLOONG64MOVVF, ++ ssa.OpLOONG64MOVVD, ++ ssa.OpLOONG64TRUNCFV, ++ ssa.OpLOONG64TRUNCDV, ++ ssa.OpLOONG64MOVFD, ++ ssa.OpLOONG64MOVDF, ++ ssa.OpLOONG64NEGF, ++ ssa.OpLOONG64NEGD, ++ ssa.OpLOONG64SQRTD, ++ ssa.OpLOONG64SQRTF: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64NEGV: ++ // SUB from REGZERO ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64DUFFZERO: ++ // runtime.duffzero expects start address - 8 in R19 ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = 8 ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p = s.Prog(obj.ADUFFZERO) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ir.Syms.Duffzero ++ p.To.Offset = v.AuxInt ++ case ssa.OpLOONG64LoweredZero: ++ // SUBV $8, R19 ++ // MOVV R0, 8(R19) ++ // ADDV $8, R19 ++ // BNE Rarg1, R19, -2(PC) ++ // arg1 is the address 
of the last element to zero ++ var sz int64 ++ var mov obj.As ++ switch { ++ case v.AuxInt%8 == 0: ++ sz = 8 ++ mov = loong64.AMOVV ++ case v.AuxInt%4 == 0: ++ sz = 4 ++ mov = loong64.AMOVW ++ case v.AuxInt%2 == 0: ++ sz = 2 ++ mov = loong64.AMOVH ++ default: ++ sz = 1 ++ mov = loong64.AMOVB ++ } ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = sz ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p2 := s.Prog(mov) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGZERO ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = loong64.REG_R19 ++ p2.To.Offset = sz ++ p3 := s.Prog(loong64.AADDVU) ++ p3.From.Type = obj.TYPE_CONST ++ p3.From.Offset = sz ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = loong64.REG_R19 ++ p4 := s.Prog(loong64.ABNE) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Args[1].Reg() ++ p4.Reg = loong64.REG_R19 ++ p4.To.Type = obj.TYPE_BRANCH ++ p4.To.SetTarget(p2) ++ case ssa.OpLOONG64DUFFCOPY: ++ p := s.Prog(obj.ADUFFCOPY) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ir.Syms.Duffcopy ++ p.To.Offset = v.AuxInt ++ case ssa.OpLOONG64LoweredMove: ++ // SUBV $8, R19 ++ // MOVV 8(R19), Rtmp ++ // MOVV Rtmp, (R4) ++ // ADDV $8, R19 ++ // ADDV $8, R4 ++ // BNE Rarg2, R19, -4(PC) ++ // arg2 is the address of the last element of src ++ var sz int64 ++ var mov obj.As ++ switch { ++ case v.AuxInt%8 == 0: ++ sz = 8 ++ mov = loong64.AMOVV ++ case v.AuxInt%4 == 0: ++ sz = 4 ++ mov = loong64.AMOVW ++ case v.AuxInt%2 == 0: ++ sz = 2 ++ mov = loong64.AMOVH ++ default: ++ sz = 1 ++ mov = loong64.AMOVB ++ } ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = sz ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p2 := s.Prog(mov) ++ p2.From.Type = obj.TYPE_MEM ++ p2.From.Reg = loong64.REG_R19 ++ p2.From.Offset = sz ++ p2.To.Type = obj.TYPE_REG ++ p2.To.Reg = loong64.REGTMP ++ p3 := s.Prog(mov) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ 
p3.To.Type = obj.TYPE_MEM ++ p3.To.Reg = loong64.REG_R4 ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_CONST ++ p4.From.Offset = sz ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = loong64.REG_R19 ++ p5 := s.Prog(loong64.AADDVU) ++ p5.From.Type = obj.TYPE_CONST ++ p5.From.Offset = sz ++ p5.To.Type = obj.TYPE_REG ++ p5.To.Reg = loong64.REG_R4 ++ p6 := s.Prog(loong64.ABNE) ++ p6.From.Type = obj.TYPE_REG ++ p6.From.Reg = v.Args[2].Reg() ++ p6.Reg = loong64.REG_R19 ++ p6.To.Type = obj.TYPE_BRANCH ++ p6.To.SetTarget(p2) ++ case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter: ++ s.Call(v) ++ case ssa.OpLOONG64LoweredWB: ++ p := s.Prog(obj.ACALL) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = v.Aux.(*obj.LSym) ++ case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC: ++ p := s.Prog(obj.ACALL) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] ++ s.UseArgs(16) // space used in callee args area by assembly stubs ++ case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64: ++ as := loong64.AMOVV ++ switch v.Op { ++ case ssa.OpLOONG64LoweredAtomicLoad8: ++ as = loong64.AMOVB ++ case ssa.OpLOONG64LoweredAtomicLoad32: ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicStore8, ssa.OpLOONG64LoweredAtomicStore32, ssa.OpLOONG64LoweredAtomicStore64: ++ as := loong64.AMOVV ++ switch v.Op { ++ case ssa.OpLOONG64LoweredAtomicStore8: ++ as = loong64.AMOVB ++ case ssa.OpLOONG64LoweredAtomicStore32: ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ 
s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicStorezero32, ssa.OpLOONG64LoweredAtomicStorezero64: ++ as := loong64.AMOVV ++ if v.Op == ssa.OpLOONG64LoweredAtomicStorezero32 { ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64: ++ // DBAR ++ // MOVV Rarg1, Rtmp ++ // LL (Rarg0), Rout ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REGTMP ++ p1 := s.Prog(ll) ++ p1.From.Type = obj.TYPE_MEM ++ p1.From.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64: ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV Rarg1, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV Rarg1, Rout ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(ll) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ p1 := s.Prog(loong64.AADDVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Reg0() ++ 
p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Args[1].Reg() ++ p4.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = v.Reg0() ++ case ssa.OpLOONG64LoweredAtomicAddconst32, ssa.OpLOONG64LoweredAtomicAddconst64: ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV $auxint, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV $auxint, Rout ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicAddconst32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(ll) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ p1 := s.Prog(loong64.AADDVU) ++ p1.From.Type = obj.TYPE_CONST ++ p1.From.Offset = v.AuxInt ++ p1.Reg = v.Reg0() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_CONST ++ p4.From.Offset = v.AuxInt ++ p4.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = v.Reg0() ++ case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64: ++ // MOVV $0, Rout ++ // DBAR ++ // LL (Rarg0), Rtmp ++ // BNE Rtmp, Rarg1, 4(PC) ++ // MOVV Rarg2, Rout ++ // SC Rout, (Rarg0) ++ // BEQ Rout, -4(PC) ++ // DBAR ++ ll := loong64.ALLV ++ sc := 
loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicCas32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ s.Prog(loong64.ADBAR) ++ p1 := s.Prog(ll) ++ p1.From.Type = obj.TYPE_MEM ++ p1.From.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(loong64.ABNE) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = v.Args[1].Reg() ++ p2.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_BRANCH ++ p3 := s.Prog(loong64.AMOVV) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = v.Args[2].Reg() ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = v.Reg0() ++ p4 := s.Prog(sc) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_MEM ++ p4.To.Reg = v.Args[0].Reg() ++ p5 := s.Prog(loong64.ABEQ) ++ p5.From.Type = obj.TYPE_REG ++ p5.From.Reg = v.Reg0() ++ p5.To.Type = obj.TYPE_BRANCH ++ p5.To.SetTarget(p1) ++ p6 := s.Prog(loong64.ADBAR) ++ p2.To.SetTarget(p6) ++ case ssa.OpLOONG64LoweredNilCheck: ++ // Issue a load which will fault if arg is nil. 
++ p := s.Prog(loong64.AMOVB) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.From, v) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REGTMP ++ if logopt.Enabled() { ++ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) ++ } ++ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers ++ base.WarnfAt(v.Pos, "generated nil check") ++ } ++ case ssa.OpLOONG64FPFlagTrue, ++ ssa.OpLOONG64FPFlagFalse: ++ // MOVV $0, r ++ // BFPF 2(PC) ++ // MOVV $1, r ++ branch := loong64.ABFPF ++ if v.Op == ssa.OpLOONG64FPFlagFalse { ++ branch = loong64.ABFPT ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ p2 := s.Prog(branch) ++ p2.To.Type = obj.TYPE_BRANCH ++ p3 := s.Prog(loong64.AMOVV) ++ p3.From.Type = obj.TYPE_CONST ++ p3.From.Offset = 1 ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = v.Reg() ++ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land ++ p2.To.SetTarget(p4) ++ case ssa.OpLOONG64LoweredGetClosurePtr: ++ // Closure pointer is R22 (loong64.REGCTXT). ++ ssagen.CheckLoweredGetClosurePtr(v) ++ case ssa.OpLOONG64LoweredGetCallerSP: ++ // caller's SP is FixedFrameSize below the address of the first arg ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_ADDR ++ p.From.Offset = -base.Ctxt.FixedFrameSize() ++ p.From.Name = obj.NAME_PARAM ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64LoweredGetCallerPC: ++ p := s.Prog(obj.AGETCALLERPC) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpClobber, ssa.OpClobberReg: ++ // TODO: implement for clobberdead experiment. Nop is ok for now. 
++ default: ++ v.Fatalf("genValue not implemented: %s", v.LongString()) ++ } ++} ++ ++var blockJump = map[ssa.BlockKind]struct { ++ asm, invasm obj.As ++}{ ++ ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE}, ++ ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ}, ++ ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ}, ++ ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ}, ++ ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ}, ++ ssa.BlockLOONG64GTZ: {loong64.ABGTZ, loong64.ABLEZ}, ++ ssa.BlockLOONG64FPT: {loong64.ABFPT, loong64.ABFPF}, ++ ssa.BlockLOONG64FPF: {loong64.ABFPF, loong64.ABFPT}, ++} ++ ++func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { ++ switch b.Kind { ++ case ssa.BlockPlain: ++ if b.Succs[0].Block() != next { ++ p := s.Prog(obj.AJMP) ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) ++ } ++ case ssa.BlockDefer: ++ // defer returns in R19: ++ // 0 if we should continue executing ++ // 1 if we should jump to deferreturn call ++ p := s.Prog(loong64.ABNE) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.Reg = loong64.REG_R19 ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) ++ if b.Succs[0].Block() != next { ++ p := s.Prog(obj.AJMP) ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) ++ } ++ case ssa.BlockExit: ++ case ssa.BlockRet: ++ s.Prog(obj.ARET) ++ case ssa.BlockRetJmp: ++ p := s.Prog(obj.ARET) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = b.Aux.(*obj.LSym) ++ case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE, ++ ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ, ++ ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ, ++ ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF: ++ jmp := blockJump[b.Kind] ++ var p *obj.Prog ++ switch next { ++ case b.Succs[0].Block(): ++ p = s.Br(jmp.invasm, b.Succs[1].Block()) ++ case b.Succs[1].Block(): ++ p = s.Br(jmp.asm, 
b.Succs[0].Block()) ++ default: ++ if b.Likely != ssa.BranchUnlikely { ++ p = s.Br(jmp.asm, b.Succs[0].Block()) ++ s.Br(obj.AJMP, b.Succs[1].Block()) ++ } else { ++ p = s.Br(jmp.invasm, b.Succs[1].Block()) ++ s.Br(obj.AJMP, b.Succs[0].Block()) ++ } ++ } ++ if !b.Controls[0].Type.IsFlags() { ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = b.Controls[0].Reg() ++ } ++ default: ++ b.Fatalf("branch not implemented: %s", b.LongString()) ++ } ++} +-- +2.38.0 + diff --git a/loongarch64/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch b/loongarch64/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch new file mode 100644 index 0000000..10ee5e7 --- /dev/null +++ b/loongarch64/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch @@ -0,0 +1,36 @@ +From c6fd8eee2778681e19de9715c5e281cdadaa6b22 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:48:09 +0800 +Subject: [PATCH 07/82] cmd/compile/internal/ssa: config lower pass function + and register on loong64 + +Change-Id: I50d20eb22f2108d245513de8ac95ebe0b7e1a1dc +--- + src/cmd/compile/internal/ssa/config.go | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go +index a8393a1999..c18a723032 100644 +--- a/src/cmd/compile/internal/ssa/config.go ++++ b/src/cmd/compile/internal/ssa/config.go +@@ -262,6 +262,17 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config + c.FPReg = framepointerRegMIPS64 + c.LinkReg = linkRegMIPS64 + c.hasGReg = true ++ case "loong64": ++ c.PtrSize = 8 ++ c.RegSize = 8 ++ c.lowerBlock = rewriteBlockLOONG64 ++ c.lowerValue = rewriteValueLOONG64 ++ c.registers = registersLOONG64[:] ++ c.gpRegMask = gpRegMaskLOONG64 ++ c.fpRegMask = fpRegMaskLOONG64 ++ c.FPReg = framepointerRegLOONG64 ++ c.LinkReg = linkRegLOONG64 ++ c.hasGReg = true + case "s390x": + c.PtrSize = 8 + c.RegSize = 8 +-- +2.38.0 + diff --git 
a/loongarch64/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch b/loongarch64/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch new file mode 100644 index 0000000..79071aa --- /dev/null +++ b/loongarch64/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch @@ -0,0 +1,27 @@ +From 77df886848f73395e63994633f120fd062a04755 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:53:07 +0800 +Subject: [PATCH 08/82] cmd/compile/internal/ssa: increase the bit width of + BlockKind type + +Change-Id: I246d9c22334d0ea9e1440d29df05c9ec2d472b30 +--- + src/cmd/compile/internal/ssa/block.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go +index 71ca774431..2e9330444d 100644 +--- a/src/cmd/compile/internal/ssa/block.go ++++ b/src/cmd/compile/internal/ssa/block.go +@@ -111,7 +111,7 @@ func (e Edge) String() string { + // Plain [] [next] + // If [boolean Value] [then, else] + // Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc) +-type BlockKind int8 ++type BlockKind int16 + + // short form print + func (b *Block) String() string { +-- +2.38.0 + diff --git a/loongarch64/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch b/loongarch64/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch new file mode 100644 index 0000000..83a79f3 --- /dev/null +++ b/loongarch64/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch @@ -0,0 +1,12601 @@ +From 0e5c37d246b5d903b46858f2912b6b40c0d04a7b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:31:18 +0800 +Subject: [PATCH 09/82] cmd/compile/internal/ssa{,/gen}: define rules and + operation on loong64 + +Change-Id: Ia362ed7ba5d84046697aadbc8d6d4cbe495f6076 +--- + .../compile/internal/ssa/gen/LOONG64.rules | 679 ++ + .../compile/internal/ssa/gen/LOONG64Ops.go | 524 ++ + src/cmd/compile/internal/ssa/opGen.go 
| 2584 +++++- + .../compile/internal/ssa/rewriteLOONG64.go | 7943 +++++++++++++++++ + 4 files changed, 11357 insertions(+), 373 deletions(-) + create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64.rules + create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64Ops.go + create mode 100644 src/cmd/compile/internal/ssa/rewriteLOONG64.go + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +new file mode 100644 +index 0000000000..3fd4552aa4 +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -0,0 +1,679 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++(Add(Ptr|64|32|16|8) ...) => (ADDV ...) ++(Add(32|64)F ...) => (ADD(F|D) ...) ++ ++(Sub(Ptr|64|32|16|8) ...) => (SUBV ...) ++(Sub(32|64)F ...) => (SUB(F|D) ...) ++ ++(Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) ++(Mul(32|64)F ...) => (MUL(F|D) ...) ++(Mul64uhilo ...) => (MULVU ...) ++(Select0 (Mul64uover x y)) => (Select1 (MULVU x y)) ++(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) ++ ++(Hmul64 x y) => (Select0 (MULV x y)) ++(Hmul64u x y) => (Select0 (MULVU x y)) ++(Hmul32 x y) => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) ++(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) ++ ++(Div64 x y) => (Select1 (DIVV x y)) ++(Div64u x y) => (Select1 (DIVVU x y)) ++(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Div(32|64)F ...) => (DIV(F|D) ...) 
++ ++(Mod64 x y) => (Select0 (DIVV x y)) ++(Mod64u x y) => (Select0 (DIVVU x y)) ++(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ ++// (x + y) / 2 with x>=y => (x - y) / 2 + y ++(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) ++ ++(And(64|32|16|8) ...) => (AND ...) ++(Or(64|32|16|8) ...) => (OR ...) ++(Xor(64|32|16|8) ...) => (XOR ...) ++ ++// shifts ++// hardware instruction uses only the low 6 bits of the shift ++// we compare to 64 to ensure Go semantics for large shifts ++(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh64x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh32x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh16x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh8x64 x 
y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh8x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) ++(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) ++(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) ++(Rsh64Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) ++ ++(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) ++(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) ++(Rsh32Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) ++ ++(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) ++(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) ++(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Rsh16Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) ++ ++(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) ++(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) ++(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) ++(Rsh8Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) 
(ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ ++(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++// rotates ++(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) ++(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) ++(RotateLeft32 x (MOVVconst [c])) 
=> (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) ++(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) ++ ++// unary ops ++(Neg(64|32|16|8) ...) => (NEGV ...) ++(Neg(32|64)F ...) => (NEG(F|D) ...) ++ ++(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) ++ ++(Sqrt ...) => (SQRTD ...) ++(Sqrt32 ...) => (SQRTF ...) ++ ++// boolean ops -- booleans are represented with 0=false, 1=true ++(AndB ...) => (AND ...) ++(OrB ...) => (OR ...) ++(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) ++(NeqB ...) => (XOR ...) ++(Not x) => (XORconst [1] x) ++ ++// constants ++(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) ++(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) ++(ConstNil) => (MOVVconst [0]) ++(ConstBool [t]) => (MOVVconst [int64(b2i(t))]) ++ ++(Slicemask x) => (SRAVconst (NEGV x) [63]) ++ ++// truncations ++// Because we ignore high parts of registers, truncates are just copies. ++(Trunc16to8 ...) => (Copy ...) ++(Trunc32to8 ...) => (Copy ...) ++(Trunc32to16 ...) => (Copy ...) ++(Trunc64to8 ...) => (Copy ...) ++(Trunc64to16 ...) => (Copy ...) ++(Trunc64to32 ...) => (Copy ...) ++ ++// Zero-/Sign-extensions ++(ZeroExt8to16 ...) => (MOVBUreg ...) ++(ZeroExt8to32 ...) => (MOVBUreg ...) ++(ZeroExt16to32 ...) => (MOVHUreg ...) ++(ZeroExt8to64 ...) => (MOVBUreg ...) ++(ZeroExt16to64 ...) => (MOVHUreg ...) ++(ZeroExt32to64 ...) => (MOVWUreg ...) ++ ++(SignExt8to16 ...) => (MOVBreg ...) ++(SignExt8to32 ...) => (MOVBreg ...) ++(SignExt16to32 ...) => (MOVHreg ...) ++(SignExt8to64 ...) => (MOVBreg ...) ++(SignExt16to64 ...) => (MOVHreg ...) ++(SignExt32to64 ...) => (MOVWreg ...) ++ ++// float <=> int conversion ++(Cvt32to32F ...) => (MOVWF ...) ++(Cvt32to64F ...) => (MOVWD ...) ++(Cvt64to32F ...) => (MOVVF ...) ++(Cvt64to64F ...) => (MOVVD ...) ++(Cvt32Fto32 ...) => (TRUNCFW ...) ++(Cvt64Fto32 ...) => (TRUNCDW ...) ++(Cvt32Fto64 ...) => (TRUNCFV ...) ++(Cvt64Fto64 ...) => (TRUNCDV ...) 
++(Cvt32Fto64F ...) => (MOVFD ...) ++(Cvt64Fto32F ...) => (MOVDF ...) ++ ++(CvtBoolToUint8 ...) => (Copy ...) ++ ++(Round(32|64)F ...) => (Copy ...) ++ ++// comparisons ++(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) ++(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) ++(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) ++ ++(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) ++(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) ++(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) ++(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) ++(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) ++(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) ++ ++(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) ++(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) ++(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) ++(Less64 x y) => (SGT y x) ++(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN ++ ++(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) ++(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) ++(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) ++(Less64U x y) => (SGTU y x) ++ ++(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) ++(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) ++(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) ++(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) ++(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN ++ ++(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU 
(ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) ++ ++(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr) ++(OffPtr [off] ptr) => (ADDVconst [off] ptr) ++ ++(Addr {sym} base) => (MOVVaddr {sym} base) ++(LocalAddr {sym} base _) => (MOVVaddr {sym} base) ++ ++// loads ++(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) ++(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) ++(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) ++(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) ++(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) ++(Load ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem) ++(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem) ++(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) ++(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) ++(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) ++ ++// stores ++(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem) ++ ++// zeroing ++(Zero [0] _ mem) => mem ++(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem) ++(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore ptr (MOVVconst [0]) mem) ++(Zero [2] ptr mem) => ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore ptr (MOVVconst 
[0]) mem) ++(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [4] ptr mem) => ++ (MOVBstore [3] ptr (MOVVconst [0]) ++ (MOVBstore [2] ptr (MOVVconst [0]) ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem)))) ++(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore ptr (MOVVconst [0]) mem) ++(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [4] ptr (MOVVconst [0]) ++ (MOVWstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [6] ptr (MOVVconst [0]) ++ (MOVHstore [4] ptr (MOVVconst [0]) ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem)))) ++ ++(Zero [3] ptr mem) => ++ (MOVBstore [2] ptr (MOVVconst [0]) ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [4] ptr (MOVVconst [0]) ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [8] ptr (MOVVconst [0]) ++ (MOVWstore [4] ptr (MOVVconst [0]) ++ (MOVWstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [8] ptr (MOVVconst [0]) ++ (MOVVstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [16] ptr (MOVVconst [0]) ++ (MOVVstore [8] ptr (MOVVconst [0]) ++ (MOVVstore [0] ptr (MOVVconst [0]) mem))) ++ ++// medium zeroing uses a duff device ++// 8, and 128 are magic constants, see runtime/mkduff.go ++(Zero [s] {t} ptr mem) ++ && s%8 == 0 && s > 24 && s <= 8*128 ++ && t.Alignment()%8 == 0 && !config.noDuffDevice => ++ (DUFFZERO [8 * (128 - s/8)] ptr mem) ++ ++// large or unaligned zeroing uses a loop ++(Zero [s] {t} ptr mem) ++ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 => ++ (LoweredZero 
[t.Alignment()] ++ ptr ++ (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) ++ mem) ++ ++// moves ++(Move [0] _ _ mem) => mem ++(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) ++(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore dst (MOVHload src mem) mem) ++(Move [2] dst src mem) => ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem)) ++(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore dst (MOVWload src mem) mem) ++(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [2] dst (MOVHload [2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem)) ++(Move [4] dst src mem) => ++ (MOVBstore [3] dst (MOVBload [3] src mem) ++ (MOVBstore [2] dst (MOVBload [2] src mem) ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem)))) ++(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore dst (MOVVload src mem) mem) ++(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [4] dst (MOVWload [4] src mem) ++ (MOVWstore dst (MOVWload src mem) mem)) ++(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [6] dst (MOVHload [6] src mem) ++ (MOVHstore [4] dst (MOVHload [4] src mem) ++ (MOVHstore [2] dst (MOVHload [2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem)))) ++ ++(Move [3] dst src mem) => ++ (MOVBstore [2] dst (MOVBload [2] src mem) ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem))) ++(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [4] dst (MOVHload [4] src mem) ++ (MOVHstore [2] dst (MOVHload [2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem))) ++(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [8] dst (MOVWload [8] src mem) ++ (MOVWstore [4] dst (MOVWload [4] src mem) ++ (MOVWstore dst (MOVWload src mem) mem))) ++(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [8] dst (MOVVload [8] src mem) ++ (MOVVstore 
dst (MOVVload src mem) mem)) ++(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [16] dst (MOVVload [16] src mem) ++ (MOVVstore [8] dst (MOVVload [8] src mem) ++ (MOVVstore dst (MOVVload src mem) mem))) ++ ++// medium move uses a duff device ++(Move [s] {t} dst src mem) ++ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 ++ && !config.noDuffDevice && logLargeCopy(v, s) => ++ (DUFFCOPY [16 * (128 - s/8)] dst src mem) ++// 16 and 128 are magic constants. 16 is the number of bytes to encode: ++// MOVV (R1), R23 ++// ADDV $8, R1 ++// MOVV R23, (R2) ++// ADDV $8, R2 ++// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy. ++ ++// large or unaligned move uses a loop ++(Move [s] {t} dst src mem) ++ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => ++ (LoweredMove [t.Alignment()] ++ dst ++ src ++ (ADDVconst src [s-moveSize(t.Alignment(), config)]) ++ mem) ++ ++// calls ++(StaticCall ...) => (CALLstatic ...) ++(ClosureCall ...) => (CALLclosure ...) ++(InterCall ...) => (CALLinter ...) ++ ++// atomic intrinsics ++(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) ++(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) ++ ++(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) ++(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) ++ ++(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) ++ ++(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) ++ ++(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) ++ ++// checks ++(NilCheck ...) => (LoweredNilCheck ...) ++(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) ++(IsInBounds idx len) => (SGTU len idx) ++(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) ++ ++// pseudo-ops ++(GetClosurePtr ...) => (LoweredGetClosurePtr ...) ++(GetCallerSP ...) => (LoweredGetCallerSP ...) ++(GetCallerPC ...) => (LoweredGetCallerPC ...) ++ ++(If cond yes no) => (NE cond yes no) ++ ++// Write barrier. ++(WB ...) => (LoweredWB ...) 
++ ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) ++ ++// Optimizations ++ ++// Absorb boolean tests into block ++(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) ++(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) ++(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) ++(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) ++(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) ++(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) ++(NE (SGTUconst [1] x) yes no) => (EQ x yes no) ++(EQ (SGTUconst [1] x) yes no) => (NE x yes no) ++(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) ++(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) ++(NE (SGTconst [0] x) yes no) => (LTZ x yes no) ++(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) ++(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) ++(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) ++ ++// fold offset into address ++(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) ++ ++// fold address into load/store ++(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) ++(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) ++(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) 
&& is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) ++(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) ++(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) ++(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) ++(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) ++(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) ++(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVWstorezero [off1] {sym} 
(ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ ++(MOVBstore [off1] {sym1} (MOVVaddr [off2] 
{sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ ++// store zero ++(LoweredAtomicStore(32|64) ptr (MOVVconst 
[0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) ++(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) ++(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) ++ ++// don't extend after proper load ++(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVHload _ _)) => (MOVVreg x) ++(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) ++ ++// fold double extensions ++(MOVBreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVHreg _)) => (MOVVreg x) ++(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVHreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVWreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) ++ ++// don't extend before store ++(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr 
(MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) ++(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) ++ ++// if a register move has only 1 use, just use the same register without emitting instruction ++// MOVVnop doesn't emit instruction, only for ensuring the type. ++(MOVVreg x) && x.Uses == 1 => (MOVVnop x) ++ ++// fold constant into arithmatic ops ++(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x) ++(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) ++(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) ++(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) ++(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) ++(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) ++ ++(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) ++(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) ++(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) ++(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) ++(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) ++(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) ++ ++(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) ++(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) ++ ++// mul by constant ++(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x) ++(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0]) ++(Select1 (MULVU x (MOVVconst [1]))) => x ++(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x) ++ ++// div by 
constant ++(Select1 (DIVVU x (MOVVconst [1]))) => x ++(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x) ++(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod ++(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod ++ ++// generic simplifications ++(ADDV x (NEGV y)) => (SUBV x y) ++(SUBV x x) => (MOVVconst [0]) ++(SUBV (MOVVconst [0]) x) => (NEGV x) ++(AND x x) => x ++(OR x x) => x ++(XOR x x) => (MOVVconst [0]) ++ ++// remove redundant *const ops ++(ADDVconst [0] x) => x ++(SUBVconst [0] x) => x ++(ANDconst [0] _) => (MOVVconst [0]) ++(ANDconst [-1] x) => x ++(ORconst [0] x) => x ++(ORconst [-1] _) => (MOVVconst [-1]) ++(XORconst [0] x) => x ++(XORconst [-1] x) => (NORconst [0] x) ++ ++// generic constant folding ++(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d]) ++(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x) ++(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x) ++(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c]) ++(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x) ++(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x) ++(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d< (MOVVconst [int64(uint64(d)>>uint64(c))]) ++(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) ++(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) ++(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d]) ++(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))]) ++(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod ++(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod ++(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) ++(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) ++(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) ++(ORconst [c] (ORconst [d] x)) && 
is32Bit(c|d) => (ORconst [c|d] x) ++(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d]) ++(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x) ++(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)]) ++(NEGV (MOVVconst [c])) => (MOVVconst [-c]) ++(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))]) ++(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))]) ++(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))]) ++(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) ++(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) ++(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) ++(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) ++//(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero32 ptr mem) ++//(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero64 ptr mem) ++//(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [c] ptr mem) ++//(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) ++ ++// constant comparisons ++(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) ++(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) ++(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) ++(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) ++ ++// other known comparisons ++(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) ++(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) ++(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) ++(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTUconst [c] (MOVHUreg _)) && 0xffff < 
uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) ++(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) ++(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) ++ ++// absorb constants into branches ++(EQ (MOVVconst [0]) yes no) => (First yes no) ++(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) ++(NE (MOVVconst [0]) yes no) => (First no yes) ++(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) ++(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) ++(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes) ++(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) ++(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes) ++(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) ++(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) ++(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) ++(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +new file mode 100644 +index 0000000000..1f0fec79dc +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +@@ -0,0 +1,524 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build ignore ++// +build ignore ++ ++package main ++ ++import "strings" ++ ++// Notes: ++// - Integer types live in the low portion of registers. Upper portions are junk. ++// - Boolean types use the low-order byte of a register. 0=false, 1=true. ++// Upper bytes are junk. ++// - *const instructions may use a constant larger than the instruction can encode. 
++// In this case the assembler expands to multiple instructions and uses tmp ++// register (R23). ++ ++// Suffixes encode the bit width of various instructions. ++// V (vlong) = 64 bit ++// WU (word) = 32 bit unsigned ++// W (word) = 32 bit ++// H (half word) = 16 bit ++// HU = 16 bit unsigned ++// B (byte) = 8 bit ++// BU = 8 bit unsigned ++// F (float) = 32 bit float ++// D (double) = 64 bit float ++ ++// Note: registers not used in regalloc are not included in this list, ++// so that regmask stays within int64 ++// Be careful when hand coding regmasks. ++var regNamesLOONG64 = []string{ ++ "R0", // constant 0 ++ "R1", ++ "SP", // aka R3 ++ "R4", ++ "R5", ++ "R6", ++ "R7", ++ "R8", ++ "R9", ++ "R10", ++ "R11", ++ "R12", ++ "R13", ++ "R14", ++ "R15", ++ "R16", ++ "R17", ++ "R18", ++ "R19", ++ "R20", ++ "R21", ++ "g", // aka R22 ++ "R23", ++ "R24", ++ "R25", ++ "R26", ++ "R27", ++ "R28", ++ "R29", ++ // R30 is REGTMP not used in regalloc ++ "R31", ++ ++ "F0", ++ "F1", ++ "F2", ++ "F3", ++ "F4", ++ "F5", ++ "F6", ++ "F7", ++ "F8", ++ "F9", ++ "F10", ++ "F11", ++ "F12", ++ "F13", ++ "F14", ++ "F15", ++ "F16", ++ "F17", ++ "F18", ++ "F19", ++ "F20", ++ "F21", ++ "F22", ++ "F23", ++ "F24", ++ "F25", ++ "F26", ++ "F27", ++ "F28", ++ "F29", ++ "F30", ++ "F31", ++ ++ // If you add registers, update asyncPreempt in runtime. ++ ++ // pseudo-registers ++ "SB", ++} ++ ++func init() { ++ // Make map from reg names to reg integers. 
++ if len(regNamesLOONG64) > 64 { ++ panic("too many registers") ++ } ++ num := map[string]int{} ++ for i, name := range regNamesLOONG64 { ++ num[name] = i ++ } ++ buildReg := func(s string) regMask { ++ m := regMask(0) ++ for _, r := range strings.Split(s, " ") { ++ if n, ok := num[r]; ok { ++ m |= regMask(1) << uint(n) ++ continue ++ } ++ panic("register " + r + " not found") ++ } ++ return m ++ } ++ ++ // Common individual register masks ++ var ( ++ gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP ++ gps = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") | buildReg("g") ++ gpg = gp | buildReg("g") ++ gpsp = gp | buildReg("SP") ++ gpspg = gpg | buildReg("SP") ++ gpspsbg = gpspg | buildReg("SB") ++ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") ++ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g ++ r1 = buildReg("R19") ++ r2 = buildReg("R18") ++ r3 = buildReg("R17") ++ r4 = buildReg("R4") ++ ) ++ // Common regInfo ++ var ( ++ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} ++ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} ++ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} ++ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} ++ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} ++ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} ++ gpstore0 = regInfo{inputs: []regMask{gpspsbg}} ++ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} ++ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} ++ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} ++ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} ++ fp21 = 
regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} ++ fp2flags = regInfo{inputs: []regMask{fp, fp}} ++ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} ++ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} ++ readflags = regInfo{inputs: nil, outputs: []regMask{gp}} ++ ) ++ ops := []opData{ ++ // binary ops ++ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 ++ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. ++ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 ++ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt ++ ++ { ++ name: "MULV", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(Int64,Int64)", ++ }, ++ ++ { ++ name: "MULVU", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(UInt64,UInt64)", ++ }, ++ ++ { ++ name: "DIVV", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(Int64,Int64)", ++ }, ++ ++ { ++ name: "DIVVU", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(UInt64,UInt64)", ++ }, ++ ++ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 ++ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 ++ {name: "SUBF", argLength: 
2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 ++ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 ++ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 ++ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 ++ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 ++ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 ++ ++ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 ++ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt ++ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 ++ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt ++ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1 ++ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt ++ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) ++ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) ++ ++ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 ++ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 ++ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 ++ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 ++ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 ++ ++ // shifts ++ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 ++ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt ++ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64 ++ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned ++ {name: 
"SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64 ++ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed ++ ++ // comparisons ++ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise ++ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise ++ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise ++ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise ++ ++ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 ++ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 ++ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 ++ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 ++ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 ++ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 ++ ++ // moves ++ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint ++ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float ++ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float ++ ++ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, 
aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB ++ ++ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ ++ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. 
++ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ ++ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem. 
++ ++ // conversions ++ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte ++ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte ++ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half ++ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half ++ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word ++ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word ++ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0 ++ ++ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register ++ ++ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 ++ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 ++ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32 ++ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64 ++ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 ++ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 ++ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64 ++ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64 ++ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 ++ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 ++ ++ // function calls ++ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem ++ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem ++ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem ++ ++ // duffzero ++ // arg0 = address of memory to zero ++ // arg1 = mem ++ // auxint = offset into duffzero code to start executing ++ // returns mem ++ // R1 aka loong64.REGRT1 changed as side effect ++ { ++ name: "DUFFZERO", ++ aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gp}, ++ clobbers: buildReg("R19 R1"), ++ }, ++ faultOnNilArg0: true, ++ }, ++ ++ // duffcopy ++ // arg0 = address of dst memory (in R20, changed as side effect) REGRT2 ++ // arg1 = address of src memory (in R19, changed as side effect) REGRT1 ++ // arg2 = mem ++ // auxint = offset into duffcopy code to start executing ++ // returns mem ++ { ++ name: "DUFFCOPY", ++ aux: "Int64", ++ argLength: 3, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R20"), buildReg("R19")}, ++ clobbers: buildReg("R19 R20 R1"), ++ }, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ }, ++ ++ // large or unaligned zeroing ++ // arg0 = address of memory to zero (in R19, changed as side effect) ++ // arg1 = address of the last element to zero ++ // arg2 = mem ++ // auxint = alignment ++ // returns mem ++ // SUBV $8, R19 ++ // MOVV R0, 8(R19) ++ // ADDV $8, R19 ++ // BNE Rarg1, R19, -2(PC) ++ { ++ name: "LoweredZero", ++ aux: "Int64", ++ argLength: 3, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R19"), gp}, ++ clobbers: buildReg("R19"), ++ }, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ }, ++ ++ // large or unaligned move ++ // arg0 = address of dst memory (in R4, changed as 
side effect) ++ // arg1 = address of src memory (in R19, changed as side effect) ++ // arg2 = address of the last element of src ++ // arg3 = mem ++ // auxint = alignment ++ // returns mem ++ // SUBV $8, R19 ++ // MOVV 8(R19), Rtmp ++ // MOVV Rtmp, (R4) ++ // ADDV $8, R19 ++ // ADDV $8, R4 ++ // BNE Rarg2, R19, -4(PC) ++ { ++ name: "LoweredMove", ++ aux: "Int64", ++ argLength: 4, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R4"), buildReg("R19"), gp}, ++ clobbers: buildReg("R19 R4"), ++ }, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ }, ++ ++ // atomic loads. ++ // load from arg0. arg1=mem. ++ // returns so they can be properly ordered with other loads. ++ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ ++ // atomic stores. ++ // store arg1 to arg0. arg2=mem. returns memory. ++ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ // store zero to arg0. arg1=mem. returns memory. ++ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, ++ ++ // atomic exchange. ++ // store arg1 to arg0. arg2=mem. returns . 
++ // DBAR ++ // LL (Rarg0), Rout ++ // MOVV Rarg1, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // atomic add. ++ // *arg0 += arg1. arg2=mem. returns . ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV Rarg1, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV Rarg1, Rout ++ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. ++ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // atomic compare and swap. ++ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 
++ // if *arg0 == arg1 { ++ // *arg0 = arg2 ++ // return (true, memory) ++ // } else { ++ // return (false, memory) ++ // } ++ // DBAR ++ // MOVV $0, Rout ++ // LL (Rarg0), Rtmp ++ // BNE Rtmp, Rarg1, 4(PC) ++ // MOVV Rarg2, Rout ++ // SC Rout, (Rarg0) ++ // BEQ Rout, -4(PC) ++ // DBAR ++ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // pseudo-ops ++ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. ++ ++ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true ++ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false ++ ++ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, ++ // and sorts it to the very beginning of the block to prevent other ++ // use of R22 (loong64.REGCTXT, the closure pointer) ++ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R29")}}, zeroWidth: true}, ++ ++ // LoweredGetCallerSP returns the SP of the caller of the current function. ++ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, ++ ++ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. ++ // I.e., if f calls g "calls" getcallerpc, ++ // the result should be the PC within f that g will return to. ++ // See runtime/stubs.go for a more detailed discussion. ++ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, ++ ++ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier ++ // It saves all GP registers if necessary, ++ // but clobbers R1 (LR) because it's a call ++ // and R30 (REGTMP). 
++ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R27"), buildReg("R28")}, clobbers: (callerSave &^ gpg) | buildReg("R1")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, ++ ++ // There are three of these functions so that they can have three different register inputs. ++ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the ++ // default registers to match so we don't need to copy registers around unnecessarily. ++ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). ++ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). ++ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). 
++ } ++ ++ blocks := []blockData{ ++ {name: "EQ", controls: 1}, ++ {name: "NE", controls: 1}, ++ {name: "LTZ", controls: 1}, // < 0 ++ {name: "LEZ", controls: 1}, // <= 0 ++ {name: "GTZ", controls: 1}, // > 0 ++ {name: "GEZ", controls: 1}, // >= 0 ++ {name: "FPT", controls: 1}, // FP flag is true ++ {name: "FPF", controls: 1}, // FP flag is false ++ } ++ ++ archs = append(archs, arch{ ++ name: "LOONG64", ++ pkg: "cmd/internal/obj/loong64", ++ genfile: "../../loong64/ssa.go", ++ ops: ops, ++ blocks: blocks, ++ regnames: regNamesLOONG64, ++ ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11", ++ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7", ++ gpregmask: gp, ++ fpregmask: fp, ++ framepointerreg: -1, // not used ++ linkreg: int8(num["R1"]), ++ }) ++} +diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go +index 1c37fbe0db..5aa0f52819 100644 +--- a/src/cmd/compile/internal/ssa/opGen.go ++++ b/src/cmd/compile/internal/ssa/opGen.go +@@ -6,6 +6,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" ++ "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" +@@ -91,6 +92,15 @@ const ( + BlockARM64GTnoov + BlockARM64GEnoov + ++ BlockLOONG64EQ ++ BlockLOONG64NE ++ BlockLOONG64LTZ ++ BlockLOONG64LEZ ++ BlockLOONG64GTZ ++ BlockLOONG64GEZ ++ BlockLOONG64FPT ++ BlockLOONG64FPF ++ + BlockMIPSEQ + BlockMIPSNE + BlockMIPSLTZ +@@ -229,6 +239,15 @@ var blockString = [...]string{ + BlockARM64GTnoov: "GTnoov", + BlockARM64GEnoov: "GEnoov", + ++ BlockLOONG64EQ: "EQ", ++ BlockLOONG64NE: "NE", ++ BlockLOONG64LTZ: "LTZ", ++ BlockLOONG64LEZ: "LEZ", ++ BlockLOONG64GTZ: "GTZ", ++ BlockLOONG64GEZ: "GEZ", ++ BlockLOONG64FPT: "FPT", ++ BlockLOONG64FPF: "FPF", ++ + BlockMIPSEQ: "EQ", + BlockMIPSNE: "NE", + BlockMIPSLTZ: "LTZ", +@@ -1611,6 +1630,126 @@ const ( + OpARM64LoweredPanicBoundsB + OpARM64LoweredPanicBoundsC + ++ OpLOONG64ADDV ++ OpLOONG64ADDVconst ++ OpLOONG64SUBV ++ 
OpLOONG64SUBVconst ++ OpLOONG64MULV ++ OpLOONG64MULVU ++ OpLOONG64DIVV ++ OpLOONG64DIVVU ++ OpLOONG64ADDF ++ OpLOONG64ADDD ++ OpLOONG64SUBF ++ OpLOONG64SUBD ++ OpLOONG64MULF ++ OpLOONG64MULD ++ OpLOONG64DIVF ++ OpLOONG64DIVD ++ OpLOONG64AND ++ OpLOONG64ANDconst ++ OpLOONG64OR ++ OpLOONG64ORconst ++ OpLOONG64XOR ++ OpLOONG64XORconst ++ OpLOONG64NOR ++ OpLOONG64NORconst ++ OpLOONG64NEGV ++ OpLOONG64NEGF ++ OpLOONG64NEGD ++ OpLOONG64SQRTD ++ OpLOONG64SQRTF ++ OpLOONG64SLLV ++ OpLOONG64SLLVconst ++ OpLOONG64SRLV ++ OpLOONG64SRLVconst ++ OpLOONG64SRAV ++ OpLOONG64SRAVconst ++ OpLOONG64SGT ++ OpLOONG64SGTconst ++ OpLOONG64SGTU ++ OpLOONG64SGTUconst ++ OpLOONG64CMPEQF ++ OpLOONG64CMPEQD ++ OpLOONG64CMPGEF ++ OpLOONG64CMPGED ++ OpLOONG64CMPGTF ++ OpLOONG64CMPGTD ++ OpLOONG64MOVVconst ++ OpLOONG64MOVFconst ++ OpLOONG64MOVDconst ++ OpLOONG64MOVVaddr ++ OpLOONG64MOVBload ++ OpLOONG64MOVBUload ++ OpLOONG64MOVHload ++ OpLOONG64MOVHUload ++ OpLOONG64MOVWload ++ OpLOONG64MOVWUload ++ OpLOONG64MOVVload ++ OpLOONG64MOVFload ++ OpLOONG64MOVDload ++ OpLOONG64MOVBstore ++ OpLOONG64MOVHstore ++ OpLOONG64MOVWstore ++ OpLOONG64MOVVstore ++ OpLOONG64MOVFstore ++ OpLOONG64MOVDstore ++ OpLOONG64MOVBstorezero ++ OpLOONG64MOVHstorezero ++ OpLOONG64MOVWstorezero ++ OpLOONG64MOVVstorezero ++ OpLOONG64MOVBreg ++ OpLOONG64MOVBUreg ++ OpLOONG64MOVHreg ++ OpLOONG64MOVHUreg ++ OpLOONG64MOVWreg ++ OpLOONG64MOVWUreg ++ OpLOONG64MOVVreg ++ OpLOONG64MOVVnop ++ OpLOONG64MOVWF ++ OpLOONG64MOVWD ++ OpLOONG64MOVVF ++ OpLOONG64MOVVD ++ OpLOONG64TRUNCFW ++ OpLOONG64TRUNCDW ++ OpLOONG64TRUNCFV ++ OpLOONG64TRUNCDV ++ OpLOONG64MOVFD ++ OpLOONG64MOVDF ++ OpLOONG64CALLstatic ++ OpLOONG64CALLclosure ++ OpLOONG64CALLinter ++ OpLOONG64DUFFZERO ++ OpLOONG64DUFFCOPY ++ OpLOONG64LoweredZero ++ OpLOONG64LoweredMove ++ OpLOONG64LoweredAtomicLoad8 ++ OpLOONG64LoweredAtomicLoad32 ++ OpLOONG64LoweredAtomicLoad64 ++ OpLOONG64LoweredAtomicStore8 ++ OpLOONG64LoweredAtomicStore32 ++ OpLOONG64LoweredAtomicStore64 ++ 
OpLOONG64LoweredAtomicStorezero32 ++ OpLOONG64LoweredAtomicStorezero64 ++ OpLOONG64LoweredAtomicExchange32 ++ OpLOONG64LoweredAtomicExchange64 ++ OpLOONG64LoweredAtomicAdd32 ++ OpLOONG64LoweredAtomicAdd64 ++ OpLOONG64LoweredAtomicAddconst32 ++ OpLOONG64LoweredAtomicAddconst64 ++ OpLOONG64LoweredAtomicCas32 ++ OpLOONG64LoweredAtomicCas64 ++ OpLOONG64LoweredNilCheck ++ OpLOONG64FPFlagTrue ++ OpLOONG64FPFlagFalse ++ OpLOONG64LoweredGetClosurePtr ++ OpLOONG64LoweredGetCallerSP ++ OpLOONG64LoweredGetCallerPC ++ OpLOONG64LoweredWB ++ OpLOONG64LoweredPanicBoundsA ++ OpLOONG64LoweredPanicBoundsB ++ OpLOONG64LoweredPanicBoundsC ++ + OpMIPSADD + OpMIPSADDconst + OpMIPSSUB +@@ -21447,137 +21586,123 @@ var opcodeTable = [...]opInfo{ + }, + + { +- name: "ADD", ++ name: "ADDV", + argLen: 2, + commutative: true, +- asm: mips.AADDU, ++ asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "ADDconst", +- auxType: auxInt32, ++ name: "ADDVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AADDU, ++ asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 ++ {0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SUB", ++ name: "SUBV", + argLen: 2, +- asm: mips.ASUBU, ++ asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SUBconst", +- auxType: auxInt32, ++ name: "SUBVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASUBU, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "MUL", +- argLen: 2, +- commutative: true, +- asm: mips.AMUL, ++ asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 105553116266496, // HI LO + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "MULT", +- argLen: 2, +- commutative: true, +- asm: mips.AMUL, ++ name: "MULV", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "MULTU", +- argLen: 2, +- commutative: true, +- asm: mips.AMULU, ++ name: "MULVU", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + 
outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "DIV", +- argLen: 2, +- asm: mips.ADIV, ++ name: "DIVV", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "DIVU", +- argLen: 2, +- asm: mips.ADIVU, ++ name: "DIVVU", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, +@@ -21585,14 +21710,14 @@ var opcodeTable = [...]opInfo{ + name: "ADDF", + argLen: 2, + commutative: true, +- asm: mips.AADDF, ++ asm: loong64.AADDF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 
+- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21600,42 +21725,42 @@ var opcodeTable = [...]opInfo{ + name: "ADDD", + argLen: 2, + commutative: true, +- asm: mips.AADDD, ++ asm: loong64.AADDD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBF", + argLen: 2, +- asm: mips.ASUBF, ++ asm: loong64.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBD", + argLen: 2, +- asm: mips.ASUBD, ++ asm: loong64.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21643,14 +21768,14 @@ var opcodeTable = [...]opInfo{ + name: "MULF", + argLen: 2, + commutative: true, +- asm: mips.AMULF, ++ asm: loong64.AMULF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21658,42 +21783,42 @@ var opcodeTable = [...]opInfo{ + name: "MULD", + argLen: 2, + commutative: true, +- asm: mips.AMULD, ++ asm: loong64.AMULD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVF", + argLen: 2, +- asm: mips.ADIVF, ++ asm: loong64.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVD", + argLen: 2, +- asm: mips.ADIVD, ++ asm: loong64.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21701,28 +21826,28 @@ var opcodeTable = [...]opInfo{ + name: "AND", + argLen: 2, + commutative: true, +- asm: mips.AAND, ++ asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ANDconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- 
asm: mips.AAND, ++ asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21730,28 +21855,28 @@ var opcodeTable = [...]opInfo{ + name: "OR", + argLen: 2, + commutative: true, +- asm: mips.AOR, ++ asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AOR, ++ asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21759,28 +21884,28 @@ var opcodeTable = [...]opInfo{ + name: "XOR", + argLen: 2, + commutative: true, +- asm: mips.AXOR, ++ asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AXOR, ++ asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21788,361 +21913,322 @@ var opcodeTable = [...]opInfo{ + name: "NOR", + argLen: 2, + commutative: true, +- asm: mips.ANOR, ++ asm: loong64.ANOR, + reg: regInfo{ + 
inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ANOR, ++ asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "NEG", ++ name: "NEGV", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, +- asm: mips.ANEGF, ++ asm: loong64.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "NEGD", + argLen: 1, +- asm: mips.ANEGD, ++ asm: loong64.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, +- asm: mips.ASQRTD, ++ asm: loong64.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, +- 
asm: mips.ASQRTF, ++ asm: loong64.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "SLL", ++ name: "SLLV", + argLen: 2, +- asm: mips.ASLL, ++ asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SLLconst", +- auxType: auxInt32, ++ name: "SLLVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASLL, ++ asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRL", ++ name: "SRLV", + argLen: 2, +- asm: mips.ASRL, ++ asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRLconst", +- auxType: auxInt32, ++ name: "SRLVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASRL, ++ asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRA", ++ name: "SRAV", + argLen: 2, +- asm: mips.ASRA, ++ asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRAconst", +- auxType: auxInt32, ++ name: "SRAVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASRA, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "CLZ", +- argLen: 1, +- asm: mips.ACLZ, ++ asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, +- asm: mips.ASGT, ++ asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASGT, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "SGTzero", +- argLen: 1, +- asm: mips.ASGT, ++ asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, +- asm: mips.ASGTU, ++ asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTUconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASGTU, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "SGTUzero", +- argLen: 1, +- asm: mips.ASGTU, ++ asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "CMPEQF", + argLen: 2, +- asm: mips.ACMPEQF, ++ asm: loong64.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 
F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, +- asm: mips.ACMPEQD, ++ asm: loong64.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, +- asm: mips.ACMPGEF, ++ asm: loong64.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, +- asm: mips.ACMPGED, ++ asm: loong64.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, +- asm: mips.ACMPGTF, ++ asm: loong64.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, +- asm: mips.ACMPGTD, ++ asm: loong64.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "MOVWconst", +- auxType: auxInt32, ++ name: "MOVVconst", ++ auxType: auxInt64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVW, ++ asm: loong64.AMOVV, + reg: regInfo{ + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFconst", +- auxType: auxFloat32, ++ auxType: auxFloat64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 
F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22151,26 +22237,26 @@ var opcodeTable = [...]opInfo{ + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "MOVWaddr", ++ name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, +- asm: mips.AMOVW, ++ asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140737555464192}, // SP SB ++ {0, 4611686018427387908}, // SP SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22180,13 +22266,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22196,13 +22282,13 @@ var opcodeTable = 
[...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVBU, ++ asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22212,13 +22298,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22228,13 +22314,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVHU, ++ asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22244,13 +22330,45 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWUload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: loong64.AMOVWU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22260,13 +22378,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: 
SymRead, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22276,13 +22394,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22292,11 +22410,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22306,11 +22424,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22320,11 +22438,25 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVVstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22334,11 
+22466,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22348,11 +22480,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22362,10 +22494,10 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22375,10 +22507,10 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + 
symEffect: SymWrite, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22388,197 +22520,257 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVVstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, +- asm: mips.AMOVBU, ++ asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, +- asm: mips.AMOVHU, ++ asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 
1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "MOVWnop", +- argLen: 1, +- resultInArg0: true, ++ name: "MOVWUreg", ++ argLen: 1, ++ asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "CMOVZ", +- argLen: 3, +- resultInArg0: true, +- asm: mips.ACMOVZ, ++ name: "MOVVreg", ++ argLen: 1, ++ asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- 
name: "CMOVZzero", +- argLen: 2, ++ name: "MOVVnop", ++ argLen: 1, + resultInArg0: true, +- asm: mips.ACMOVZ, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, +- asm: mips.AMOVWF, ++ asm: loong64.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, +- asm: mips.AMOVWD, ++ asm: loong64.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVF", ++ argLen: 1, ++ asm: loong64.AMOVVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVD", ++ argLen: 1, ++ asm: loong64.AMOVVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFW", + argLen: 1, +- asm: mips.ATRUNCFW, ++ asm: loong64.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, +- asm: mips.ATRUNCDW, ++ asm: loong64.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 
35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCFV", ++ argLen: 1, ++ asm: loong64.ATRUNCFV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCDV", ++ argLen: 1, ++ asm: loong64.ATRUNCDV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, +- asm: mips.AMOVFD, ++ asm: loong64.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, +- asm: mips.AMOVDF, ++ asm: loong64.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ 
{0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22589,7 +22781,7 @@ var opcodeTable = [...]opInfo{ + clobberFlags: true, + call: true, + reg: regInfo{ +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +@@ -22600,10 +22792,10 @@ var opcodeTable = [...]opInfo{ + call: true, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 4194304}, // R22 +- {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 ++ {1, 268435456}, // R29 ++ {0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +@@ -22614,39 +22806,1613 @@ var opcodeTable = [...]opInfo{ + call: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +- name: "LoweredAtomicLoad8", ++ name: "DUFFZERO", ++ auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 262146, // R1 R19 + }, + }, + { +- name: "LoweredAtomicLoad32", +- argLen: 2, ++ name: "DUFFCOPY", ++ auxType: auxInt64, ++ argLen: 3, + faultOnNilArg0: true, ++ faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 524288}, // R20 ++ {1, 262144}, // R19 + }, ++ clobbers: 786434, // R1 R19 R20 + }, + }, + { +- name: "LoweredAtomicStore8", ++ name: "LoweredZero", ++ auxType: auxInt64, ++ argLen: 3, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ reg: regInfo{ 
++ inputs: []inputInfo{ ++ {0, 262144}, // R19 ++ {1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ clobbers: 262144, // R19 ++ }, ++ }, ++ { ++ name: "LoweredMove", ++ auxType: auxInt64, ++ argLen: 4, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 8}, // R4 ++ {1, 262144}, // R19 ++ {2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ clobbers: 262152, // R4 R19 ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad8", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad64", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore8", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 
1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore32", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore64", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStorezero32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStorezero64", ++ argLen: 2, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicExchange32", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 
R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicExchange64", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAdd32", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAdd64", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: 
[]outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAddconst32", ++ auxType: auxInt32, ++ argLen: 2, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAddconst64", ++ auxType: auxInt64, ++ argLen: 2, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicCas32", ++ argLen: 4, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicCas64", ++ argLen: 4, ++ resultNotInArgs: true, ++ 
faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredNilCheck", ++ argLen: 2, ++ nilCheck: true, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "FPFlagTrue", ++ argLen: 1, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "FPFlagFalse", ++ argLen: 1, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetClosurePtr", ++ argLen: 0, ++ zeroWidth: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 268435456}, // R29 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetCallerSP", ++ argLen: 0, ++ rematerializeable: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetCallerPC", ++ argLen: 0, ++ rematerializeable: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 
R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredWB", ++ auxType: auxSym, ++ argLen: 3, ++ clobberFlags: true, ++ symEffect: SymNone, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 67108864}, // R27 ++ {1, 134217728}, // R28 ++ }, ++ clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ { ++ name: "LoweredPanicBoundsA", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 65536}, // R17 ++ {1, 8}, // R4 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredPanicBoundsB", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 131072}, // R18 ++ {1, 65536}, // R17 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredPanicBoundsC", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 262144}, // R19 ++ {1, 131072}, // R18 ++ }, ++ }, ++ }, ++ ++ { ++ name: "ADD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "ADDconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AADDU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SUB", ++ argLen: 2, ++ asm: mips.ASUBU, ++ reg: regInfo{ ++ inputs: 
[]inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SUBconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASUBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MUL", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMUL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ clobbers: 105553116266496, // HI LO ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MULT", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMUL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "MULTU", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "DIV", ++ argLen: 2, ++ asm: mips.ADIV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "DIVU", ++ argLen: 2, ++ asm: mips.ADIVU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "ADDF", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "ADDD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 
F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SUBF", ++ argLen: 2, ++ asm: mips.ASUBF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SUBD", ++ argLen: 2, ++ asm: mips.ASUBD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MULF", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MULD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "DIVF", ++ argLen: 2, ++ asm: mips.ADIVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 
++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "DIVD", ++ argLen: 2, ++ asm: mips.ADIVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "AND", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AAND, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "ANDconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AAND, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "OR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ 
name: "ORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "XOR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AXOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "XORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AXOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NOR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.ANOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ANOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NEG", ++ argLen: 1, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NEGF", ++ argLen: 1, ++ asm: mips.ANEGF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "NEGD", ++ argLen: 1, ++ asm: mips.ANEGD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SQRTD", ++ argLen: 1, ++ asm: mips.ASQRTD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SQRTF", ++ argLen: 1, ++ asm: mips.ASQRTF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SLL", ++ argLen: 2, ++ asm: mips.ASLL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ 
{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SLLconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASLL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRL", ++ argLen: 2, ++ asm: mips.ASRL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRLconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASRL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRA", ++ argLen: 2, ++ asm: mips.ASRA, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRAconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASRA, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CLZ", ++ argLen: 1, ++ asm: mips.ACLZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGT", ++ argLen: 2, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTzero", ++ argLen: 1, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ 
inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTU", ++ argLen: 2, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTUconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTUzero", ++ argLen: 1, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMPEQF", ++ argLen: 2, ++ asm: mips.ACMPEQF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPEQD", ++ argLen: 2, ++ asm: mips.ACMPEQD, ++ reg: regInfo{ ++ inputs: []inputInfo{ 
++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGEF", ++ argLen: 2, ++ asm: mips.ACMPGEF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGED", ++ argLen: 2, ++ asm: mips.ACMPGED, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGTF", ++ argLen: 2, ++ asm: mips.ACMPGTF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGTD", ++ argLen: 2, ++ asm: mips.ACMPGTD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWconst", ++ auxType: auxInt32, ++ argLen: 0, ++ rematerializeable: true, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFconst", ++ auxType: auxFloat32, ++ argLen: 0, ++ rematerializeable: true, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVDconst", ++ auxType: auxFloat64, ++ argLen: 0, ++ rematerializeable: true, ++ 
asm: mips.AMOVD, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWaddr", ++ auxType: auxSymOff, ++ argLen: 1, ++ rematerializeable: true, ++ symEffect: SymAddr, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140737555464192}, // SP SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBUload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHUload", ++ auxType: 
auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVHU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVDload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVHstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVWstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVFstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVDstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVBstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ 
faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVHstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVWstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVBreg", ++ argLen: 1, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBUreg", ++ argLen: 1, ++ asm: mips.AMOVBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHreg", ++ argLen: 1, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHUreg", ++ argLen: 1, ++ asm: mips.AMOVHU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWreg", ++ argLen: 1, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWnop", ++ argLen: 1, ++ resultInArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMOVZ", ++ argLen: 3, ++ resultInArg0: true, ++ asm: mips.ACMOVZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMOVZzero", ++ argLen: 2, ++ resultInArg0: true, ++ asm: mips.ACMOVZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ 
{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWF", ++ argLen: 1, ++ asm: mips.AMOVWF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWD", ++ argLen: 1, ++ asm: mips.AMOVWD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCFW", ++ argLen: 1, ++ asm: mips.ATRUNCFW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCDW", ++ argLen: 1, ++ asm: mips.ATRUNCDW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFD", ++ argLen: 1, ++ asm: mips.AMOVFD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ 
}, ++ }, ++ }, ++ { ++ name: "MOVDF", ++ argLen: 1, ++ asm: mips.AMOVDF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CALLstatic", ++ auxType: auxCallOff, ++ argLen: 1, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "CALLclosure", ++ auxType: auxCallOff, ++ argLen: 3, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 4194304}, // R22 ++ {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 ++ }, ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "CALLinter", ++ auxType: auxCallOff, ++ argLen: 2, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad8", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, +@@ -36407,6 +38173,78 @@ var fpRegMaskARM64 = regMask(9223372034707292160) + var specialRegMaskARM64 = regMask(0) + var framepointerRegARM64 = int8(-1) + var linkRegARM64 = int8(29) ++var registersLOONG64 = [...]Register{ ++ {0, loong64.REG_R0, -1, "R0"}, ++ {1, loong64.REG_R1, -1, "R1"}, ++ {2, loong64.REGSP, -1, "SP"}, ++ {3, loong64.REG_R4, 0, "R4"}, ++ {4, loong64.REG_R5, 1, "R5"}, ++ {5, loong64.REG_R6, 2, "R6"}, ++ {6, loong64.REG_R7, 3, "R7"}, ++ {7, loong64.REG_R8, 4, "R8"}, ++ {8, loong64.REG_R9, 5, "R9"}, ++ {9, loong64.REG_R10, 6, "R10"}, ++ {10, loong64.REG_R11, 7, "R11"}, ++ {11, loong64.REG_R12, 8, "R12"}, ++ {12, loong64.REG_R13, 9, "R13"}, ++ {13, loong64.REG_R14, 10, "R14"}, ++ {14, loong64.REG_R15, 11, "R15"}, ++ {15, loong64.REG_R16, 12, "R16"}, ++ {16, loong64.REG_R17, 13, "R17"}, ++ {17, loong64.REG_R18, 14, "R18"}, ++ {18, loong64.REG_R19, 15, "R19"}, ++ {19, loong64.REG_R20, 16, "R20"}, ++ {20, loong64.REG_R21, -1, "R21"}, ++ {21, loong64.REGG, -1, "g"}, ++ {22, loong64.REG_R23, 17, "R23"}, ++ {23, loong64.REG_R24, 18, "R24"}, ++ {24, loong64.REG_R25, 19, "R25"}, ++ {25, loong64.REG_R26, 20, "R26"}, ++ {26, loong64.REG_R27, 21, "R27"}, ++ {27, loong64.REG_R28, 22, "R28"}, ++ {28, loong64.REG_R29, 23, "R29"}, ++ {29, loong64.REG_R31, 24, "R31"}, ++ {30, loong64.REG_F0, -1, "F0"}, ++ {31, loong64.REG_F1, -1, "F1"}, ++ {32, loong64.REG_F2, -1, "F2"}, ++ {33, loong64.REG_F3, -1, "F3"}, 
++ {34, loong64.REG_F4, -1, "F4"}, ++ {35, loong64.REG_F5, -1, "F5"}, ++ {36, loong64.REG_F6, -1, "F6"}, ++ {37, loong64.REG_F7, -1, "F7"}, ++ {38, loong64.REG_F8, -1, "F8"}, ++ {39, loong64.REG_F9, -1, "F9"}, ++ {40, loong64.REG_F10, -1, "F10"}, ++ {41, loong64.REG_F11, -1, "F11"}, ++ {42, loong64.REG_F12, -1, "F12"}, ++ {43, loong64.REG_F13, -1, "F13"}, ++ {44, loong64.REG_F14, -1, "F14"}, ++ {45, loong64.REG_F15, -1, "F15"}, ++ {46, loong64.REG_F16, -1, "F16"}, ++ {47, loong64.REG_F17, -1, "F17"}, ++ {48, loong64.REG_F18, -1, "F18"}, ++ {49, loong64.REG_F19, -1, "F19"}, ++ {50, loong64.REG_F20, -1, "F20"}, ++ {51, loong64.REG_F21, -1, "F21"}, ++ {52, loong64.REG_F22, -1, "F22"}, ++ {53, loong64.REG_F23, -1, "F23"}, ++ {54, loong64.REG_F24, -1, "F24"}, ++ {55, loong64.REG_F25, -1, "F25"}, ++ {56, loong64.REG_F26, -1, "F26"}, ++ {57, loong64.REG_F27, -1, "F27"}, ++ {58, loong64.REG_F28, -1, "F28"}, ++ {59, loong64.REG_F29, -1, "F29"}, ++ {60, loong64.REG_F30, -1, "F30"}, ++ {61, loong64.REG_F31, -1, "F31"}, ++ {62, 0, -1, "SB"}, ++} ++var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10} ++var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37} ++var gpRegMaskLOONG64 = regMask(1070596088) ++var fpRegMaskLOONG64 = regMask(4611686017353646080) ++var specialRegMaskLOONG64 = regMask(0) ++var framepointerRegLOONG64 = int8(-1) ++var linkRegLOONG64 = int8(1) + var registersMIPS = [...]Register{ + {0, mips.REG_R0, -1, "R0"}, + {1, mips.REG_R1, 0, "R1"}, +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +new file mode 100644 +index 0000000000..463a0458a3 +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -0,0 +1,7943 @@ ++// Code generated from gen/LOONG64.rules; DO NOT EDIT. 
++// generated with: cd gen; go run *.go ++ ++package ssa ++ ++import "cmd/compile/internal/types" ++ ++func rewriteValueLOONG64(v *Value) bool { ++ switch v.Op { ++ case OpAdd16: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd32: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd32F: ++ v.Op = OpLOONG64ADDF ++ return true ++ case OpAdd64: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd64F: ++ v.Op = OpLOONG64ADDD ++ return true ++ case OpAdd8: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAddPtr: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAddr: ++ return rewriteValueLOONG64_OpAddr(v) ++ case OpAnd16: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd32: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd64: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd8: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAndB: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAtomicAdd32: ++ v.Op = OpLOONG64LoweredAtomicAdd32 ++ return true ++ case OpAtomicAdd64: ++ v.Op = OpLOONG64LoweredAtomicAdd64 ++ return true ++ case OpAtomicCompareAndSwap32: ++ v.Op = OpLOONG64LoweredAtomicCas32 ++ return true ++ case OpAtomicCompareAndSwap64: ++ v.Op = OpLOONG64LoweredAtomicCas64 ++ return true ++ case OpAtomicExchange32: ++ v.Op = OpLOONG64LoweredAtomicExchange32 ++ return true ++ case OpAtomicExchange64: ++ v.Op = OpLOONG64LoweredAtomicExchange64 ++ return true ++ case OpAtomicLoad32: ++ v.Op = OpLOONG64LoweredAtomicLoad32 ++ return true ++ case OpAtomicLoad64: ++ v.Op = OpLOONG64LoweredAtomicLoad64 ++ return true ++ case OpAtomicLoad8: ++ v.Op = OpLOONG64LoweredAtomicLoad8 ++ return true ++ case OpAtomicLoadPtr: ++ v.Op = OpLOONG64LoweredAtomicLoad64 ++ return true ++ case OpAtomicStore32: ++ v.Op = OpLOONG64LoweredAtomicStore32 ++ return true ++ case OpAtomicStore64: ++ v.Op = OpLOONG64LoweredAtomicStore64 ++ return true ++ case OpAtomicStore8: ++ v.Op = OpLOONG64LoweredAtomicStore8 ++ return true ++ case OpAtomicStorePtrNoWB: ++ v.Op = 
OpLOONG64LoweredAtomicStore64 ++ return true ++ case OpAvg64u: ++ return rewriteValueLOONG64_OpAvg64u(v) ++ case OpClosureCall: ++ v.Op = OpLOONG64CALLclosure ++ return true ++ case OpCom16: ++ return rewriteValueLOONG64_OpCom16(v) ++ case OpCom32: ++ return rewriteValueLOONG64_OpCom32(v) ++ case OpCom64: ++ return rewriteValueLOONG64_OpCom64(v) ++ case OpCom8: ++ return rewriteValueLOONG64_OpCom8(v) ++ case OpConst16: ++ return rewriteValueLOONG64_OpConst16(v) ++ case OpConst32: ++ return rewriteValueLOONG64_OpConst32(v) ++ case OpConst32F: ++ return rewriteValueLOONG64_OpConst32F(v) ++ case OpConst64: ++ return rewriteValueLOONG64_OpConst64(v) ++ case OpConst64F: ++ return rewriteValueLOONG64_OpConst64F(v) ++ case OpConst8: ++ return rewriteValueLOONG64_OpConst8(v) ++ case OpConstBool: ++ return rewriteValueLOONG64_OpConstBool(v) ++ case OpConstNil: ++ return rewriteValueLOONG64_OpConstNil(v) ++ case OpCvt32Fto32: ++ v.Op = OpLOONG64TRUNCFW ++ return true ++ case OpCvt32Fto64: ++ v.Op = OpLOONG64TRUNCFV ++ return true ++ case OpCvt32Fto64F: ++ v.Op = OpLOONG64MOVFD ++ return true ++ case OpCvt32to32F: ++ v.Op = OpLOONG64MOVWF ++ return true ++ case OpCvt32to64F: ++ v.Op = OpLOONG64MOVWD ++ return true ++ case OpCvt64Fto32: ++ v.Op = OpLOONG64TRUNCDW ++ return true ++ case OpCvt64Fto32F: ++ v.Op = OpLOONG64MOVDF ++ return true ++ case OpCvt64Fto64: ++ v.Op = OpLOONG64TRUNCDV ++ return true ++ case OpCvt64to32F: ++ v.Op = OpLOONG64MOVVF ++ return true ++ case OpCvt64to64F: ++ v.Op = OpLOONG64MOVVD ++ return true ++ case OpCvtBoolToUint8: ++ v.Op = OpCopy ++ return true ++ case OpDiv16: ++ return rewriteValueLOONG64_OpDiv16(v) ++ case OpDiv16u: ++ return rewriteValueLOONG64_OpDiv16u(v) ++ case OpDiv32: ++ return rewriteValueLOONG64_OpDiv32(v) ++ case OpDiv32F: ++ v.Op = OpLOONG64DIVF ++ return true ++ case OpDiv32u: ++ return rewriteValueLOONG64_OpDiv32u(v) ++ case OpDiv64: ++ return rewriteValueLOONG64_OpDiv64(v) ++ case OpDiv64F: ++ v.Op = OpLOONG64DIVD ++ return 
true ++ case OpDiv64u: ++ return rewriteValueLOONG64_OpDiv64u(v) ++ case OpDiv8: ++ return rewriteValueLOONG64_OpDiv8(v) ++ case OpDiv8u: ++ return rewriteValueLOONG64_OpDiv8u(v) ++ case OpEq16: ++ return rewriteValueLOONG64_OpEq16(v) ++ case OpEq32: ++ return rewriteValueLOONG64_OpEq32(v) ++ case OpEq32F: ++ return rewriteValueLOONG64_OpEq32F(v) ++ case OpEq64: ++ return rewriteValueLOONG64_OpEq64(v) ++ case OpEq64F: ++ return rewriteValueLOONG64_OpEq64F(v) ++ case OpEq8: ++ return rewriteValueLOONG64_OpEq8(v) ++ case OpEqB: ++ return rewriteValueLOONG64_OpEqB(v) ++ case OpEqPtr: ++ return rewriteValueLOONG64_OpEqPtr(v) ++ case OpGetCallerPC: ++ v.Op = OpLOONG64LoweredGetCallerPC ++ return true ++ case OpGetCallerSP: ++ v.Op = OpLOONG64LoweredGetCallerSP ++ return true ++ case OpGetClosurePtr: ++ v.Op = OpLOONG64LoweredGetClosurePtr ++ return true ++ case OpHmul32: ++ return rewriteValueLOONG64_OpHmul32(v) ++ case OpHmul32u: ++ return rewriteValueLOONG64_OpHmul32u(v) ++ case OpHmul64: ++ return rewriteValueLOONG64_OpHmul64(v) ++ case OpHmul64u: ++ return rewriteValueLOONG64_OpHmul64u(v) ++ case OpInterCall: ++ v.Op = OpLOONG64CALLinter ++ return true ++ case OpIsInBounds: ++ return rewriteValueLOONG64_OpIsInBounds(v) ++ case OpIsNonNil: ++ return rewriteValueLOONG64_OpIsNonNil(v) ++ case OpIsSliceInBounds: ++ return rewriteValueLOONG64_OpIsSliceInBounds(v) ++ case OpLOONG64ADDV: ++ return rewriteValueLOONG64_OpLOONG64ADDV(v) ++ case OpLOONG64ADDVconst: ++ return rewriteValueLOONG64_OpLOONG64ADDVconst(v) ++ case OpLOONG64AND: ++ return rewriteValueLOONG64_OpLOONG64AND(v) ++ case OpLOONG64ANDconst: ++ return rewriteValueLOONG64_OpLOONG64ANDconst(v) ++ case OpLOONG64LoweredAtomicAdd32: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v) ++ case OpLOONG64LoweredAtomicAdd64: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v) ++ case OpLOONG64LoweredAtomicStore32: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v) ++ case 
OpLOONG64LoweredAtomicStore64: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v) ++ case OpLOONG64MOVBUload: ++ return rewriteValueLOONG64_OpLOONG64MOVBUload(v) ++ case OpLOONG64MOVBUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVBUreg(v) ++ case OpLOONG64MOVBload: ++ return rewriteValueLOONG64_OpLOONG64MOVBload(v) ++ case OpLOONG64MOVBreg: ++ return rewriteValueLOONG64_OpLOONG64MOVBreg(v) ++ case OpLOONG64MOVBstore: ++ return rewriteValueLOONG64_OpLOONG64MOVBstore(v) ++ case OpLOONG64MOVBstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVBstorezero(v) ++ case OpLOONG64MOVDload: ++ return rewriteValueLOONG64_OpLOONG64MOVDload(v) ++ case OpLOONG64MOVDstore: ++ return rewriteValueLOONG64_OpLOONG64MOVDstore(v) ++ case OpLOONG64MOVFload: ++ return rewriteValueLOONG64_OpLOONG64MOVFload(v) ++ case OpLOONG64MOVFstore: ++ return rewriteValueLOONG64_OpLOONG64MOVFstore(v) ++ case OpLOONG64MOVHUload: ++ return rewriteValueLOONG64_OpLOONG64MOVHUload(v) ++ case OpLOONG64MOVHUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVHUreg(v) ++ case OpLOONG64MOVHload: ++ return rewriteValueLOONG64_OpLOONG64MOVHload(v) ++ case OpLOONG64MOVHreg: ++ return rewriteValueLOONG64_OpLOONG64MOVHreg(v) ++ case OpLOONG64MOVHstore: ++ return rewriteValueLOONG64_OpLOONG64MOVHstore(v) ++ case OpLOONG64MOVHstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVHstorezero(v) ++ case OpLOONG64MOVVload: ++ return rewriteValueLOONG64_OpLOONG64MOVVload(v) ++ case OpLOONG64MOVVreg: ++ return rewriteValueLOONG64_OpLOONG64MOVVreg(v) ++ case OpLOONG64MOVVstore: ++ return rewriteValueLOONG64_OpLOONG64MOVVstore(v) ++ case OpLOONG64MOVVstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVVstorezero(v) ++ case OpLOONG64MOVWUload: ++ return rewriteValueLOONG64_OpLOONG64MOVWUload(v) ++ case OpLOONG64MOVWUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVWUreg(v) ++ case OpLOONG64MOVWload: ++ return rewriteValueLOONG64_OpLOONG64MOVWload(v) ++ case OpLOONG64MOVWreg: ++ return 
rewriteValueLOONG64_OpLOONG64MOVWreg(v) ++ case OpLOONG64MOVWstore: ++ return rewriteValueLOONG64_OpLOONG64MOVWstore(v) ++ case OpLOONG64MOVWstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVWstorezero(v) ++ case OpLOONG64NEGV: ++ return rewriteValueLOONG64_OpLOONG64NEGV(v) ++ case OpLOONG64NOR: ++ return rewriteValueLOONG64_OpLOONG64NOR(v) ++ case OpLOONG64NORconst: ++ return rewriteValueLOONG64_OpLOONG64NORconst(v) ++ case OpLOONG64OR: ++ return rewriteValueLOONG64_OpLOONG64OR(v) ++ case OpLOONG64ORconst: ++ return rewriteValueLOONG64_OpLOONG64ORconst(v) ++ case OpLOONG64SGT: ++ return rewriteValueLOONG64_OpLOONG64SGT(v) ++ case OpLOONG64SGTU: ++ return rewriteValueLOONG64_OpLOONG64SGTU(v) ++ case OpLOONG64SGTUconst: ++ return rewriteValueLOONG64_OpLOONG64SGTUconst(v) ++ case OpLOONG64SGTconst: ++ return rewriteValueLOONG64_OpLOONG64SGTconst(v) ++ case OpLOONG64SLLV: ++ return rewriteValueLOONG64_OpLOONG64SLLV(v) ++ case OpLOONG64SLLVconst: ++ return rewriteValueLOONG64_OpLOONG64SLLVconst(v) ++ case OpLOONG64SRAV: ++ return rewriteValueLOONG64_OpLOONG64SRAV(v) ++ case OpLOONG64SRAVconst: ++ return rewriteValueLOONG64_OpLOONG64SRAVconst(v) ++ case OpLOONG64SRLV: ++ return rewriteValueLOONG64_OpLOONG64SRLV(v) ++ case OpLOONG64SRLVconst: ++ return rewriteValueLOONG64_OpLOONG64SRLVconst(v) ++ case OpLOONG64SUBV: ++ return rewriteValueLOONG64_OpLOONG64SUBV(v) ++ case OpLOONG64SUBVconst: ++ return rewriteValueLOONG64_OpLOONG64SUBVconst(v) ++ case OpLOONG64XOR: ++ return rewriteValueLOONG64_OpLOONG64XOR(v) ++ case OpLOONG64XORconst: ++ return rewriteValueLOONG64_OpLOONG64XORconst(v) ++ case OpLeq16: ++ return rewriteValueLOONG64_OpLeq16(v) ++ case OpLeq16U: ++ return rewriteValueLOONG64_OpLeq16U(v) ++ case OpLeq32: ++ return rewriteValueLOONG64_OpLeq32(v) ++ case OpLeq32F: ++ return rewriteValueLOONG64_OpLeq32F(v) ++ case OpLeq32U: ++ return rewriteValueLOONG64_OpLeq32U(v) ++ case OpLeq64: ++ return rewriteValueLOONG64_OpLeq64(v) ++ case OpLeq64F: ++ return 
rewriteValueLOONG64_OpLeq64F(v) ++ case OpLeq64U: ++ return rewriteValueLOONG64_OpLeq64U(v) ++ case OpLeq8: ++ return rewriteValueLOONG64_OpLeq8(v) ++ case OpLeq8U: ++ return rewriteValueLOONG64_OpLeq8U(v) ++ case OpLess16: ++ return rewriteValueLOONG64_OpLess16(v) ++ case OpLess16U: ++ return rewriteValueLOONG64_OpLess16U(v) ++ case OpLess32: ++ return rewriteValueLOONG64_OpLess32(v) ++ case OpLess32F: ++ return rewriteValueLOONG64_OpLess32F(v) ++ case OpLess32U: ++ return rewriteValueLOONG64_OpLess32U(v) ++ case OpLess64: ++ return rewriteValueLOONG64_OpLess64(v) ++ case OpLess64F: ++ return rewriteValueLOONG64_OpLess64F(v) ++ case OpLess64U: ++ return rewriteValueLOONG64_OpLess64U(v) ++ case OpLess8: ++ return rewriteValueLOONG64_OpLess8(v) ++ case OpLess8U: ++ return rewriteValueLOONG64_OpLess8U(v) ++ case OpLoad: ++ return rewriteValueLOONG64_OpLoad(v) ++ case OpLocalAddr: ++ return rewriteValueLOONG64_OpLocalAddr(v) ++ case OpLsh16x16: ++ return rewriteValueLOONG64_OpLsh16x16(v) ++ case OpLsh16x32: ++ return rewriteValueLOONG64_OpLsh16x32(v) ++ case OpLsh16x64: ++ return rewriteValueLOONG64_OpLsh16x64(v) ++ case OpLsh16x8: ++ return rewriteValueLOONG64_OpLsh16x8(v) ++ case OpLsh32x16: ++ return rewriteValueLOONG64_OpLsh32x16(v) ++ case OpLsh32x32: ++ return rewriteValueLOONG64_OpLsh32x32(v) ++ case OpLsh32x64: ++ return rewriteValueLOONG64_OpLsh32x64(v) ++ case OpLsh32x8: ++ return rewriteValueLOONG64_OpLsh32x8(v) ++ case OpLsh64x16: ++ return rewriteValueLOONG64_OpLsh64x16(v) ++ case OpLsh64x32: ++ return rewriteValueLOONG64_OpLsh64x32(v) ++ case OpLsh64x64: ++ return rewriteValueLOONG64_OpLsh64x64(v) ++ case OpLsh64x8: ++ return rewriteValueLOONG64_OpLsh64x8(v) ++ case OpLsh8x16: ++ return rewriteValueLOONG64_OpLsh8x16(v) ++ case OpLsh8x32: ++ return rewriteValueLOONG64_OpLsh8x32(v) ++ case OpLsh8x64: ++ return rewriteValueLOONG64_OpLsh8x64(v) ++ case OpLsh8x8: ++ return rewriteValueLOONG64_OpLsh8x8(v) ++ case OpMod16: ++ return 
rewriteValueLOONG64_OpMod16(v) ++ case OpMod16u: ++ return rewriteValueLOONG64_OpMod16u(v) ++ case OpMod32: ++ return rewriteValueLOONG64_OpMod32(v) ++ case OpMod32u: ++ return rewriteValueLOONG64_OpMod32u(v) ++ case OpMod64: ++ return rewriteValueLOONG64_OpMod64(v) ++ case OpMod64u: ++ return rewriteValueLOONG64_OpMod64u(v) ++ case OpMod8: ++ return rewriteValueLOONG64_OpMod8(v) ++ case OpMod8u: ++ return rewriteValueLOONG64_OpMod8u(v) ++ case OpMove: ++ return rewriteValueLOONG64_OpMove(v) ++ case OpMul16: ++ return rewriteValueLOONG64_OpMul16(v) ++ case OpMul32: ++ return rewriteValueLOONG64_OpMul32(v) ++ case OpMul32F: ++ v.Op = OpLOONG64MULF ++ return true ++ case OpMul64: ++ return rewriteValueLOONG64_OpMul64(v) ++ case OpMul64F: ++ v.Op = OpLOONG64MULD ++ return true ++ case OpMul64uhilo: ++ v.Op = OpLOONG64MULVU ++ return true ++ case OpMul8: ++ return rewriteValueLOONG64_OpMul8(v) ++ case OpNeg16: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg32: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg32F: ++ v.Op = OpLOONG64NEGF ++ return true ++ case OpNeg64: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg64F: ++ v.Op = OpLOONG64NEGD ++ return true ++ case OpNeg8: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeq16: ++ return rewriteValueLOONG64_OpNeq16(v) ++ case OpNeq32: ++ return rewriteValueLOONG64_OpNeq32(v) ++ case OpNeq32F: ++ return rewriteValueLOONG64_OpNeq32F(v) ++ case OpNeq64: ++ return rewriteValueLOONG64_OpNeq64(v) ++ case OpNeq64F: ++ return rewriteValueLOONG64_OpNeq64F(v) ++ case OpNeq8: ++ return rewriteValueLOONG64_OpNeq8(v) ++ case OpNeqB: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpNeqPtr: ++ return rewriteValueLOONG64_OpNeqPtr(v) ++ case OpNilCheck: ++ v.Op = OpLOONG64LoweredNilCheck ++ return true ++ case OpNot: ++ return rewriteValueLOONG64_OpNot(v) ++ case OpOffPtr: ++ return rewriteValueLOONG64_OpOffPtr(v) ++ case OpOr16: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOr32: ++ v.Op = OpLOONG64OR ++ return true ++ 
case OpOr64: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOr8: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOrB: ++ v.Op = OpLOONG64OR ++ return true ++ case OpPanicBounds: ++ return rewriteValueLOONG64_OpPanicBounds(v) ++ case OpRotateLeft16: ++ return rewriteValueLOONG64_OpRotateLeft16(v) ++ case OpRotateLeft32: ++ return rewriteValueLOONG64_OpRotateLeft32(v) ++ case OpRotateLeft64: ++ return rewriteValueLOONG64_OpRotateLeft64(v) ++ case OpRotateLeft8: ++ return rewriteValueLOONG64_OpRotateLeft8(v) ++ case OpRound32F: ++ v.Op = OpCopy ++ return true ++ case OpRound64F: ++ v.Op = OpCopy ++ return true ++ case OpRsh16Ux16: ++ return rewriteValueLOONG64_OpRsh16Ux16(v) ++ case OpRsh16Ux32: ++ return rewriteValueLOONG64_OpRsh16Ux32(v) ++ case OpRsh16Ux64: ++ return rewriteValueLOONG64_OpRsh16Ux64(v) ++ case OpRsh16Ux8: ++ return rewriteValueLOONG64_OpRsh16Ux8(v) ++ case OpRsh16x16: ++ return rewriteValueLOONG64_OpRsh16x16(v) ++ case OpRsh16x32: ++ return rewriteValueLOONG64_OpRsh16x32(v) ++ case OpRsh16x64: ++ return rewriteValueLOONG64_OpRsh16x64(v) ++ case OpRsh16x8: ++ return rewriteValueLOONG64_OpRsh16x8(v) ++ case OpRsh32Ux16: ++ return rewriteValueLOONG64_OpRsh32Ux16(v) ++ case OpRsh32Ux32: ++ return rewriteValueLOONG64_OpRsh32Ux32(v) ++ case OpRsh32Ux64: ++ return rewriteValueLOONG64_OpRsh32Ux64(v) ++ case OpRsh32Ux8: ++ return rewriteValueLOONG64_OpRsh32Ux8(v) ++ case OpRsh32x16: ++ return rewriteValueLOONG64_OpRsh32x16(v) ++ case OpRsh32x32: ++ return rewriteValueLOONG64_OpRsh32x32(v) ++ case OpRsh32x64: ++ return rewriteValueLOONG64_OpRsh32x64(v) ++ case OpRsh32x8: ++ return rewriteValueLOONG64_OpRsh32x8(v) ++ case OpRsh64Ux16: ++ return rewriteValueLOONG64_OpRsh64Ux16(v) ++ case OpRsh64Ux32: ++ return rewriteValueLOONG64_OpRsh64Ux32(v) ++ case OpRsh64Ux64: ++ return rewriteValueLOONG64_OpRsh64Ux64(v) ++ case OpRsh64Ux8: ++ return rewriteValueLOONG64_OpRsh64Ux8(v) ++ case OpRsh64x16: ++ return rewriteValueLOONG64_OpRsh64x16(v) ++ case OpRsh64x32: ++ 
return rewriteValueLOONG64_OpRsh64x32(v) ++ case OpRsh64x64: ++ return rewriteValueLOONG64_OpRsh64x64(v) ++ case OpRsh64x8: ++ return rewriteValueLOONG64_OpRsh64x8(v) ++ case OpRsh8Ux16: ++ return rewriteValueLOONG64_OpRsh8Ux16(v) ++ case OpRsh8Ux32: ++ return rewriteValueLOONG64_OpRsh8Ux32(v) ++ case OpRsh8Ux64: ++ return rewriteValueLOONG64_OpRsh8Ux64(v) ++ case OpRsh8Ux8: ++ return rewriteValueLOONG64_OpRsh8Ux8(v) ++ case OpRsh8x16: ++ return rewriteValueLOONG64_OpRsh8x16(v) ++ case OpRsh8x32: ++ return rewriteValueLOONG64_OpRsh8x32(v) ++ case OpRsh8x64: ++ return rewriteValueLOONG64_OpRsh8x64(v) ++ case OpRsh8x8: ++ return rewriteValueLOONG64_OpRsh8x8(v) ++ case OpSelect0: ++ return rewriteValueLOONG64_OpSelect0(v) ++ case OpSelect1: ++ return rewriteValueLOONG64_OpSelect1(v) ++ case OpSignExt16to32: ++ v.Op = OpLOONG64MOVHreg ++ return true ++ case OpSignExt16to64: ++ v.Op = OpLOONG64MOVHreg ++ return true ++ case OpSignExt32to64: ++ v.Op = OpLOONG64MOVWreg ++ return true ++ case OpSignExt8to16: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSignExt8to32: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSignExt8to64: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSlicemask: ++ return rewriteValueLOONG64_OpSlicemask(v) ++ case OpSqrt: ++ v.Op = OpLOONG64SQRTD ++ return true ++ case OpSqrt32: ++ v.Op = OpLOONG64SQRTF ++ return true ++ case OpStaticCall: ++ v.Op = OpLOONG64CALLstatic ++ return true ++ case OpStore: ++ return rewriteValueLOONG64_OpStore(v) ++ case OpSub16: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub32: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub32F: ++ v.Op = OpLOONG64SUBF ++ return true ++ case OpSub64: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub64F: ++ v.Op = OpLOONG64SUBD ++ return true ++ case OpSub8: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSubPtr: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpTrunc16to8: ++ v.Op = OpCopy ++ return true ++ case OpTrunc32to16: ++ v.Op = OpCopy ++ return true 
++ case OpTrunc32to8: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to16: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to32: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to8: ++ v.Op = OpCopy ++ return true ++ case OpWB: ++ v.Op = OpLOONG64LoweredWB ++ return true ++ case OpXor16: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor32: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor64: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor8: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpZero: ++ return rewriteValueLOONG64_OpZero(v) ++ case OpZeroExt16to32: ++ v.Op = OpLOONG64MOVHUreg ++ return true ++ case OpZeroExt16to64: ++ v.Op = OpLOONG64MOVHUreg ++ return true ++ case OpZeroExt32to64: ++ v.Op = OpLOONG64MOVWUreg ++ return true ++ case OpZeroExt8to16: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ case OpZeroExt8to32: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ case OpZeroExt8to64: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpAddr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (Addr {sym} base) ++ // result: (MOVVaddr {sym} base) ++ for { ++ sym := auxToSym(v.Aux) ++ base := v_0 ++ v.reset(OpLOONG64MOVVaddr) ++ v.Aux = symToAux(sym) ++ v.AddArg(base) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpAvg64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Avg64u x y) ++ // result: (ADDV (SRLVconst (SUBV x y) [1]) y) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64ADDV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64SRLVconst, t) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) ++ v1.AddArg2(x, y) ++ v0.AddArg(v1) ++ v.AddArg2(v0, y) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom16(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com16 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, 
OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom32(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com32 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom64(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com64 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom8(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com8 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst16(v *Value) bool { ++ // match: (Const16 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt16(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst32(v *Value) bool { ++ // match: (Const32 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt32(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst32F(v *Value) bool { ++ // match: (Const32F [val]) ++ // result: (MOVFconst [float64(val)]) ++ for { ++ val := auxIntToFloat32(v.AuxInt) ++ v.reset(OpLOONG64MOVFconst) ++ v.AuxInt = float64ToAuxInt(float64(val)) ++ return true ++ } ++} ++func 
rewriteValueLOONG64_OpConst64(v *Value) bool { ++ // match: (Const64 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt64(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst64F(v *Value) bool { ++ // match: (Const64F [val]) ++ // result: (MOVDconst [float64(val)]) ++ for { ++ val := auxIntToFloat64(v.AuxInt) ++ v.reset(OpLOONG64MOVDconst) ++ v.AuxInt = float64ToAuxInt(float64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst8(v *Value) bool { ++ // match: (Const8 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt8(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConstBool(v *Value) bool { ++ // match: (ConstBool [t]) ++ // result: (MOVVconst [int64(b2i(t))]) ++ for { ++ t := auxIntToBool(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(b2i(t))) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConstNil(v *Value) bool { ++ // match: (ConstNil) ++ // result: (MOVVconst [0]) ++ for { ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div16 x y) ++ // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv16u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div16u x y) ++ // 
result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div32 x y) ++ // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div32u x y) ++ // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div64 x y) ++ // result: (Select1 (DIVV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv64u(v 
*Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div64u x y) ++ // result: (Select1 (DIVVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div8 x y) ++ // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv8u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div8u x y) ++ // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq16 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, 
typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq32 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Eq32F x y) ++ // result: (FPFlagTrue (CMPEQF x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq64 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Eq64F x y) ++ // result: (FPFlagTrue (CMPEQD x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) ++ v0.AddArg2(x, y) ++ 
v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq8 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEqB(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (EqB x y) ++ // result: (XOR (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEqPtr(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (EqPtr x y) ++ // result: (SGTU (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul32 x y) ++ // result: (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(32) ++ v0 := 
b.NewValue0(v.Pos, OpSelect1, typ.Int64) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul32u x y) ++ // result: (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(32) ++ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul64 x y) ++ // result: (Select0 (MULV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul64u x y) ++ // result: (Select0 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsInBounds(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (IsInBounds idx len) ++ // result: 
(SGTU len idx) ++ for { ++ idx := v_0 ++ len := v_1 ++ v.reset(OpLOONG64SGTU) ++ v.AddArg2(len, idx) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsNonNil(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (IsNonNil ptr) ++ // result: (SGTU ptr (MOVVconst [0])) ++ for { ++ ptr := v_0 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(ptr, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (IsSliceInBounds idx len) ++ // result: (XOR (MOVVconst [1]) (SGTU idx len)) ++ for { ++ idx := v_0 ++ len := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v1.AddArg2(idx, len) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (ADDV x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ADDVconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (ADDV x (NEGV y)) ++ // result: (SUBV x y) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64NEGV { ++ continue ++ } ++ y := v_1.Args[0] ++ v.reset(OpLOONG64SUBV) ++ v.AddArg2(x, y) ++ return true ++ } ++ break ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) ++ // cond: 
is32Bit(off1+int64(off2)) ++ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) ++ for { ++ off1 := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ if !(is32Bit(off1 + int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVaddr) ++ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg(ptr) ++ return true ++ } ++ // match: (ADDVconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ADDVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c+d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c + d) ++ return true ++ } ++ // match: (ADDVconst [c] (ADDVconst [d] x)) ++ // cond: is32Bit(c+d) ++ // result: (ADDVconst [c+d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c + d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c + d) ++ v.AddArg(x) ++ return true ++ } ++ // match: (ADDVconst [c] (SUBVconst [d] x)) ++ // cond: is32Bit(c-d) ++ // result: (ADDVconst [c-d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SUBVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c - d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c - d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (AND x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ANDconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op 
!= OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (AND x x) ++ // result: x ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ANDconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ANDconst [0] _) ++ // result: (MOVVconst [0]) ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (ANDconst [-1] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ANDconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c&d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c & d) ++ return true ++ } ++ // match: (ANDconst [c] (ANDconst [d] x)) ++ // result: (ANDconst [c&d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c & d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) ++ // cond: is32Bit(c) ++ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ mem := v_2 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64LoweredAtomicAddconst32) ++ v.AuxInt = int32ToAuxInt(int32(c)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ 
return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) ++ // cond: is32Bit(c) ++ // result: (LoweredAtomicAddconst64 [c] ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ mem := v_2 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64LoweredAtomicAddconst64) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) ++ // result: (LoweredAtomicStorezero32 ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ v.reset(OpLOONG64LoweredAtomicStorezero32) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) ++ // result: (LoweredAtomicStorezero64 ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ v.reset(OpLOONG64LoweredAtomicStorezero64) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if 
!(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVBUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint8(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint8(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := 
auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVBreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int8(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int8(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val 
mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVBreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = 
symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ 
off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := 
auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { ++ v_1 := v.Args[1] ++ 
v_0 := v.Args[0] ++ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: 
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = 
symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVHUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVHUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint16(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint16(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) 
++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVHreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVHload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVHreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int16(c))]) ++ for { ++ 
if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int16(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, 
x, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr 
[off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ 
return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVVreg x) ++ // cond: x.Uses == 1 ++ // result: (MOVVnop x) ++ for { ++ x := v_0 ++ if !(x.Uses == 1) { ++ break ++ } ++ v.reset(OpLOONG64MOVVnop) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVVreg (MOVVconst [c])) ++ // result: (MOVVconst [c]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true 
++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AuxInt = int32ToAuxInt(off1 + 
int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVWUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVWUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVHUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVWUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWUreg { ++ 
break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint32(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint32(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVWreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ 
return true ++ } ++ // match: (MOVWreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVWload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVWreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int32(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int32(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ 
v_0 := v.Args[0] ++ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVWstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVWstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ 
v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NEGV(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (NEGV (MOVVconst [c])) ++ // result: (MOVVconst [-c]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(-c) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NOR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // 
match: (NOR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (NORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64NORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (NORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [^(c|d)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(^(c | d)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64OR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (OR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (OR x x) ++ // result: x ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ORconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ORconst [-1] _) ++ // result: (MOVVconst [-1]) ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(-1) ++ return true ++ } ++ // match: (ORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c|d]) ++ for { ++ c := 
auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c | d) ++ return true ++ } ++ // match: (ORconst [c] (ORconst [d] x)) ++ // cond: is32Bit(c|d) ++ // result: (ORconst [c|d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ORconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c | d)) { ++ break ++ } ++ v.reset(OpLOONG64ORconst) ++ v.AuxInt = int64ToAuxInt(c | d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SGT (MOVVconst [c]) x) ++ // cond: is32Bit(c) ++ // result: (SGTconst [c] x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ x := v_1 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SGTconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SGTU (MOVVconst [c]) x) ++ // cond: is32Bit(c) ++ // result: (SGTUconst [c] x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ x := v_1 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SGTUconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTUconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SGTUconst [c] (MOVVconst [d])) ++ // cond: uint64(c)>uint64(d) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(c) > uint64(d)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVVconst [d])) ++ // cond: 
uint64(c)<=uint64(d) ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(c) <= uint64(d)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVBUreg _)) ++ // cond: 0xff < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVHUreg _)) ++ // cond: 0xffff < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (ANDconst [m] _)) ++ // cond: uint64(m) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ m := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(m) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (SRLVconst _ [d])) ++ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SRLVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SGTconst [c] (MOVVconst [d])) ++ // cond: c>d ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } 
++ d := auxIntToInt64(v_0.AuxInt) ++ if !(c > d) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVVconst [d])) ++ // cond: c<=d ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(c <= d) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBreg _)) ++ // cond: 0x7f < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBreg || !(0x7f < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBreg _)) ++ // cond: c <= -0x80 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBreg || !(c <= -0x80) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBUreg _)) ++ // cond: 0xff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHreg _)) ++ // cond: 0x7fff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHreg || !(0x7fff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHreg _)) ++ // cond: c <= -0x8000 ++ // result: (MOVVconst [0]) ++ for { ++ c := 
auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHreg || !(c <= -0x8000) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHUreg _)) ++ // cond: 0xffff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVWUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVWUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (ANDconst [m] _)) ++ // cond: 0 <= m && m < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ m := auxIntToInt64(v_0.AuxInt) ++ if !(0 <= m && m < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (SRLVconst _ [d])) ++ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SRLVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SLLV _ (MOVVconst [c])) ++ // cond: 
uint64(c)>=64 ++ // result: (MOVVconst [0]) ++ for { ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) >= 64) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SLLV x (MOVVconst [c])) ++ // result: (SLLVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SLLVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SLLVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [d<<uint64(c)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d << uint64(c)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRAV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SRAV x (MOVVconst [c])) ++ // cond: uint64(c)>=64 ++ // result: (SRAVconst x [63]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) >= 64) { ++ break ++ } ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(63) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SRAV x (MOVVconst [c])) ++ // result: (SRAVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SRAVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [d>>uint64(c)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d >> uint64(c)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SRLV _ (MOVVconst [c])) ++ // cond: uint64(c)>=64 ++ // result: (MOVVconst [0]) ++ for { ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) 
>= 64) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SRLV x (MOVVconst [c])) ++ // result: (SRLVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SRLVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SUBV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SUBV x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (SUBVconst [c] x) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SUBVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SUBV x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SUBV (MOVVconst [0]) x) ++ // result: (NEGV x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_1 ++ v.reset(OpLOONG64NEGV) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SUBVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SUBVconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (SUBVconst [c] (MOVVconst [d])) ++ // result: 
(MOVVconst [d-c]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d - c) ++ return true ++ } ++ // match: (SUBVconst [c] (SUBVconst [d] x)) ++ // cond: is32Bit(-c-d) ++ // result: (ADDVconst [-c-d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SUBVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(-c - d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(-c - d) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SUBVconst [c] (ADDVconst [d] x)) ++ // cond: is32Bit(-c+d) ++ // result: (ADDVconst [-c+d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(-c + d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(-c + d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64XOR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (XOR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (XORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (XOR x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64XORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (XORconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // 
match: (XORconst [-1] x) ++ // result: (NORconst [0] x) ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ x := v_0 ++ v.reset(OpLOONG64NORconst) ++ v.AuxInt = int64ToAuxInt(0) ++ v.AddArg(x) ++ return true ++ } ++ // match: (XORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c^d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c ^ d) ++ return true ++ } ++ // match: (XORconst [c] (XORconst [d] x)) ++ // cond: is32Bit(c^d) ++ // result: (XORconst [c^d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64XORconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c ^ d)) { ++ break ++ } ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(c ^ d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLeq16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq16 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq16U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq16U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, 
OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq32 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Leq32F x y) ++ // result: (FPFlagTrue (CMPGEF y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGEF, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq32U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := 
v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq64 x y) ++ // result: (XOR (MOVVconst [1]) (SGT x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Leq64F x y) ++ // result: (FPFlagTrue (CMPGED y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGED, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq64U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq8 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq8U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ 
typ := &b.Func.Config.Types ++ // match: (Leq8U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less16 x y) ++ // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess16U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less16U x y) ++ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less32 x y) ++ // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32F(v 
*Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Less32F x y) ++ // result: (FPFlagTrue (CMPGTF y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTF, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less32U x y) ++ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Less64 x y) ++ // result: (SGT y x) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v.AddArg2(y, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Less64F x y) ++ // result: (FPFlagTrue (CMPGTD y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTD, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Less64U x y) ++ // result: (SGTU y x) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v.AddArg2(y, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less8 x y) ++ // result: (SGT (SignExt8to64 y) (SignExt8to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ 
v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess8U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less8U x y) ++ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLoad(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Load ptr mem) ++ // cond: t.IsBoolean() ++ // result: (MOVBUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(t.IsBoolean()) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is8BitInt(t) && isSigned(t)) ++ // result: (MOVBload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is8BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is8BitInt(t) && !isSigned(t)) ++ // result: (MOVBUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is8BitInt(t) && !isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is16BitInt(t) && isSigned(t)) ++ // result: (MOVHload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is16BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is16BitInt(t) && !isSigned(t)) ++ // result: (MOVHUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is16BitInt(t) && !isSigned(t)) { ++ break 
++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is32BitInt(t) && isSigned(t)) ++ // result: (MOVWload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is32BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is32BitInt(t) && !isSigned(t)) ++ // result: (MOVWUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is32BitInt(t) && !isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is64BitInt(t) || isPtr(t)) ++ // result: (MOVVload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is64BitInt(t) || isPtr(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: is32BitFloat(t) ++ // result: (MOVFload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is32BitFloat(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: is64BitFloat(t) ++ // result: (MOVDload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is64BitFloat(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLocalAddr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (LocalAddr {sym} base _) ++ // result: (MOVVaddr {sym} base) ++ for { ++ sym := auxToSym(v.Aux) ++ base := v_0 ++ v.reset(OpLOONG64MOVVaddr) ++ v.Aux = symToAux(sym) ++ v.AddArg(base) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x 
(ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := 
v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, 
OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x 
(ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return 
true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod16 x y) ++ // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func 
rewriteValueLOONG64_OpMod16u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod16u x y) ++ // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod32 x y) ++ // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod32u x y) ++ // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod64 x y) ++ // result: (Select0 (DIVV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := 
b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod64u x y) ++ // result: (Select0 (DIVVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod8 x y) ++ // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod8u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod8u x y) ++ // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMove(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config ++ typ := &b.Func.Config.Types ++ // match: (Move [0] _ _ mem) ++ // result: mem ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ 
v.copyOf(mem) ++ return true ++ } ++ // match: (Move [1] dst src mem) ++ // result: (MOVBstore dst (MOVBload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 1 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [2] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore dst (MOVHload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [2] dst src mem) ++ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(1) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [4] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore dst (MOVWload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [4] {t} dst src mem) ++ // cond: t.Alignment()%2 == 
0 ++ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(2) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [4] dst src mem) ++ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(3) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(3) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AuxInt = int32ToAuxInt(2) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v4.AuxInt = int32ToAuxInt(1) ++ v4.AddArg2(src, mem) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v6 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v6.AddArg2(src, mem) ++ v5.AddArg3(dst, v6, mem) ++ v3.AddArg3(dst, v4, v5) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore dst (MOVVload src mem) mem) ++ for { ++ if 
auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AuxInt = int32ToAuxInt(4) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(6) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(6) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AuxInt = int32ToAuxInt(4) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(2) ++ v4 := b.NewValue0(v.Pos, 
OpLOONG64MOVHload, typ.Int16) ++ v4.AuxInt = int32ToAuxInt(2) ++ v4.AddArg2(src, mem) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v6 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v6.AddArg2(src, mem) ++ v5.AddArg3(dst, v6, mem) ++ v3.AddArg3(dst, v4, v5) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [3] dst src mem) ++ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 3 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(2) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AuxInt = int32ToAuxInt(1) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [6] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 6 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(4) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AuxInt = 
int32ToAuxInt(2) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [12] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 12 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AuxInt = int32ToAuxInt(8) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v2.AuxInt = int32ToAuxInt(4) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [16] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 16 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AuxInt = int32ToAuxInt(8) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) 
++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [24] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 24 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(16) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AuxInt = int32ToAuxInt(16) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(8) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v2.AuxInt = int32ToAuxInt(8) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [s] {t} dst src mem) ++ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) ++ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { ++ break ++ } ++ v.reset(OpLOONG64DUFFCOPY) ++ v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) ++ v.AddArg3(dst, src, mem) ++ return true ++ } ++ // match: (Move [s] {t} dst src mem) ++ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 ++ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(s > 24 && logLargeCopy(v, 
s) || t.Alignment()%8 != 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredMove) ++ v.AuxInt = int64ToAuxInt(t.Alignment()) ++ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type) ++ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) ++ v0.AddArg(src) ++ v.AddArg4(dst, src, v0, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpMul16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul16 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul32 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul64 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul8 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq16(v *Value) bool { ++ v_1 := v.Args[1] 
++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq16 x y) ++ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq32 x y) ++ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Neq32F x y) ++ // result: (FPFlagFalse (CMPEQF x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagFalse) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq64 x y) ++ // result: (SGTU (XOR x y) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v0.AddArg2(x, y) ++ v1 := b.NewValue0(v.Pos, 
OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Neq64F x y) ++ // result: (FPFlagFalse (CMPEQD x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagFalse) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq8 x y) ++ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeqPtr(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (NeqPtr x y) ++ // result: (SGTU (XOR x y) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v0.AddArg2(x, y) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNot(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (Not x) ++ // result: (XORconst [1] x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(1) ++ v.AddArg(x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpOffPtr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (OffPtr [off] ptr:(SP)) ++ // result: (MOVVaddr 
[int32(off)] ptr) ++ for { ++ off := auxIntToInt64(v.AuxInt) ++ ptr := v_0 ++ if ptr.Op != OpSP { ++ break ++ } ++ v.reset(OpLOONG64MOVVaddr) ++ v.AuxInt = int32ToAuxInt(int32(off)) ++ v.AddArg(ptr) ++ return true ++ } ++ // match: (OffPtr [off] ptr) ++ // result: (ADDVconst [off] ptr) ++ for { ++ off := auxIntToInt64(v.AuxInt) ++ ptr := v_0 ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(off) ++ v.AddArg(ptr) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpPanicBounds(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 0 ++ // result: (LoweredPanicBoundsA [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsA) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 1 ++ // result: (LoweredPanicBoundsB [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 1) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsB) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 2 ++ // result: (LoweredPanicBoundsC [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 2) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsC) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft16 x (MOVVconst [c])) ++ // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) ++ for { ++ t 
:= v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr16) ++ v0 := b.NewValue0(v.Pos, OpLsh16x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 15) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 15) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft32 x (MOVVconst [c])) ++ // result: (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr32) ++ v0 := b.NewValue0(v.Pos, OpLsh32x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 31) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 31) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft64 x (MOVVconst [c])) ++ // result: (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr64) ++ v0 := b.NewValue0(v.Pos, OpLsh64x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 63) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) 
++ v3.AuxInt = int64ToAuxInt(-c & 63) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft8 x (MOVVconst [c])) ++ // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr8) ++ v0 := b.NewValue0(v.Pos, OpLsh8x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 7) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 7) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRsh16Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) 
(SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ 
v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x16 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x32 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ 
typ := &b.Func.Config.Types ++ // match: (Rsh16x64 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x8 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = 
int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] 
++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x16 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x32 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) 
++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x64 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x8 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } 
++} ++func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt 
= int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x16 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x32 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ 
v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x64 x y) ++ // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(y, v3) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, y) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x8 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux16 x 
y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) 
++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x16 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x32(v *Value) bool { ++ v_1 := 
v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x32 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x64 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x8 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, 
OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpSelect0(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Select0 (Mul64uover x y)) ++ // result: (Select1 (MULVU x y)) ++ for { ++ if v_0.Op != OpMul64uover { ++ break ++ } ++ y := v_0.Args[1] ++ x := v_0.Args[0] ++ v.reset(OpSelect1) ++ v.Type = typ.UInt64 ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++ // match: (Select0 (DIVVU _ (MOVVconst [1]))) ++ // result: (MOVVconst [0]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (Select0 (DIVVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (ANDconst [c-1] x) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c - 1) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c%d]) ++ for { ++ if v_0.Op != OpLOONG64DIVV { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := 
auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c % d) ++ return true ++ } ++ // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [int64(uint64(c)%uint64(d))]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpSelect1(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Select1 (Mul64uover x y)) ++ // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) ++ for { ++ if v_0.Op != OpMul64uover { ++ break ++ } ++ y := v_0.Args[1] ++ x := v_0.Args[0] ++ v.reset(OpLOONG64SGTU) ++ v.Type = typ.Bool ++ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1.AddArg2(x, y) ++ v0.AddArg(v1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [-1]))) ++ // result: (NEGV x) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 { ++ break ++ } ++ v.reset(OpLOONG64NEGV) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (MULVU _ (MOVVconst [0]))) ++ // result: (MOVVconst [0]) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) 
++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [1]))) ++ // result: x ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (SLLVconst [log64(c)] x) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64SLLVconst) ++ v.AuxInt = int64ToAuxInt(log64(c)) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (DIVVU x (MOVVconst [1]))) ++ // result: x ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ // match: (Select1 (DIVVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (SRLVconst [log64(c)] x) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(log64(c)) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c*d]) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ 
v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c * d) ++ return true ++ } ++ // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c/d]) ++ for { ++ if v_0.Op != OpLOONG64DIVV { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c / d) ++ return true ++ } ++ // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [int64(uint64(c)/uint64(d))]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpSlicemask(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Slicemask x) ++ // result: (SRAVconst (NEGV x) [63]) ++ for { ++ t := v.Type ++ x := v_0 ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(63) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v0.AddArg(x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpStore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 1 ++ // result: (MOVBstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 1) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 2 ++ // result: (MOVHstore ptr val mem) ++ for { ++ t := 
auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 2) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 4 && !is32BitFloat(val.Type) ++ // result: (MOVWstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 4 && !is32BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 8 && !is64BitFloat(val.Type) ++ // result: (MOVVstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 8 && !is64BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 4 && is32BitFloat(val.Type) ++ // result: (MOVFstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 4 && is32BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 8 && is64BitFloat(val.Type) ++ // result: (MOVDstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 8 && is64BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpZero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config ++ typ := &b.Func.Config.Types ++ // match: (Zero [0] _ mem) ++ // result: mem ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ mem := v_1 ++ v.copyOf(mem) ++ return true ++ } ++ // match: (Zero [1] ptr mem) ++ // result: (MOVBstore ptr (MOVVconst [0]) mem) ++ 
for { ++ if auxIntToInt64(v.AuxInt) != 1 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [2] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [2] ptr mem) ++ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [4] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [4] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ 
} ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [4] ptr mem) ++ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(3) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(0) ++ v3.AddArg3(ptr, v0, mem) ++ v2.AddArg3(ptr, v0, v3) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = 
int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(6) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(0) ++ v3.AddArg3(ptr, v0, mem) ++ v2.AddArg3(ptr, v0, v3) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [3] ptr mem) ++ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 3 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [6] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ 
// result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 6 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [12] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 12 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [16] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 16 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = 
int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [24] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 24 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(16) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(8) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [s] {t} ptr mem) ++ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice ++ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { ++ break ++ } ++ v.reset(OpLOONG64DUFFZERO) ++ v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Zero [s] {t} ptr mem) ++ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 ++ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredZero) ++ v.AuxInt = int64ToAuxInt(t.Alignment()) ++ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, 
ptr.Type) ++ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) ++ v0.AddArg(ptr) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ return false ++} ++func rewriteBlockLOONG64(b *Block) bool { ++ switch b.Kind { ++ case BlockLOONG64EQ: ++ // match: (EQ (FPFlagTrue cmp) yes no) ++ // result: (FPF cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagTrue { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPF, cmp) ++ return true ++ } ++ // match: (EQ (FPFlagFalse cmp) yes no) ++ // result: (FPT cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagFalse { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPT, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGT { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTU { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != 
OpLOONG64SGTUconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (SGTUconst [1] x) yes no) ++ // result: (NE x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTUconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64NE, x) ++ return true ++ } ++ // match: (EQ (SGTU x (MOVVconst [0])) yes no) ++ // result: (EQ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTU { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, x) ++ return true ++ } ++ // match: (EQ (SGTconst [0] x) yes no) ++ // result: (GEZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64GEZ, x) ++ return true ++ } ++ // match: (EQ (SGT x (MOVVconst [0])) yes no) ++ // result: (LEZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGT { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64LEZ, x) ++ return true ++ } ++ // match: (EQ (MOVVconst [0]) yes no) ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (EQ (MOVVconst [c]) yes no) ++ // cond: c != 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c != 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64GEZ: ++ // match: (GEZ (MOVVconst [c]) yes no) ++ // cond: c >= 0 ++ // result: 
(First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c >= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (GEZ (MOVVconst [c]) yes no) ++ // cond: c < 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c < 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64GTZ: ++ // match: (GTZ (MOVVconst [c]) yes no) ++ // cond: c > 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c > 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (GTZ (MOVVconst [c]) yes no) ++ // cond: c <= 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c <= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockIf: ++ // match: (If cond yes no) ++ // result: (NE cond yes no) ++ for { ++ cond := b.Controls[0] ++ b.resetWithControl(BlockLOONG64NE, cond) ++ return true ++ } ++ case BlockLOONG64LEZ: ++ // match: (LEZ (MOVVconst [c]) yes no) ++ // cond: c <= 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c <= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (LEZ (MOVVconst [c]) yes no) ++ // cond: c > 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c > 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64LTZ: ++ // match: (LTZ (MOVVconst [c]) yes no) ++ // cond: c < 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { 
++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c < 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (LTZ (MOVVconst [c]) yes no) ++ // cond: c >= 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c >= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64NE: ++ // match: (NE (FPFlagTrue cmp) yes no) ++ // result: (FPT cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagTrue { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPT, cmp) ++ return true ++ } ++ // match: (NE (FPFlagFalse cmp) yes no) ++ // result: (FPF cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagFalse { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPF, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGT { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTU { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) ++ 
// result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTUconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (SGTUconst [1] x) yes no) ++ // result: (EQ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTUconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64EQ, x) ++ return true ++ } ++ // match: (NE (SGTU x (MOVVconst [0])) yes no) ++ // result: (NE x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTU { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, x) ++ return true ++ } ++ // match: (NE (SGTconst [0] x) yes no) ++ // result: (LTZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64LTZ, x) ++ return true ++ } ++ // match: (NE (SGT x (MOVVconst [0])) yes no) ++ // result: (GTZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGT { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64GTZ, x) ++ return true ++ } ++ // match: (NE (MOVVconst [0]) yes no) ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ // match: (NE (MOVVconst [c]) yes no) ++ // cond: c != 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := 
auxIntToInt64(v_0.AuxInt) ++ if !(c != 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ } ++ return false ++} +-- +2.38.0 + diff --git a/loongarch64/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch b/loongarch64/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch new file mode 100644 index 0000000..db608b7 --- /dev/null +++ b/loongarch64/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch @@ -0,0 +1,27 @@ +From fe8c9ac887fa106839fdec0a860a21aa4424e98d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 10:26:47 +0800 +Subject: [PATCH 10/82] cmd/compile/internal/ssa: inline memmove with known + size + +Change-Id: I1534b66b527efaf2bbaa8e6e6ac0618aac0b5930 +--- + src/cmd/compile/internal/ssa/rewrite.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go +index 375c4d5a56..8a9cbcd3c3 100644 +--- a/src/cmd/compile/internal/ssa/rewrite.go ++++ b/src/cmd/compile/internal/ssa/rewrite.go +@@ -1345,7 +1345,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { + return sz <= 8 + case "s390x", "ppc64", "ppc64le": + return sz <= 8 || disjoint(dst, sz, src, sz) +- case "arm", "mips", "mips64", "mipsle", "mips64le": ++ case "arm", "loong64", "mips", "mips64", "mipsle", "mips64le": + return sz <= 4 + } + return false +-- +2.38.0 + diff --git a/loongarch64/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch b/loongarch64/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch new file mode 100644 index 0000000..52df437 --- /dev/null +++ b/loongarch64/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch @@ -0,0 +1,37 @@ +From ddbb53bb835d86e486a357ea0d4a5e901dab269f Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 11:41:03 +0800 +Subject: [PATCH 11/82] cmd/compile/internal/ssa: add support on loong64 for + schedule phase + +Change-Id: 
Id533912c62d8c4e2aa3c124561772b543d685d7d +--- + src/cmd/compile/internal/ssa/schedule.go | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go +index 4e3e5e75e3..e6f6cf2550 100644 +--- a/src/cmd/compile/internal/ssa/schedule.go ++++ b/src/cmd/compile/internal/ssa/schedule.go +@@ -78,7 +78,7 @@ func (h ValHeap) Less(i, j int) bool { + func (op Op) isLoweredGetClosurePtr() bool { + switch op { + case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr, +- Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, ++ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpLOONG64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, + OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr: + return true + } +@@ -128,7 +128,8 @@ func schedule(f *Func) { + v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck || + v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck || + v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck || +- v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck: ++ v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck || ++ v.Op == OpLOONG64LoweredNilCheck: + // Nil checks must come before loads from the same address. 
+ score[v.ID] = ScoreNilCheck + case v.Op == OpPhi: +-- +2.38.0 + diff --git a/loongarch64/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch b/loongarch64/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch new file mode 100644 index 0000000..6aec22f --- /dev/null +++ b/loongarch64/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch @@ -0,0 +1,172 @@ +From 57f92a4608047559e4aaee90aafffe892c28e7b4 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 11:59:37 +0800 +Subject: [PATCH 12/82] cmd/compile/internal/ssagen: enable intrinsic operation + on loong64 + +Change-Id: If28fe03297e1de62f348373f2779dce07f54611c +--- + src/cmd/compile/internal/ssagen/ssa.go | 40 +++++++++++++------------- + 1 file changed, 20 insertions(+), 20 deletions(-) + +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index 1cbe414865..f61f1c2e2d 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -3810,7 +3810,7 @@ func InitTables() { + } + return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) + }, +- sys.AMD64, sys.I386, sys.MIPS64) ++ sys.AMD64, sys.I386, sys.MIPS64, sys.Loong64) + add("runtime", "KeepAlive", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) +@@ -3865,21 +3865,21 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], 
types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "LoadAcq", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) +@@ -3900,32 +3900,32 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Store", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = 
s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StorepNoWB", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StoreRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) +@@ -3945,14 +3945,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xchg64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, 
sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) + +@@ -4010,14 +4010,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xadd64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Xadd", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), +@@ -4032,14 +4032,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Cas64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, 
sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "CasRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) +@@ -4155,7 +4155,7 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) + }, +- sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) ++ sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) + addF("math", "Trunc", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) +@@ -4536,9 +4536,9 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) + }, +- sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) +- alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) +- alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) ++ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.Loong64) ++ alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchLoong64) ++ alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchLoong64) 
+ addF("math/bits", "Add64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) +@@ -7438,7 +7438,7 @@ func (s *State) Call(v *ssa.Value) *obj.Prog { + switch Arch.LinkArch.Family { + case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm: + p.To.Type = obj.TYPE_REG +- case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: ++ case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64: + p.To.Type = obj.TYPE_MEM + default: + base.Fatalf("unknown indirect call family") +-- +2.38.0 + diff --git a/loongarch64/0013-cmd-compile-internal-fix-test-error-on-loong64.patch b/loongarch64/0013-cmd-compile-internal-fix-test-error-on-loong64.patch new file mode 100644 index 0000000..53f9da3 --- /dev/null +++ b/loongarch64/0013-cmd-compile-internal-fix-test-error-on-loong64.patch @@ -0,0 +1,44 @@ +From dcc1d9f42332833908896abb2d500f965f5859a4 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 14:20:39 +0800 +Subject: [PATCH 13/82] cmd/compile/internal: fix test error on loong64 + +Change-Id: I4ca290bf725425a9a6ac2c6767a5bf4ff2339d0e +--- + src/cmd/compile/internal/logopt/logopt_test.go | 2 +- + src/cmd/compile/internal/test/inl_test.go | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go +index 71976174b0..81318699a1 100644 +--- a/src/cmd/compile/internal/logopt/logopt_test.go ++++ b/src/cmd/compile/internal/logopt/logopt_test.go +@@ -155,7 +155,7 @@ func s15a8(x *[15]int64) [15]int64 { + arches := []string{runtime.GOARCH} + goos0 := runtime.GOOS + if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js") +- arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"} ++ arches = []string{"arm", "arm64", "386", "amd64", 
"mips", "mips64", "loong64", "ppc64le", "riscv64", "s390x", "wasm"} + goos0 = "linux" + } + +diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go +index 6f100033cf..c91912558b 100644 +--- a/src/cmd/compile/internal/test/inl_test.go ++++ b/src/cmd/compile/internal/test/inl_test.go +@@ -160,10 +160,10 @@ func TestIntendedInlining(t *testing.T) { + }, + } + +- if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { ++ if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { + // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable. + // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386. +- // On mips64x and riscv64, Ctz64 is not intrinsified and causes nextFreeFast too expensive ++ // On loong64, mips64x and riscv64, Ctz64 is not intrinsified and causes nextFreeFast too expensive + // to inline (Issue 22239). 
+ want["runtime"] = append(want["runtime"], "nextFreeFast") + } +-- +2.38.0 + diff --git a/loongarch64/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch b/loongarch64/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch new file mode 100644 index 0000000..b2b754a --- /dev/null +++ b/loongarch64/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch @@ -0,0 +1,3323 @@ +From d12a00219a8c4f868e03f965058d1e3af33ebf4e Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:01:31 +0800 +Subject: [PATCH 14/82] cmd/internal/obj: instructions and registers for + loong64 + +Change-Id: I930d2a19246496e3ca36d55539183c0f9f650ad9 +--- + src/cmd/internal/obj/link.go | 1 + + src/cmd/internal/obj/loong64/a.out.go | 414 +++++ + src/cmd/internal/obj/loong64/anames.go | 130 ++ + src/cmd/internal/obj/loong64/asm.go | 1960 ++++++++++++++++++++++++ + src/cmd/internal/obj/loong64/cnames.go | 43 + + src/cmd/internal/obj/loong64/list.go | 46 + + src/cmd/internal/obj/loong64/obj.go | 625 ++++++++ + src/cmd/internal/obj/util.go | 19 +- + 8 files changed, 3229 insertions(+), 9 deletions(-) + create mode 100644 src/cmd/internal/obj/loong64/a.out.go + create mode 100644 src/cmd/internal/obj/loong64/anames.go + create mode 100644 src/cmd/internal/obj/loong64/asm.go + create mode 100644 src/cmd/internal/obj/loong64/cnames.go + create mode 100644 src/cmd/internal/obj/loong64/list.go + create mode 100644 src/cmd/internal/obj/loong64/obj.go + +diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go +index 28626e6e03..957b20661d 100644 +--- a/src/cmd/internal/obj/link.go ++++ b/src/cmd/internal/obj/link.go +@@ -435,6 +435,7 @@ const ( + ABasePPC64 + ABaseARM64 + ABaseMIPS ++ ABaseLOONG64 + ABaseRISCV + ABaseS390X + ABaseWasm +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +new file mode 100644 +index 0000000000..e3857eac04 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -0,0 
+1,414 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++) ++ ++//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p loong64 ++ ++const ( ++ NSNAME = 8 ++ NSYM = 50 ++ NREG = 32 // number of general registers ++ NFREG = 32 // number of floating point registers ++) ++ ++const ( ++ REG_R0 = obj.RBaseLOONG64 + iota // must be a multiple of 32 ++ REG_R1 ++ REG_R2 ++ REG_R3 ++ REG_R4 ++ REG_R5 ++ REG_R6 ++ REG_R7 ++ REG_R8 ++ REG_R9 ++ REG_R10 ++ REG_R11 ++ REG_R12 ++ REG_R13 ++ REG_R14 ++ REG_R15 ++ REG_R16 ++ REG_R17 ++ REG_R18 ++ REG_R19 ++ REG_R20 ++ REG_R21 ++ REG_R22 ++ REG_R23 ++ REG_R24 ++ REG_R25 ++ REG_R26 ++ REG_R27 ++ REG_R28 ++ REG_R29 ++ REG_R30 ++ REG_R31 ++ ++ REG_F0 // must be a multiple of 32 ++ REG_F1 ++ REG_F2 ++ REG_F3 ++ REG_F4 ++ REG_F5 ++ REG_F6 ++ REG_F7 ++ REG_F8 ++ REG_F9 ++ REG_F10 ++ REG_F11 ++ REG_F12 ++ REG_F13 ++ REG_F14 ++ REG_F15 ++ REG_F16 ++ REG_F17 ++ REG_F18 ++ REG_F19 ++ REG_F20 ++ REG_F21 ++ REG_F22 ++ REG_F23 ++ REG_F24 ++ REG_F25 ++ REG_F26 ++ REG_F27 ++ REG_F28 ++ REG_F29 ++ REG_F30 ++ REG_F31 ++ ++ REG_FCSR0 // must be a multiple of 32 ++ REG_FCSR1 ++ REG_FCSR2 ++ REG_FCSR3 // only four registers are needed ++ REG_FCSR4 ++ REG_FCSR5 ++ REG_FCSR6 ++ REG_FCSR7 ++ REG_FCSR8 ++ REG_FCSR9 ++ REG_FCSR10 ++ REG_FCSR11 ++ REG_FCSR12 ++ REG_FCSR13 ++ REG_FCSR14 ++ REG_FCSR15 ++ REG_FCSR16 ++ REG_FCSR17 ++ REG_FCSR18 ++ REG_FCSR19 ++ REG_FCSR20 ++ REG_FCSR21 ++ REG_FCSR22 ++ REG_FCSR23 ++ REG_FCSR24 ++ REG_FCSR25 ++ REG_FCSR26 ++ REG_FCSR27 ++ REG_FCSR28 ++ REG_FCSR29 ++ REG_FCSR30 ++ REG_FCSR31 ++ ++ REG_FCC0 // must be a multiple of 32 ++ REG_FCC1 ++ REG_FCC2 ++ REG_FCC3 ++ REG_FCC4 ++ REG_FCC5 ++ REG_FCC6 ++ REG_FCC7 // only eight registers are needed ++ REG_FCC8 ++ REG_FCC9 ++ REG_FCC10 ++ REG_FCC11 ++ REG_FCC12 ++ REG_FCC13 ++ REG_FCC14 ++ 
REG_FCC15 ++ REG_FCC16 ++ REG_FCC17 ++ REG_FCC18 ++ REG_FCC19 ++ REG_FCC20 ++ REG_FCC21 ++ REG_FCC22 ++ REG_FCC23 ++ REG_FCC24 ++ REG_FCC25 ++ REG_FCC26 ++ REG_FCC27 ++ REG_FCC28 ++ REG_FCC29 ++ REG_FCC30 ++ REG_FCC31 ++ ++ REG_LAST = REG_FCC31 // the last defined register ++ ++ REG_SPECIAL = REG_FCSR0 ++ ++ REGZERO = REG_R0 // set to zero ++ REGLINK = REG_R1 ++ REGSP = REG_R3 ++ REGRET = REG_R19 ++ REGARG = -1 // -1 disables passing the first argument in register ++ REGRT1 = REG_R19 // reserved for runtime, duffzero and duffcopy ++ REGRT2 = REG_R20 // reserved for runtime, duffcopy ++ REGCTXT = REG_R29 // context for closures ++ REGG = REG_R22 // G in loong64 ++ REGTMP = REG_R30 // used by the assembler ++ FREGRET = REG_F0 ++) ++ ++var LOONG64DWARFRegisters = map[int16]int16{} ++ ++func init() { ++ // f assigns dwarfregisters[from:to] = (base):(to-from+base) ++ f := func(from, to, base int16) { ++ for r := int16(from); r <= to; r++ { ++ LOONG64DWARFRegisters[r] = (r - from) + base ++ } ++ } ++ f(REG_R0, REG_R31, 0) ++ f(REG_F0, REG_F31, 32) ++ ++} ++ ++const ( ++ BIG = 2046 ++) ++ ++const ( ++ // mark flags ++ LABEL = 1 << 0 ++ LEAF = 1 << 1 ++ SYNC = 1 << 2 ++ BRANCH = 1 << 3 ++) ++ ++const ( ++ C_NONE = iota ++ C_REG ++ C_FREG ++ C_FCSRREG ++ C_FCCREG ++ C_ZCON ++ C_SCON // 12 bit signed ++ C_UCON // 32 bit signed, low 12 bits 0 ++ C_ADD0CON ++ C_AND0CON ++ C_ADDCON // -0x800 <= v < 0 ++ C_ANDCON // 0 < v <= 0xFFF ++ C_LCON // other 32 ++ C_DCON // other 64 (could subdivide further) ++ C_SACON // $n(REG) where n <= int12 ++ C_SECON ++ C_LACON // $n(REG) where int12 < n <= int32 ++ C_LECON ++ C_DACON // $n(REG) where int32 < n ++ C_STCON // $tlsvar ++ C_SBRA ++ C_LBRA ++ C_SAUTO ++ C_LAUTO ++ C_SEXT ++ C_LEXT ++ C_ZOREG ++ C_SOREG ++ C_LOREG ++ C_GOK ++ C_ADDR ++ C_TLS ++ C_TEXTSIZE ++ ++ C_NCLASS // must be the last ++) ++ ++const ( ++ AABSD = obj.ABaseLOONG64 + obj.A_ARCHSPECIFIC + iota ++ AABSF ++ AADD ++ AADDD ++ AADDF ++ AADDU ++ ++ AADDW ++ AAND ++ ABEQ ++ 
ABGEZ ++ ABLEZ ++ ABGTZ ++ ABLTZ ++ ABFPF ++ ABFPT ++ ++ ABNE ++ ABREAK ++ ACLO ++ ACLZ ++ ++ ACMPEQD ++ ACMPEQF ++ ++ ACMPGED // ACMPGED -> fcmp.sle.d ++ ACMPGEF // ACMPGEF -> fcmp.sle.s ++ ACMPGTD // ACMPGTD -> fcmp.slt.d ++ ACMPGTF // ACMPGTF -> fcmp.slt.s ++ ++ ALU12IW ++ ALU32ID ++ ALU52ID ++ APCADDU12I ++ AJIRL ++ ABGE ++ ABLT ++ ABLTU ++ ABGEU ++ ++ ADIV ++ ADIVD ++ ADIVF ++ ADIVU ++ ADIVW ++ ++ ALL ++ ALLV ++ ++ ALUI ++ ++ AMOVB ++ AMOVBU ++ ++ AMOVD ++ AMOVDF ++ AMOVDW ++ AMOVF ++ AMOVFD ++ AMOVFW ++ ++ AMOVH ++ AMOVHU ++ AMOVW ++ ++ AMOVWD ++ AMOVWF ++ ++ AMOVWL ++ AMOVWR ++ ++ AMUL ++ AMULD ++ AMULF ++ AMULU ++ AMULH ++ AMULHU ++ AMULW ++ ANEGD ++ ANEGF ++ ++ ANEGW ++ ANEGV ++ ++ ANOOP // hardware nop ++ ANOR ++ AOR ++ AREM ++ AREMU ++ ++ ARFE ++ ++ ASC ++ ASCV ++ ++ ASGT ++ ASGTU ++ ++ ASLL ++ ASQRTD ++ ASQRTF ++ ASRA ++ ASRL ++ ASUB ++ ASUBD ++ ASUBF ++ ++ ASUBU ++ ASUBW ++ ADBAR ++ ASYSCALL ++ ++ ATEQ ++ ATNE ++ ++ AWORD ++ ++ AXOR ++ ++ // 64-bit ++ AMOVV ++ AMOVVL ++ AMOVVR ++ ++ ASLLV ++ ASRAV ++ ASRLV ++ ADIVV ++ ADIVVU ++ ++ AREMV ++ AREMVU ++ ++ AMULV ++ AMULVU ++ AMULHV ++ AMULHVU ++ AADDV ++ AADDVU ++ ASUBV ++ ASUBVU ++ ++ // 64-bit FP ++ ATRUNCFV ++ ATRUNCDV ++ ATRUNCFW ++ ATRUNCDW ++ ++ AMOVWU ++ AMOVFV ++ AMOVDV ++ AMOVVF ++ AMOVVD ++ ++ ALAST ++ ++ // aliases ++ AJMP = obj.AJMP ++ AJAL = obj.ACALL ++ ARET = obj.ARET ++) ++ ++func init() { ++ // The asm encoder generally assumes that the lowest 5 bits of the ++ // REG_XX constants match the machine instruction encoding, i.e. ++ // the lowest 5 bits is the register number. ++ // Check this here. 
++ if REG_R0%32 != 0 { ++ panic("REG_R0 is not a multiple of 32") ++ } ++ if REG_F0%32 != 0 { ++ panic("REG_F0 is not a multiple of 32") ++ } ++ if REG_FCSR0%32 != 0 { ++ panic("REG_FCSR0 is not a multiple of 32") ++ } ++ if REG_FCC0%32 != 0 { ++ panic("REG_FCC0 is not a multiple of 32") ++ } ++} +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +new file mode 100644 +index 0000000000..48d8a78828 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/anames.go +@@ -0,0 +1,130 @@ ++// Code generated by stringer -i a.out.go -o anames.go -p loong64; DO NOT EDIT. ++ ++package loong64 ++ ++import "cmd/internal/obj" ++ ++var Anames = []string{ ++ obj.A_ARCHSPECIFIC: "ABSD", ++ "ABSF", ++ "ADD", ++ "ADDD", ++ "ADDF", ++ "ADDU", ++ "ADDW", ++ "AND", ++ "BEQ", ++ "BGEZ", ++ "BLEZ", ++ "BGTZ", ++ "BLTZ", ++ "BFPF", ++ "BFPT", ++ "BNE", ++ "BREAK", ++ "CLO", ++ "CLZ", ++ "CMPEQD", ++ "CMPEQF", ++ "CMPGED", ++ "CMPGEF", ++ "CMPGTD", ++ "CMPGTF", ++ "LU12IW", ++ "LU32ID", ++ "LU52ID", ++ "PCADDU12I", ++ "JIRL", ++ "BGE", ++ "BLT", ++ "BLTU", ++ "BGEU", ++ "DIV", ++ "DIVD", ++ "DIVF", ++ "DIVU", ++ "DIVW", ++ "LL", ++ "LLV", ++ "LUI", ++ "MOVB", ++ "MOVBU", ++ "MOVD", ++ "MOVDF", ++ "MOVDW", ++ "MOVF", ++ "MOVFD", ++ "MOVFW", ++ "MOVH", ++ "MOVHU", ++ "MOVW", ++ "MOVWD", ++ "MOVWF", ++ "MOVWL", ++ "MOVWR", ++ "MUL", ++ "MULD", ++ "MULF", ++ "MULU", ++ "MULH", ++ "MULHU", ++ "MULW", ++ "NEGD", ++ "NEGF", ++ "NEGW", ++ "NEGV", ++ "NOOP", ++ "NOR", ++ "OR", ++ "REM", ++ "REMU", ++ "RFE", ++ "SC", ++ "SCV", ++ "SGT", ++ "SGTU", ++ "SLL", ++ "SQRTD", ++ "SQRTF", ++ "SRA", ++ "SRL", ++ "SUB", ++ "SUBD", ++ "SUBF", ++ "SUBU", ++ "SUBW", ++ "DBAR", ++ "SYSCALL", ++ "TEQ", ++ "TNE", ++ "WORD", ++ "XOR", ++ "MOVV", ++ "MOVVL", ++ "MOVVR", ++ "SLLV", ++ "SRAV", ++ "SRLV", ++ "DIVV", ++ "DIVVU", ++ "REMV", ++ "REMVU", ++ "MULV", ++ "MULVU", ++ "MULHV", ++ "MULHVU", ++ "ADDV", ++ "ADDVU", ++ "SUBV", ++ "SUBVU", ++ "TRUNCFV", ++ "TRUNCDV", ++ "TRUNCFW", 
++ "TRUNCDW", ++ "MOVWU", ++ "MOVFV", ++ "MOVDV", ++ "MOVVF", ++ "MOVVD", ++ "LAST", ++} +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +new file mode 100644 +index 0000000000..345366f004 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -0,0 +1,1960 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "fmt" ++ "log" ++ "sort" ++) ++ ++// ctxt0 holds state while assembling a single function. ++// Each function gets a fresh ctxt0. ++// This allows for multiple functions to be safely concurrently assembled. ++type ctxt0 struct { ++ ctxt *obj.Link ++ newprog obj.ProgAlloc ++ cursym *obj.LSym ++ autosize int32 ++ instoffset int64 ++ pc int64 ++} ++ ++// Instruction layout. ++ ++const ( ++ FuncAlign = 4 ++) ++ ++type Optab struct { ++ as obj.As ++ a1 uint8 ++ a2 uint8 ++ a3 uint8 ++ type_ int8 ++ size int8 ++ param int16 ++ family sys.ArchFamily ++ flag uint8 ++} ++ ++const ( ++ NOTUSETMP = 1 << iota // p expands to multiple instructions, but does NOT use REGTMP ++) ++ ++var optab = []Optab{ ++ {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, ++ {AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0, NOTUSETMP}, ++ {AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, sys.Loong64, NOTUSETMP}, ++ ++ {ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {AADDV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ 
{AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ ++ {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, ++ {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, ++ {ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, sys.Loong64, 0}, ++ {ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, sys.Loong64, 0}, ++ {ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, ++ ++ {AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0, 0}, ++ {AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0, 0}, ++ {ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0, 0}, ++ {AABSF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ {AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ 
{AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {ASCV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ ++ {AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, 0, 
sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ ++ {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, ++ {AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, 
++ {AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, ++ {AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVWU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVV, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVWU, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVBU, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ ++ {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0, 0}, ++ {AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, 0, 0, NOTUSETMP}, ++ {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, 0, sys.Loong64, NOTUSETMP}, ++ {AMOVV, C_LECON, C_NONE, C_REG, 52, 8, 0, sys.Loong64, NOTUSETMP}, ++ ++ {AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0, 0}, ++ {AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, ++ {AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, ++ {AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVW, C_STCON, C_NONE, C_REG, 55, 12, 0, 0, 0}, ++ 
{AMOVV, C_STCON, C_NONE, C_REG, 55, 12, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0, 0}, ++ {AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0, NOTUSETMP}, ++ {AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, sys.Loong64, NOTUSETMP}, ++ {AMOVV, C_DCON, C_NONE, C_REG, 59, 16, 0, sys.Loong64, NOTUSETMP}, ++ ++ {AMUL, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {AMUL, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {AMULV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AMULV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ ++ {AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, ++ {AADD, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, ++ {AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, ++ {AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, ++ ++ {AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, sys.Loong64, 0}, ++ {AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, sys.Loong64, 0}, ++ {AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, sys.Loong64, 0}, ++ {AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, sys.Loong64, 0}, ++ ++ {AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, ++ {AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, ++ {AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, ++ {AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, ++ ++ {AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, ++ {AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, ++ {AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, sys.Loong64, 0}, ++ {AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, sys.Loong64, 0}, ++ {AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, ++ {AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, ++ ++ {AADD, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, ++ {AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, sys.Loong64, 0}, ++ {AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, ++ {AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, ++ {AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, sys.Loong64, 0}, ++ {AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, ++ ++ {AADDV, C_DCON, C_NONE, C_REG, 60, 20, 0, sys.Loong64, 0}, ++ {AADDV, C_DCON, C_REG, 
C_REG, 60, 20, 0, sys.Loong64, 0}, ++ ++ {ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0, 0}, ++ {ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0, 0}, ++ ++ {ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, sys.Loong64, 0}, ++ {ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, sys.Loong64, 0}, ++ ++ {ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, ++ ++ {ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABFPT, C_NONE, C_NONE, C_SBRA, 6, 4, 0, 0, NOTUSETMP}, ++ ++ {AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // b ++ {AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // bl ++ ++ {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0}, // jirl r0, rj, 0 ++ {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0}, // jirl r1, rj, 0 ++ ++ {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, ++ {AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, ++ {AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, ++ {AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, ++ ++ {AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, ++ {AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, ++ {AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, ++ {AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, ++ {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, 0, 0}, ++ {AMOVF, C_ADDR, C_NONE, 
C_FREG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, 0, 0}, ++ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, ++ ++ {AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, ++ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0, 0}, ++ {AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, ++ ++ {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0}, ++ {AWORD, C_DCON, C_NONE, C_NONE, 61, 4, 0, 0, 0}, ++ ++ {ATEQ, C_SCON, C_REG, C_REG, 
15, 8, 0, 0, 0}, ++ {ATEQ, C_SCON, C_NONE, C_REG, 15, 8, 0, 0, 0}, ++ ++ {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, // really CACHE instruction ++ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, ++ ++ {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}, ++ {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0}, ++ {obj.APCDATA, C_DCON, C_NONE, C_DCON, 0, 0, 0, 0, 0}, ++ {obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 ++ {obj.ANOP, C_DCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 ++ {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP ++ {obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP ++ ++ {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, ++} ++ ++var oprange [ALAST & obj.AMask][]Optab ++ ++var xcmp [C_NCLASS][C_NCLASS]bool ++ ++func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ++ if ctxt.Retpoline { ++ ctxt.Diag("-spectre=ret not supported on loong64") ++ ctxt.Retpoline = false // don't keep printing ++ } ++ ++ p := cursym.Func().Text ++ if p == nil || p.Link == nil { // handle external functions and ELF section symbols ++ return ++ } ++ ++ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())} ++ ++ if oprange[AOR&obj.AMask] == nil { ++ c.ctxt.Diag("loong64 ops not initialized, call loong64.buildop first") ++ } ++ ++ pc := int64(0) ++ p.Pc = pc ++ ++ var m int ++ var o *Optab ++ for p = p.Link; p != nil; p = p.Link { ++ p.Pc = pc ++ o = c.oplook(p) ++ m = int(o.size) ++ if m == 0 { ++ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && 
p.As != obj.APCDATA { ++ c.ctxt.Diag("zero-width instruction\n%v", p) ++ } ++ continue ++ } ++ ++ pc += int64(m) ++ } ++ ++ c.cursym.Size = pc ++ ++ /* ++ * if any procedure is large enough to ++ * generate a large SBRA branch, then ++ * generate extra passes putting branches ++ * around jmps to fix. this is rare. ++ */ ++ bflag := 1 ++ ++ var otxt int64 ++ var q *obj.Prog ++ for bflag != 0 { ++ bflag = 0 ++ pc = 0 ++ for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { ++ p.Pc = pc ++ o = c.oplook(p) ++ ++ // very large conditional branches ++ if o.type_ == 6 && p.To.Target() != nil { ++ otxt = p.To.Target().Pc - pc ++ if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { ++ q = c.newprog() ++ q.Link = p.Link ++ p.Link = q ++ q.As = AJMP ++ q.Pos = p.Pos ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.SetTarget(p.To.Target()) ++ p.To.SetTarget(q) ++ q = c.newprog() ++ q.Link = p.Link ++ p.Link = q ++ q.As = AJMP ++ q.Pos = p.Pos ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.SetTarget(q.Link.Link) ++ ++ c.addnop(p.Link) ++ c.addnop(p) ++ bflag = 1 ++ } ++ } ++ ++ m = int(o.size) ++ if m == 0 { ++ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { ++ c.ctxt.Diag("zero-width instruction\n%v", p) ++ } ++ continue ++ } ++ ++ pc += int64(m) ++ } ++ ++ c.cursym.Size = pc ++ } ++ pc += -pc & (FuncAlign - 1) ++ c.cursym.Size = pc ++ ++ // lay out the code, emitting code and data relocations. ++ ++ c.cursym.Grow(c.cursym.Size) ++ ++ bp := c.cursym.P ++ var i int32 ++ var out [5]uint32 ++ for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { ++ c.pc = p.Pc ++ o = c.oplook(p) ++ if int(o.size) > 4*len(out) { ++ log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p) ++ } ++ c.asmout(p, o, out[:]) ++ for i = 0; i < int32(o.size/4); i++ { ++ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) ++ bp = bp[4:] ++ } ++ } ++ ++ // Mark nonpreemptible instruction sequences. 
++ // We use REGTMP as a scratch register during call injection, ++ // so instruction sequences that use REGTMP are unsafe to ++ // preempt asynchronously. ++ obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable) ++} ++ ++// isUnsafePoint returns whether p is an unsafe point. ++func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool { ++ // If p explicitly uses REGTMP, it's unsafe to preempt, because the ++ // preemption sequence clobbers REGTMP. ++ return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP ++} ++ ++// isRestartable returns whether p is a multi-instruction sequence that, ++// if preempted, can be restarted. ++func (c *ctxt0) isRestartable(p *obj.Prog) bool { ++ if c.isUnsafePoint(p) { ++ return false ++ } ++ // If p is a multi-instruction sequence with uses REGTMP inserted by ++ // the assembler in order to materialize a large constant/offset, we ++ // can restart p (at the start of the instruction sequence), recompute ++ // the content of REGTMP, upon async preemption. Currently, all cases ++ // of assembler-inserted REGTMP fall into this category. ++ // If p doesn't use REGTMP, it can be simply preempted, so we don't ++ // mark it. 
++ o := c.oplook(p) ++ return o.size > 4 && o.flag&NOTUSETMP == 0 ++} ++ ++func isint32(v int64) bool { ++ return int64(int32(v)) == v ++} ++ ++func isuint32(v uint64) bool { ++ return uint64(uint32(v)) == v ++} ++ ++func (c *ctxt0) aclass(a *obj.Addr) int { ++ switch a.Type { ++ case obj.TYPE_NONE: ++ return C_NONE ++ ++ case obj.TYPE_REG: ++ if REG_R0 <= a.Reg && a.Reg <= REG_R31 { ++ return C_REG ++ } ++ if REG_F0 <= a.Reg && a.Reg <= REG_F31 { ++ return C_FREG ++ } ++ if REG_FCSR0 <= a.Reg && a.Reg <= REG_FCSR31 { ++ return C_FCSRREG ++ } ++ if REG_FCC0 <= a.Reg && a.Reg <= REG_FCC31 { ++ return C_FCCREG ++ } ++ return C_GOK ++ ++ case obj.TYPE_MEM: ++ switch a.Name { ++ case obj.NAME_EXTERN, ++ obj.NAME_STATIC: ++ if a.Sym == nil { ++ break ++ } ++ c.instoffset = a.Offset ++ if a.Sym != nil { // use relocation ++ if a.Sym.Type == objabi.STLSBSS { ++ return C_TLS ++ } ++ return C_ADDR ++ } ++ return C_LEXT ++ ++ case obj.NAME_AUTO: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-SP. ++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SAUTO ++ } ++ return C_LAUTO ++ ++ case obj.NAME_PARAM: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-FP. 
++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SAUTO ++ } ++ return C_LAUTO ++ ++ case obj.NAME_NONE: ++ c.instoffset = a.Offset ++ if c.instoffset == 0 { ++ return C_ZOREG ++ } ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SOREG ++ } ++ return C_LOREG ++ } ++ ++ return C_GOK ++ ++ case obj.TYPE_TEXTSIZE: ++ return C_TEXTSIZE ++ ++ case obj.TYPE_CONST, ++ obj.TYPE_ADDR: ++ switch a.Name { ++ case obj.NAME_NONE: ++ c.instoffset = a.Offset ++ if a.Reg != 0 { ++ if -BIG <= c.instoffset && c.instoffset <= BIG { ++ return C_SACON ++ } ++ if isint32(c.instoffset) { ++ return C_LACON ++ } ++ return C_DACON ++ } ++ ++ case obj.NAME_EXTERN, ++ obj.NAME_STATIC: ++ s := a.Sym ++ if s == nil { ++ return C_GOK ++ } ++ ++ c.instoffset = a.Offset ++ if s.Type == objabi.STLSBSS { ++ return C_STCON // address of TLS variable ++ } ++ return C_LECON ++ ++ case obj.NAME_AUTO: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-SP. ++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SACON ++ } ++ return C_LACON ++ ++ case obj.NAME_PARAM: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-FP. 
++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SACON ++ } ++ return C_LACON ++ ++ default: ++ return C_GOK ++ } ++ ++ if c.instoffset != int64(int32(c.instoffset)) { ++ return C_DCON ++ } ++ ++ if c.instoffset >= 0 { ++ if c.instoffset == 0 { ++ return C_ZCON ++ } ++ if c.instoffset <= 0x7ff { ++ return C_SCON ++ } ++ if c.instoffset <= 0xfff { ++ return C_ANDCON ++ } ++ if c.instoffset&0xfff == 0 && isuint32(uint64(c.instoffset)) { // && (instoffset & (1<<31)) == 0) ++ return C_UCON ++ } ++ if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { ++ return C_LCON ++ } ++ return C_LCON ++ } ++ ++ if c.instoffset >= -0x800 { ++ return C_ADDCON ++ } ++ if c.instoffset&0xfff == 0 && isint32(c.instoffset) { ++ return C_UCON ++ } ++ if isint32(c.instoffset) { ++ return C_LCON ++ } ++ return C_LCON ++ ++ case obj.TYPE_BRANCH: ++ return C_SBRA ++ } ++ ++ return C_GOK ++} ++ ++func prasm(p *obj.Prog) { ++ fmt.Printf("%v\n", p) ++} ++ ++func (c *ctxt0) oplook(p *obj.Prog) *Optab { ++ if oprange[AOR&obj.AMask] == nil { ++ c.ctxt.Diag("loong64 ops not initialized, call loong64.buildop first") ++ } ++ ++ a1 := int(p.Optab) ++ if a1 != 0 { ++ return &optab[a1-1] ++ } ++ a1 = int(p.From.Class) ++ if a1 == 0 { ++ a1 = c.aclass(&p.From) + 1 ++ p.From.Class = int8(a1) ++ } ++ ++ a1-- ++ a3 := int(p.To.Class) ++ if a3 == 0 { ++ a3 = c.aclass(&p.To) + 1 ++ p.To.Class = int8(a3) ++ } ++ ++ a3-- ++ a2 := C_NONE ++ if p.Reg != 0 { ++ a2 = C_REG ++ } ++ ++ ops := oprange[p.As&obj.AMask] ++ c1 := &xcmp[a1] ++ c3 := &xcmp[a3] ++ for i := range ops { ++ op := &ops[i] ++ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) { ++ p.Optab = uint16(cap(optab) - cap(ops) + i + 1) ++ return op ++ } ++ } ++ ++ c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3)) ++ prasm(p) ++ // Turn illegal 
instruction into an UNDEF, avoid crashing in asmout. ++ return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0} ++} ++ ++func cmp(a int, b int) bool { ++ if a == b { ++ return true ++ } ++ switch a { ++ case C_DCON: ++ if b == C_LCON { ++ return true ++ } ++ fallthrough ++ case C_LCON: ++ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON { ++ return true ++ } ++ ++ case C_ADD0CON: ++ if b == C_ADDCON { ++ return true ++ } ++ fallthrough ++ ++ case C_ADDCON: ++ if b == C_ZCON || b == C_SCON { ++ return true ++ } ++ ++ case C_AND0CON: ++ if b == C_ANDCON { ++ return true ++ } ++ fallthrough ++ ++ case C_ANDCON: ++ if b == C_ZCON || b == C_SCON { ++ return true ++ } ++ ++ case C_UCON: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_SCON: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_LACON: ++ if b == C_SACON { ++ return true ++ } ++ ++ case C_LBRA: ++ if b == C_SBRA { ++ return true ++ } ++ ++ case C_LEXT: ++ if b == C_SEXT { ++ return true ++ } ++ ++ case C_LAUTO: ++ if b == C_SAUTO { ++ return true ++ } ++ ++ case C_REG: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_LOREG: ++ if b == C_ZOREG || b == C_SOREG { ++ return true ++ } ++ ++ case C_SOREG: ++ if b == C_ZOREG { ++ return true ++ } ++ } ++ ++ return false ++} ++ ++type ocmp []Optab ++ ++func (x ocmp) Len() int { ++ return len(x) ++} ++ ++func (x ocmp) Swap(i, j int) { ++ x[i], x[j] = x[j], x[i] ++} ++ ++func (x ocmp) Less(i, j int) bool { ++ p1 := &x[i] ++ p2 := &x[j] ++ n := int(p1.as) - int(p2.as) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a1) - int(p2.a1) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a2) - int(p2.a2) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a3) - int(p2.a3) ++ if n != 0 { ++ return n < 0 ++ } ++ return false ++} ++ ++func opset(a, b0 obj.As) { ++ oprange[a&obj.AMask] = oprange[b0] ++} ++ ++func buildop(ctxt *obj.Link) { ++ if ctxt.DiagFunc == nil { ++ ctxt.DiagFunc = func(format string, args ...interface{}) { ++ 
log.Printf(format, args...) ++ } ++ } ++ ++ if oprange[AOR&obj.AMask] != nil { ++ // Already initialized; stop now. ++ // This happens in the cmd/asm tests, ++ // each of which re-initializes the arch. ++ return ++ } ++ ++ var n int ++ ++ for i := 0; i < C_NCLASS; i++ { ++ for n = 0; n < C_NCLASS; n++ { ++ if cmp(n, i) { ++ xcmp[i][n] = true ++ } ++ } ++ } ++ for n = 0; optab[n].as != obj.AXXX; n++ { ++ } ++ sort.Sort(ocmp(optab[:n])) ++ for i := 0; i < n; i++ { ++ r := optab[i].as ++ r0 := r & obj.AMask ++ start := i ++ for optab[i].as == r { ++ i++ ++ } ++ oprange[r0] = optab[start:i] ++ i-- ++ ++ switch r { ++ default: ++ ctxt.Diag("unknown op in build: %v", r) ++ ctxt.DiagFlush() ++ log.Fatalf("bad code") ++ ++ case AABSF: ++ opset(AMOVFD, r0) ++ opset(AMOVDF, r0) ++ opset(AMOVWF, r0) ++ opset(AMOVFW, r0) ++ opset(AMOVWD, r0) ++ opset(AMOVDW, r0) ++ opset(ANEGF, r0) ++ opset(ANEGD, r0) ++ opset(AABSD, r0) ++ opset(ATRUNCDW, r0) ++ opset(ATRUNCFW, r0) ++ opset(ASQRTF, r0) ++ opset(ASQRTD, r0) ++ ++ case AMOVVF: ++ opset(AMOVVD, r0) ++ opset(AMOVFV, r0) ++ opset(AMOVDV, r0) ++ opset(ATRUNCDV, r0) ++ opset(ATRUNCFV, r0) ++ ++ case AADD: ++ opset(ASGT, r0) ++ opset(ASGTU, r0) ++ opset(AADDU, r0) ++ ++ case AADDV: ++ opset(AADDVU, r0) ++ ++ case AADDF: ++ opset(ADIVF, r0) ++ opset(ADIVD, r0) ++ opset(AMULF, r0) ++ opset(AMULD, r0) ++ opset(ASUBF, r0) ++ opset(ASUBD, r0) ++ opset(AADDD, r0) ++ ++ case AAND: ++ opset(AOR, r0) ++ opset(AXOR, r0) ++ ++ case ABEQ: ++ opset(ABNE, r0) ++ opset(ABLT, r0) ++ opset(ABGE, r0) ++ opset(ABGEU, r0) ++ opset(ABLTU, r0) ++ ++ case ABLEZ: ++ opset(ABGEZ, r0) ++ opset(ABLTZ, r0) ++ opset(ABGTZ, r0) ++ ++ case AMOVB: ++ opset(AMOVH, r0) ++ ++ case AMOVBU: ++ opset(AMOVHU, r0) ++ ++ case AMUL: ++ opset(AMULU, r0) ++ opset(AMULH, r0) ++ opset(AMULHU, r0) ++ opset(AREM, r0) ++ opset(AREMU, r0) ++ opset(ADIV, r0) ++ opset(ADIVU, r0) ++ ++ case AMULV: ++ opset(AMULVU, r0) ++ opset(AMULHV, r0) ++ opset(AMULHVU, r0) ++ opset(AREMV, r0) ++ 
opset(AREMVU, r0) ++ opset(ADIVV, r0) ++ opset(ADIVVU, r0) ++ ++ case ASLL: ++ opset(ASRL, r0) ++ opset(ASRA, r0) ++ ++ case ASLLV: ++ opset(ASRAV, r0) ++ opset(ASRLV, r0) ++ ++ case ASUB: ++ opset(ASUBU, r0) ++ opset(ANOR, r0) ++ ++ case ASUBV: ++ opset(ASUBVU, r0) ++ ++ case ASYSCALL: ++ opset(ADBAR, r0) ++ opset(ANOOP, r0) ++ ++ case ACMPEQF: ++ opset(ACMPGTF, r0) ++ opset(ACMPGTD, r0) ++ opset(ACMPGEF, r0) ++ opset(ACMPGED, r0) ++ opset(ACMPEQD, r0) ++ ++ case ABFPT: ++ opset(ABFPF, r0) ++ ++ case AMOVWL: ++ opset(AMOVWR, r0) ++ ++ case AMOVVL: ++ opset(AMOVVR, r0) ++ ++ case AMOVW, ++ AMOVD, ++ AMOVF, ++ AMOVV, ++ ABREAK, ++ ARFE, ++ AJAL, ++ AJMP, ++ AMOVWU, ++ ALL, ++ ALLV, ++ ASC, ++ ASCV, ++ ANEGW, ++ ANEGV, ++ AWORD, ++ obj.ANOP, ++ obj.ATEXT, ++ obj.AUNDEF, ++ obj.AFUNCDATA, ++ obj.APCDATA, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ break ++ ++ case ACLO: ++ opset(ACLZ, r0) ++ ++ case ATEQ: ++ opset(ATNE, r0) ++ } ++ } ++} ++ ++func OP(x uint32, y uint32) uint32 { ++ return x<<3 | y<<0 ++} ++ ++func SP(x uint32, y uint32) uint32 { ++ return x<<29 | y<<26 ++} ++ ++func OP_TEN(x uint32, y uint32) uint32 { ++ return x<<21 | y<<10 ++} ++ ++// r1 -> rk ++// r2 -> rj ++// r3 -> rd ++func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (r1&0x1F)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++// r2 -> rj ++// r3 -> rd ++func OP_RR(op uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_16IR_5I(op uint32, i uint32, r2 uint32) uint32 { ++ return op | (i&0xFFFF)<<10 | (r2&0x7)<<5 | ((i >> 16) & 0x1F) ++} ++ ++func OP_16IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (i&0xFFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_12IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (i&0xFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_IR(op uint32, i uint32, r2 uint32) uint32 { ++ return op | (i&0xFFFFF)<<5 | (r2&0x1F)<<0 // ui20, rd5 ++} ++ ++// Encoding 
for the 'b' or 'bl' instruction ++func OP_B_BL(op uint32, i uint32) uint32 { ++ return op | ((i & 0xFFFF) << 10) | ((i >> 16) & 0x3FF) ++} ++ ++func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { ++ o1 := uint32(0) ++ o2 := uint32(0) ++ o3 := uint32(0) ++ o4 := uint32(0) ++ o5 := uint32(0) ++ ++ add := AADDU ++ add = AADDVU ++ ++ switch o.type_ { ++ default: ++ c.ctxt.Diag("unknown type %d %v", o.type_) ++ prasm(p) ++ ++ case 0: // pseudo ops ++ break ++ ++ case 1: // mov r1,r2 ==> OR r1,r0,r2 ++ a := AOR ++ if p.As == AMOVW { ++ a = ASLL ++ } ++ o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 2: // add/sub r1,[r2],r3 ++ r := int(p.Reg) ++ if p.As == ANEGW || p.As == ANEGV { ++ r = REGZERO ++ } ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ ++ case 3: // mov $soreg, r ==> or/add $i,o,r ++ v := c.regoff(&p.From) ++ ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := add ++ if o.a1 == C_ANDCON { ++ a = AOR ++ } ++ ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 4: // add $scon,[r1],r2 ++ v := c.regoff(&p.From) ++ ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ ++ o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 5: // syscall ++ o1 = c.oprrr(p.As) ++ ++ case 6: // beq r1,[r2],sbra ++ v := int32(0) ++ vcmp := int32(0) ++ if p.To.Target() != nil { ++ v = int32(p.To.Target().Pc-p.Pc) >> 2 ++ } ++ if v < 0 { ++ vcmp = -v ++ } ++ if (p.As == ABFPT || p.As == ABFPF) && ((uint32(vcmp))>>21)&0x7FF != 0 { ++ c.ctxt.Diag("21 bit-width, short branch too far\n%v", p) ++ } else if p.As != ABFPT && p.As != ABFPF && (v<<16)>>16 != v { ++ c.ctxt.Diag("16 bit-width, short branch too far\n%v", p) ++ } ++ if p.As == ABGTZ || p.As == ABLEZ { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v), uint32(p.Reg), uint32(p.From.Reg)) ++ } else if p.As == ABFPT || p.As == ABFPF { ++ 
// BCNEZ cj offset21 ,cj = fcc0 ++ // BCEQZ cj offset21 ,cj = fcc0 ++ o1 = OP_16IR_5I(c.opirr(p.As), uint32(v), uint32(REG_FCC0)) ++ } else { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg)) ++ } ++ ++ case 7: // mov r, soreg ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ v := c.regoff(&p.To) ++ o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg)) ++ ++ case 8: // mov soreg, r ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ v := c.regoff(&p.From) ++ o1 = OP_12IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 9: // sll r1,[r2],r3 ++ if p.As != ACLO && p.As != ACLZ { ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ } else { // clo r1,r2 ++ o1 = OP_RR(c.oprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg)) ++ } ++ ++ case 10: // add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 ++ v := c.regoff(&p.From) ++ a := AOR ++ if v < 0 { ++ a = AADDU ++ } ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 11: // jmp lbra ++ v := int32(0) ++ if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { ++ // use PC-relative branch for short branches ++ // BEQ R0, R0, sbra ++ if p.To.Target() != nil { ++ v = int32(p.To.Target().Pc-p.Pc) >> 2 ++ } ++ if (v<<16)>>16 == v { ++ o1 = OP_16IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) ++ break ++ } ++ } ++ if p.To.Target() == nil { ++ v = int32(p.Pc) >> 2 ++ } else { ++ v = int32(p.To.Target().Pc) >> 2 ++ } ++ o1 = OP_B_BL(c.opirr(p.As), uint32(v)) ++ if p.To.Sym == nil { ++ p.To.Sym = c.cursym.Func().Text.From.Sym ++ p.To.Offset = p.To.Target().Pc ++ } ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = 
p.To.Offset ++ rel.Type = objabi.R_CALLLOONG64 ++ ++ case 12: // movbs r,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ v := 16 ++ if p.As == AMOVB { ++ v = 24 ++ } ++ o1 = OP_16IRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg)) ++ o2 = OP_16IRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 13: // movbu r,r ++ if p.As == AMOVBU { ++ o1 = OP_12IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg)) ++ } else { ++ // bstrpick.d (msbd=15, lsbd=0) ++ o1 = (0x33c0 << 10) | ((uint32(p.From.Reg) & 0x1f) << 5) | (uint32(p.To.Reg) & 0x1F) ++ } ++ ++ case 14: // movwu r,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_16IRR(c.opirr(-ASLLV), uint32(32)&0x3f, uint32(p.From.Reg), uint32(p.To.Reg)) ++ o2 = OP_16IRR(c.opirr(-ASRLV), uint32(32)&0x3f, uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 15: // teq $c r,r ++ v := c.regoff(&p.From) ++ r := int(p.Reg) ++ if r == 0 { ++ r = REGZERO ++ } ++ /* ++ teq c, r1, r2 ++ fallthrough ++ ==> ++ bne r1, r2, 2 ++ break c ++ fallthrough ++ */ ++ if p.As == ATEQ { ++ o1 = OP_16IRR(c.opirr(ABNE), uint32(2), uint32(r), uint32(p.To.Reg)) ++ } else { // ATNE ++ o1 = OP_16IRR(c.opirr(ABEQ), uint32(2), uint32(r), uint32(p.To.Reg)) ++ } ++ o2 = c.oprrr(ABREAK) | (uint32(v) & 0x7FFF) ++ ++ case 16: // sll $c,[r1],r2 ++ v := c.regoff(&p.From) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ ++ // instruction ending with V:6-digit immediate, others:5-digit immediate ++ if v >= 32 && vshift(p.As) { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v)&0x3f, uint32(r), uint32(p.To.Reg)) ++ } else { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v)&0x1f, uint32(r), uint32(p.To.Reg)) ++ } ++ ++ case 17: ++ o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 18: // jmp [r1],0(r2) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } 
++ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r)) ++ if p.As == obj.ACALL { ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 0 ++ rel.Type = objabi.R_CALLIND ++ } ++ ++ case 19: // mov $lcon,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 23: // add $lcon,r1,r2 ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 24: // mov $ucon,r ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ ++ case 25: // add/and $ucon,[r1],r2 ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 26: // mov $lsext/auto/oreg,r ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 27: // mov [sl]ext/auto/oreg,fr ++ v := c.regoff(&p.From) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := -AMOVF ++ if p.As == AMOVD { ++ a = -AMOVD ++ } ++ switch o.size { ++ case 12: ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ 
case 4: ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg)) ++ } ++ ++ case 28: // mov fr,[sl]ext/auto/oreg ++ v := c.regoff(&p.To) ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := AMOVF ++ if p.As == AMOVD { ++ a = AMOVD ++ } ++ switch o.size { ++ case 12: ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 4: ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg)) ++ } ++ ++ case 30: // movw r,fr ++ a := OP_TEN(8, 1321) // movgr2fr.w ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 31: // movw fr,r ++ a := OP_TEN(8, 1325) // movfr2gr.s ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 32: // fadd fr1,[fr2],fr3 ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ ++ case 33: // fabs fr1, fr3 ++ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 34: // mov $con,fr ++ v := c.regoff(&p.From) ++ a := AADDU ++ if o.a1 == C_ANDCON { ++ a = AOR ++ } ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP)) ++ o2 = OP_RR(OP_TEN(8, 1321), uint32(REGTMP), uint32(p.To.Reg)) // movgr2fr.w ++ ++ case 35: // mov r,lext/auto/oreg ++ v := c.regoff(&p.To) ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 36: // mov lext/auto/oreg,r ++ v := c.regoff(&p.From) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), 
uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 40: // word ++ o1 = uint32(c.regoff(&p.From)) ++ ++ case 47: // movv r,fr ++ a := OP_TEN(8, 1322) // movgr2fr.d ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 48: // movv fr,r ++ a := OP_TEN(8, 1326) // movfr2gr.d ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 49: // undef ++ o1 = c.oprrr(ABREAK) ++ ++ // relocation operations ++ case 50: // mov r,addr ==> pcaddu12i + sw ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = p.To.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ ++ o2 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.To.Sym ++ rel2.Add = p.To.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 51: // mov addr,r ==> pcaddu12i + lw ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 52: // mov $lext, r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. 
++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(p.To.Reg)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ o2 = OP_12IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 53: // mov r, tlsvar ==> lu12i.w + ori + add r2, regtmp + sw o(regtmp) ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = p.To.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.To.Sym ++ rel2.Add = p.To.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 54: // lu12i.w + ori + add r2, regtmp + lw o(regtmp) ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. 
++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 55: // lu12i.w + ori + add r2, regtmp ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 59: // mov $dcon,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. 
++ v := c.vregoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(p.To.Reg)) ++ o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 60: // add $dcon,r1,r2 ++ v := c.vregoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o5 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 61: // word C_DCON ++ o1 = uint32(c.vregoff(&p.From)) ++ o2 = uint32(c.vregoff(&p.From) >> 32) ++ } ++ ++ out[0] = o1 ++ out[1] = o2 ++ out[2] = o3 ++ out[3] = o4 ++ out[4] = o5 ++} ++ ++func (c *ctxt0) vregoff(a *obj.Addr) int64 { ++ c.instoffset = 0 ++ c.aclass(a) ++ return c.instoffset ++} ++ ++func (c *ctxt0) regoff(a *obj.Addr) int32 { ++ return int32(c.vregoff(a)) ++} ++ ++func (c *ctxt0) oprrr(a obj.As) uint32 { ++ switch a { ++ case AADD: ++ return 0x20 << 15 ++ case AADDU: ++ return 0x20 << 15 ++ case ASGT: ++ return 0x24 << 15 // SLT ++ case ASGTU: ++ return 0x25 << 15 // SLTU ++ case AAND: ++ return 0x29 << 15 ++ case AOR: ++ return 0x2a << 15 ++ case AXOR: ++ return 0x2b << 15 ++ case ASUB: ++ return 0x22 << 15 ++ case ASUBU, ANEGW: ++ return 0x22 << 15 ++ case ANOR: ++ return 0x28 << 15 ++ case ASLL: ++ return 0x2e << 15 ++ case ASRL: ++ return 0x2f << 15 ++ case ASRA: ++ return 0x30 << 15 ++ case ASLLV: ++ return 0x31 << 15 ++ case ASRLV: ++ return 0x32 << 15 ++ case ASRAV: ++ return 0x33 << 15 ++ case AADDV: ++ return 0x21 << 15 ++ case AADDVU: ++ return 0x21 << 15 ++ case ASUBV: ++ return 0x23 << 15 ++ case ASUBVU, ANEGV: ++ return 0x23 << 15 ++ ++ case AMUL: 
++ return 0x38 << 15 // mul.w ++ case AMULU: ++ return 0x38 << 15 // mul.w ++ case AMULH: ++ return 0x39 << 15 // mulh.w ++ case AMULHU: ++ return 0x3a << 15 // mulhu.w ++ case AMULV: ++ return 0x3b << 15 // mul.d ++ case AMULVU: ++ return 0x3b << 15 // mul.d ++ case AMULHV: ++ return 0x3c << 15 // mulh.d ++ case AMULHVU: ++ return 0x3d << 15 // mulhu.d ++ case ADIV: ++ return 0x40 << 15 // div.w ++ case ADIVU: ++ return 0x42 << 15 // div.wu ++ case ADIVV: ++ return 0x44 << 15 // div.d ++ case ADIVVU: ++ return 0x46 << 15 // div.du ++ case AREM: ++ return 0x41 << 15 // mod.w ++ case AREMU: ++ return 0x43 << 15 // mod.wu ++ case AREMV: ++ return 0x45 << 15 // mod.d ++ case AREMVU: ++ return 0x47 << 15 // mod.du ++ ++ case AJMP: ++ return 0x13 << 26 // jirl r0, rj, 0 ++ case AJAL: ++ return (0x13 << 26) | 1 // jirl r1, rj, 0 ++ ++ case ABREAK: ++ return 0x54 << 15 ++ case ASYSCALL: ++ return 0x56 << 15 ++ case ADIVF: ++ return 0x20d << 15 ++ case ADIVD: ++ return 0x20e << 15 ++ case AMULF: ++ return 0x209 << 15 ++ case AMULD: ++ return 0x20a << 15 ++ case ASUBF: ++ return 0x205 << 15 ++ case ASUBD: ++ return 0x206 << 15 ++ case AADDF: ++ return 0x201 << 15 ++ case AADDD: ++ return 0x202 << 15 ++ case ATRUNCFV: ++ return 0x46a9 << 10 ++ case ATRUNCDV: ++ return 0x46aa << 10 ++ case ATRUNCFW: ++ return 0x46a1 << 10 ++ case ATRUNCDW: ++ return 0x46a2 << 10 ++ case AMOVFV: ++ return 0x46c9 << 10 ++ case AMOVDV: ++ return 0x46ca << 10 ++ case AMOVVF: ++ return 0x4746 << 10 ++ case AMOVVD: ++ return 0x474a << 10 ++ case AMOVFW: ++ return 0x46c1 << 10 ++ case AMOVDW: ++ return 0x46c2 << 10 ++ case AMOVWF: ++ return 0x4744 << 10 ++ case AMOVDF: ++ return 0x4646 << 10 ++ case AMOVWD: ++ return 0x4748 << 10 ++ case AMOVFD: ++ return 0x4649 << 10 ++ case AABSF: ++ return 0x4501 << 10 ++ case AABSD: ++ return 0x4502 << 10 ++ case AMOVF: ++ return 0x4525 << 10 ++ case AMOVD: ++ return 0x4526 << 10 ++ case ANEGF: ++ return 0x4505 << 10 ++ case ANEGD: ++ return 0x4506 << 10 ++ case 
ACMPEQF: ++ return 0x0c1<<20 | 0x4<<15 // FCMP.CEQ.S ++ case ACMPEQD: ++ return 0x0c2<<20 | 0x4<<15 // FCMP.CEQ.D ++ case ACMPGED: ++ return 0x0c2<<20 | 0x7<<15 // FCMP.SLE.D ++ case ACMPGEF: ++ return 0x0c1<<20 | 0x7<<15 // FCMP.SLE.S ++ case ACMPGTD: ++ return 0x0c2<<20 | 0x3<<15 // FCMP.SLT.D ++ case ACMPGTF: ++ return 0x0c1<<20 | 0x3<<15 // FCMP.SLT.S ++ ++ case ASQRTF: ++ return 0x4511 << 10 ++ case ASQRTD: ++ return 0x4512 << 10 ++ ++ case ADBAR: ++ return 0x70e4 << 15 ++ case ANOOP: ++ // andi r0, r0, 0 ++ return 0x03400000 ++ } ++ ++ if a < 0 { ++ c.ctxt.Diag("bad rrr opcode -%v", -a) ++ } else { ++ c.ctxt.Diag("bad rrr opcode %v", a) ++ } ++ return 0 ++} ++ ++func (c *ctxt0) oprr(a obj.As) uint32 { ++ switch a { ++ case ACLO: ++ return 0x4 << 10 ++ case ACLZ: ++ return 0x5 << 10 ++ } ++ ++ c.ctxt.Diag("bad rr opcode %v", a) ++ return 0 ++} ++ ++func (c *ctxt0) opir(a obj.As) uint32 { ++ switch a { ++ case ALU12IW: ++ return 0x0a << 25 ++ case ALU32ID: ++ return 0x0b << 25 ++ case APCADDU12I: ++ return 0x0e << 25 ++ } ++ return 0 ++} ++ ++func (c *ctxt0) opirr(a obj.As) uint32 { ++ switch a { ++ case AADD, AADDU: ++ return 0x00a << 22 ++ case ASGT: ++ return 0x008 << 22 ++ case ASGTU: ++ return 0x009 << 22 ++ case AAND: ++ return 0x00d << 22 ++ case AOR: ++ return 0x00e << 22 ++ case ALU52ID: ++ return 0x00c << 22 ++ case AXOR: ++ return 0x00f << 22 ++ case ASLL: ++ return 0x00081 << 15 ++ case ASRL: ++ return 0x00089 << 15 ++ case ASRA: ++ return 0x00091 << 15 ++ case AADDV: ++ return 0x00b << 22 ++ case AADDVU: ++ return 0x00b << 22 ++ ++ case AJMP: ++ return 0x14 << 26 ++ case AJAL, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ return 0x15 << 26 ++ ++ case AJIRL: ++ return 0x13 << 26 ++ case ABLTU: ++ return 0x1a << 26 ++ case ABLT, ABLTZ, ABGTZ: ++ return 0x18 << 26 ++ case ABGEU: ++ return 0x1b << 26 ++ case ABGE, ABGEZ, ABLEZ: ++ return 0x19 << 26 ++ case ABEQ: ++ return 0x16 << 26 ++ case ABNE: ++ return 0x17 << 26 ++ case ABFPT: ++ return 0x12<<26 | 0x1<<8 
++ case ABFPF: ++ return 0x12<<26 | 0x0<<8 ++ ++ case AMOVB, ++ AMOVBU: ++ return 0x0a4 << 22 ++ case AMOVH, ++ AMOVHU: ++ return 0x0a5 << 22 ++ case AMOVW, ++ AMOVWU: ++ return 0x0a6 << 22 ++ case AMOVV: ++ return 0x0a7 << 22 ++ case AMOVF: ++ return 0x0ad << 22 ++ case AMOVD: ++ return 0x0af << 22 ++ case AMOVWL: ++ return 0x0bc << 22 ++ case AMOVWR: ++ return 0x0bd << 22 ++ case AMOVVL: ++ return 0x0be << 22 ++ case AMOVVR: ++ return 0x0bf << 22 ++ ++ case ABREAK: ++ return 0x018 << 22 ++ ++ case -AMOVWL: ++ return 0x0b8 << 22 ++ case -AMOVWR: ++ return 0x0b9 << 22 ++ case -AMOVVL: ++ return 0x0ba << 22 ++ case -AMOVVR: ++ return 0x0bb << 22 ++ case -AMOVB: ++ return 0x0a0 << 22 ++ case -AMOVBU: ++ return 0x0a8 << 22 ++ case -AMOVH: ++ return 0x0a1 << 22 ++ case -AMOVHU: ++ return 0x0a9 << 22 ++ case -AMOVW: ++ return 0x0a2 << 22 ++ case -AMOVWU: ++ return 0x0aa << 22 ++ case -AMOVV: ++ return 0x0a3 << 22 ++ case -AMOVF: ++ return 0x0ac << 22 ++ case -AMOVD: ++ return 0x0ae << 22 ++ ++ case ASLLV, ++ -ASLLV: ++ return 0x0041 << 16 ++ case ASRLV, ++ -ASRLV: ++ return 0x0045 << 16 ++ case ASRAV, ++ -ASRAV: ++ return 0x0049 << 16 ++ case -ALL: ++ return 0x020 << 24 ++ case -ALLV: ++ return 0x022 << 24 ++ case ASC: ++ return 0x021 << 24 ++ case ASCV: ++ return 0x023 << 24 ++ } ++ ++ if a < 0 { ++ c.ctxt.Diag("bad irr opcode -%v", -a) ++ } else { ++ c.ctxt.Diag("bad irr opcode %v", a) ++ } ++ return 0 ++} ++ ++func vshift(a obj.As) bool { ++ switch a { ++ case ASLLV, ++ ASRLV, ++ ASRAV: ++ return true ++ } ++ return false ++} +diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go +new file mode 100644 +index 0000000000..d6d3091757 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/cnames.go +@@ -0,0 +1,43 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++var cnames0 = []string{ ++ "NONE", ++ "REG", ++ "FREG", ++ "FCREG", ++ "FCSRREG", ++ "FCCREG", ++ "ZCON", ++ "SCON", ++ "UCON", ++ "ADD0CON", ++ "AND0CON", ++ "ADDCON", ++ "ANDCON", ++ "LCON", ++ "DCON", ++ "SACON", ++ "SECON", ++ "LACON", ++ "LECON", ++ "DACON", ++ "STCON", ++ "SBRA", ++ "LBRA", ++ "SAUTO", ++ "LAUTO", ++ "SEXT", ++ "LEXT", ++ "ZOREG", ++ "SOREG", ++ "LOREG", ++ "GOK", ++ "ADDR", ++ "TLS", ++ "TEXTSIZE", ++ "NCLASS", ++} +diff --git a/src/cmd/internal/obj/loong64/list.go b/src/cmd/internal/obj/loong64/list.go +new file mode 100644 +index 0000000000..97ac659951 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/list.go +@@ -0,0 +1,46 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "fmt" ++) ++ ++func init() { ++ obj.RegisterRegister(obj.RBaseLOONG64, REG_LAST+1, rconv) ++ obj.RegisterOpcode(obj.ABaseLOONG64, Anames) ++} ++ ++func rconv(r int) string { ++ if r == 0 { ++ return "NONE" ++ } ++ if r == REGG { ++ // Special case. ++ return "g" ++ } ++ if REG_R0 <= r && r <= REG_R31 { ++ return fmt.Sprintf("R%d", r-REG_R0) ++ } ++ if REG_F0 <= r && r <= REG_F31 { ++ return fmt.Sprintf("F%d", r-REG_F0) ++ } ++ if REG_FCSR0 <= r && r <= REG_FCSR31 { ++ return fmt.Sprintf("FCSR%d", r-REG_FCSR0) ++ } ++ if REG_FCC0 <= r && r <= REG_FCC31 { ++ return fmt.Sprintf("FCC%d", r-REG_FCC0) ++ } ++ return fmt.Sprintf("Rgok(%d)", r-obj.RBaseLOONG64) ++} ++ ++func DRconv(a int) string { ++ s := "C_??" ++ if a >= C_NONE && a <= C_NCLASS { ++ s = cnames0[a] ++ } ++ return s ++} +diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go +new file mode 100644 +index 0000000000..36036e5cd5 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/obj.go +@@ -0,0 +1,625 @@ ++// Copyright 2021 The Go Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ ++ "math" ++) ++ ++func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { ++ // Rewrite JMP/JAL to symbol as TYPE_BRANCH. ++ switch p.As { ++ case AJMP, ++ AJAL, ++ ARET, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ if p.To.Sym != nil { ++ p.To.Type = obj.TYPE_BRANCH ++ } ++ } ++ ++ // Rewrite float constants to values stored in memory. ++ switch p.As { ++ case AMOVF: ++ if p.From.Type == obj.TYPE_FCONST { ++ f32 := float32(p.From.Val.(float64)) ++ if math.Float32bits(f32) == 0 { ++ p.As = AMOVW ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGZERO ++ break ++ } ++ p.From.Type = obj.TYPE_MEM ++ p.From.Sym = ctxt.Float32Sym(f32) ++ p.From.Name = obj.NAME_EXTERN ++ p.From.Offset = 0 ++ } ++ ++ case AMOVD: ++ if p.From.Type == obj.TYPE_FCONST { ++ f64 := p.From.Val.(float64) ++ if math.Float64bits(f64) == 0 { ++ p.As = AMOVV ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGZERO ++ break ++ } ++ p.From.Type = obj.TYPE_MEM ++ p.From.Sym = ctxt.Float64Sym(f64) ++ p.From.Name = obj.NAME_EXTERN ++ p.From.Offset = 0 ++ } ++ } ++ ++ // Rewrite SUB constants into ADD. 
++ switch p.As { ++ case ASUB: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADD ++ } ++ ++ case ASUBU: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDU ++ } ++ ++ case ASUBV: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDV ++ } ++ ++ case ASUBVU: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDVU ++ } ++ } ++} ++ ++func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ++ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym} ++ ++ p := c.cursym.Func().Text ++ textstksiz := p.To.Offset ++ ++ if textstksiz < 0 { ++ c.ctxt.Diag("negative frame size %d - did you mean NOFRAME?", textstksiz) ++ } ++ if p.From.Sym.NoFrame() { ++ if textstksiz != 0 { ++ c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) ++ } ++ } ++ ++ c.cursym.Func().Args = p.To.Val.(int32) ++ c.cursym.Func().Locals = int32(textstksiz) ++ ++ /* ++ * find leaf subroutines ++ * expand RET ++ */ ++ ++ for p := c.cursym.Func().Text; p != nil; p = p.Link { ++ switch p.As { ++ case obj.ATEXT: ++ p.Mark |= LABEL | LEAF | SYNC ++ if p.Link != nil { ++ p.Link.Mark |= LABEL ++ } ++ ++ case AMOVW, ++ AMOVV: ++ if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL { ++ p.Mark |= LABEL | SYNC ++ break ++ } ++ if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL { ++ p.Mark |= LABEL | SYNC ++ } ++ ++ case ASYSCALL, ++ AWORD: ++ p.Mark |= LABEL | SYNC ++ ++ case ANOR: ++ if p.To.Type == obj.TYPE_REG { ++ if p.To.Reg == REGZERO { ++ p.Mark |= LABEL | SYNC ++ } ++ } ++ ++ case AJAL, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ c.cursym.Func().Text.Mark &^= LEAF ++ fallthrough ++ ++ case AJMP, ++ ABEQ, ++ ABGEU, ++ ABLTU, ++ ABLTZ, ++ ABNE, ++ ABFPT, ABFPF: ++ p.Mark |= BRANCH ++ q1 := p.To.Target() ++ if q1 != nil { ++ for q1.As == obj.ANOP { ++ q1 = q1.Link ++ p.To.SetTarget(q1) ++ } ++ ++ if q1.Mark&LEAF == 0 { 
++ q1.Mark |= LABEL ++ } ++ } ++ q1 = p.Link ++ if q1 != nil { ++ q1.Mark |= LABEL ++ } ++ ++ case ARET: ++ if p.Link != nil { ++ p.Link.Mark |= LABEL ++ } ++ } ++ } ++ ++ var mov, add obj.As ++ ++ add = AADDV ++ mov = AMOVV ++ ++ var q *obj.Prog ++ var q1 *obj.Prog ++ autosize := int32(0) ++ var p1 *obj.Prog ++ var p2 *obj.Prog ++ for p := c.cursym.Func().Text; p != nil; p = p.Link { ++ o := p.As ++ switch o { ++ case obj.ATEXT: ++ autosize = int32(textstksiz) ++ ++ if p.Mark&LEAF != 0 && autosize == 0 { ++ // A leaf function with no locals has no frame. ++ p.From.Sym.Set(obj.AttrNoFrame, true) ++ } ++ ++ if !p.From.Sym.NoFrame() { ++ // If there is a stack frame at all, it includes ++ // space to save the LR. ++ autosize += int32(c.ctxt.FixedFrameSize()) ++ } ++ ++ if autosize&4 != 0 { ++ autosize += 4 ++ } ++ ++ if autosize == 0 && c.cursym.Func().Text.Mark&LEAF == 0 { ++ if c.cursym.Func().Text.From.Sym.NoSplit() { ++ if ctxt.Debugvlog { ++ ctxt.Logf("save suppressed in: %s\n", c.cursym.Name) ++ } ++ ++ c.cursym.Func().Text.Mark |= LEAF ++ } ++ } ++ ++ p.To.Offset = int64(autosize) - ctxt.FixedFrameSize() ++ ++ if c.cursym.Func().Text.Mark&LEAF != 0 { ++ c.cursym.Set(obj.AttrLeaf, true) ++ if p.From.Sym.NoFrame() { ++ break ++ } ++ } ++ ++ if !p.From.Sym.NoSplit() { ++ p = c.stacksplit(p, autosize) // emit split check ++ } ++ ++ q = p ++ ++ if autosize != 0 { ++ // Make sure to save link register for non-empty frame, even if ++ // it is a leaf function, so that traceback works. ++ // Store link register before decrement SP, so if a signal comes ++ // during the execution of the function prologue, the traceback ++ // code will not see a half-updated stack frame. ++ // This sequence is not async preemptible, as if we open a frame ++ // at the current SP, it will clobber the saved LR. 
++ q = c.ctxt.StartUnsafePoint(q, c.newprog) ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REGLINK ++ q.To.Type = obj.TYPE_MEM ++ q.To.Offset = int64(-autosize) ++ q.To.Reg = REGSP ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(-autosize) ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REGSP ++ q.Spadj = +autosize ++ ++ q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) ++ } ++ ++ if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 { ++ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame ++ // ++ // MOV g_panic(g), R1 ++ // BEQ R1, end ++ // MOV panic_argp(R1), R2 ++ // ADD $(autosize+FIXED_FRAME), R29, R3 ++ // BNE R2, R3, end ++ // ADD $FIXED_FRAME, R29, R2 ++ // MOV R2, panic_argp(R1) ++ // end: ++ // NOP ++ // ++ // The NOP is needed to give the jumps somewhere to land. ++ // It is a liblink NOP, not an hardware NOP: it encodes to 0 instruction bytes. ++ // ++ // We don't generate this for leafs because that means the wrapped ++ // function was inlined into the wrapper. 
++ ++ q = obj.Appendp(q, newprog) ++ ++ q.As = mov ++ q.From.Type = obj.TYPE_MEM ++ q.From.Reg = REGG ++ q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R19 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = ABEQ ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R19 ++ q.To.Type = obj.TYPE_BRANCH ++ q.Mark |= BRANCH ++ p1 = q ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.From.Type = obj.TYPE_MEM ++ q.From.Reg = REG_R19 ++ q.From.Offset = 0 // Panic.argp ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R4 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() ++ q.Reg = REGSP ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R5 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = ABNE ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R4 ++ q.Reg = REG_R5 ++ q.To.Type = obj.TYPE_BRANCH ++ q.Mark |= BRANCH ++ p2 = q ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = ctxt.FixedFrameSize() ++ q.Reg = REGSP ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R4 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R4 ++ q.To.Type = obj.TYPE_MEM ++ q.To.Reg = REG_R19 ++ q.To.Offset = 0 // Panic.argp ++ ++ q = obj.Appendp(q, newprog) ++ ++ q.As = obj.ANOP ++ p1.To.SetTarget(q) ++ p2.To.SetTarget(q) ++ } ++ ++ case ARET: ++ if p.From.Type == obj.TYPE_CONST { ++ ctxt.Diag("using BECOME (%v) is not supported!", p) ++ break ++ } ++ ++ retSym := p.To.Sym ++ p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction ++ p.To.Sym = nil ++ ++ if c.cursym.Func().Text.Mark&LEAF != 0 { ++ if autosize == 0 { ++ p.As = AJMP ++ p.From = obj.Addr{} ++ if retSym != nil { // retjmp ++ p.To.Type = obj.TYPE_BRANCH ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = retSym ++ } else { ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = REGLINK ++ p.To.Offset = 0 ++ } ++ p.Mark |= 
BRANCH ++ break ++ } ++ ++ p.As = add ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = int64(autosize) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REGSP ++ p.Spadj = -autosize ++ ++ q = c.newprog() ++ q.As = AJMP ++ q.Pos = p.Pos ++ if retSym != nil { // retjmp ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.Name = obj.NAME_EXTERN ++ q.To.Sym = retSym ++ } else { ++ q.To.Type = obj.TYPE_MEM ++ q.To.Offset = 0 ++ q.To.Reg = REGLINK ++ } ++ q.Mark |= BRANCH ++ q.Spadj = +autosize ++ ++ q.Link = p.Link ++ p.Link = q ++ break ++ } ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Offset = 0 ++ p.From.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REGLINK ++ ++ if autosize != 0 { ++ q = c.newprog() ++ q.As = add ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(autosize) ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REGSP ++ q.Spadj = -autosize ++ ++ q.Link = p.Link ++ p.Link = q ++ } ++ ++ q1 = c.newprog() ++ q1.As = AJMP ++ q1.Pos = p.Pos ++ if retSym != nil { // retjmp ++ q1.To.Type = obj.TYPE_BRANCH ++ q1.To.Name = obj.NAME_EXTERN ++ q1.To.Sym = retSym ++ } else { ++ q1.To.Type = obj.TYPE_MEM ++ q1.To.Offset = 0 ++ q1.To.Reg = REGLINK ++ } ++ q1.Mark |= BRANCH ++ q1.Spadj = +autosize ++ ++ q1.Link = q.Link ++ q.Link = q1 ++ ++ case AADD, ++ AADDU, ++ AADDV, ++ AADDVU: ++ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { ++ p.Spadj = int32(-p.From.Offset) ++ } ++ ++ case obj.AGETCALLERPC: ++ if cursym.Leaf() { ++ // MOV LR, Rd ++ p.As = mov ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGLINK ++ } else { ++ // MOV (RSP), Rd ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = REGSP ++ } ++ } ++ } ++} ++ ++func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { ++ var mov, add obj.As ++ ++ add = AADDV ++ mov = AMOVV ++ ++ // MOV g_stackguard(g), R19 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = REGG ++ p.From.Offset = 2 * 
int64(c.ctxt.Arch.PtrSize) // G.stackguard0 ++ if c.cursym.CFunc() { ++ p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 ++ } ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ ++ // Mark the stack bound check and morestack call async nonpreemptible. ++ // If we get preempted here, when resumed the preemption request is ++ // cleared, but we'll still call morestack, which will double the stack ++ // unnecessarily. See issue #35470. ++ p = c.ctxt.StartUnsafePoint(p, c.newprog) ++ ++ var q *obj.Prog ++ if framesize <= objabi.StackSmall { ++ // small stack: SP < stackguard ++ // AGTU SP, stackguard, R19 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGSP ++ p.Reg = REG_R19 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ } else { ++ // large stack: SP-framesize < stackguard-StackSmall ++ offset := int64(framesize) - objabi.StackSmall ++ if framesize > objabi.StackBig { ++ // Such a large stack we need to protect against underflow. ++ // The runtime guarantees SP > objabi.StackBig, but ++ // framesize is large enough that SP-framesize may ++ // underflow, causing a direct comparison with the ++ // stack guard to incorrectly succeed. We explicitly ++ // guard against underflow. 
++ // ++ // SGTU $(framesize-StackSmall), SP, R4 ++ // BNE R4, label-of-call-to-morestack ++ ++ p = obj.Appendp(p, c.newprog) ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = offset ++ p.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R4 ++ ++ p = obj.Appendp(p, c.newprog) ++ q = p ++ p.As = ABNE ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R4 ++ p.To.Type = obj.TYPE_BRANCH ++ p.Mark |= BRANCH ++ } ++ ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = add ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = -offset ++ p.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R4 ++ ++ p = obj.Appendp(p, c.newprog) ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R4 ++ p.Reg = REG_R19 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ } ++ ++ // q1: BNE R19, done ++ p = obj.Appendp(p, c.newprog) ++ q1 := p ++ ++ p.As = ABNE ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R19 ++ p.To.Type = obj.TYPE_BRANCH ++ p.Mark |= BRANCH ++ ++ // MOV LINK, R5 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGLINK ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R5 ++ if q != nil { ++ q.To.SetTarget(p) ++ p.Mark |= LABEL ++ } ++ ++ p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) ++ ++ // JAL runtime.morestack(SB) ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = AJAL ++ p.To.Type = obj.TYPE_BRANCH ++ if c.cursym.CFunc() { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestackc") ++ } else if !c.cursym.Func().Text.From.Sym.NeedCtxt() { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt") ++ } else { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestack") ++ } ++ p.Mark |= BRANCH ++ ++ p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) ++ ++ // JMP start ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = AJMP ++ p.To.Type = obj.TYPE_BRANCH ++ p.To.SetTarget(c.cursym.Func().Text.Link) ++ p.Mark |= BRANCH ++ ++ // placeholder for q1's jump target ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = obj.ANOP // zero-width 
place holder ++ q1.To.SetTarget(p) ++ ++ return p ++} ++ ++func (c *ctxt0) addnop(p *obj.Prog) { ++ q := c.newprog() ++ q.As = ANOOP ++ q.Pos = p.Pos ++ q.Link = p.Link ++ p.Link = q ++} ++ ++var Linkloong64 = obj.LinkArch{ ++ Arch: sys.ArchLoong64, ++ Init: buildop, ++ Preprocess: preprocess, ++ Assemble: span0, ++ Progedit: progedit, ++ DWARFRegisters: LOONG64DWARFRegisters, ++} +diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go +index e8441a6969..9f8606e28d 100644 +--- a/src/cmd/internal/obj/util.go ++++ b/src/cmd/internal/obj/util.go +@@ -499,15 +499,16 @@ var regSpace []regSet + const ( + // Because of masking operations in the encodings, each register + // space should start at 0 modulo some power of 2. +- RBase386 = 1 * 1024 +- RBaseAMD64 = 2 * 1024 +- RBaseARM = 3 * 1024 +- RBasePPC64 = 4 * 1024 // range [4k, 8k) +- RBaseARM64 = 8 * 1024 // range [8k, 13k) +- RBaseMIPS = 13 * 1024 // range [13k, 14k) +- RBaseS390X = 14 * 1024 // range [14k, 15k) +- RBaseRISCV = 15 * 1024 // range [15k, 16k) +- RBaseWasm = 16 * 1024 ++ RBase386 = 1 * 1024 ++ RBaseAMD64 = 2 * 1024 ++ RBaseARM = 3 * 1024 ++ RBasePPC64 = 4 * 1024 // range [4k, 8k) ++ RBaseARM64 = 8 * 1024 // range [8k, 13k) ++ RBaseMIPS = 13 * 1024 // range [13k, 14k) ++ RBaseS390X = 14 * 1024 // range [14k, 15k) ++ RBaseRISCV = 15 * 1024 // range [15k, 16k) ++ RBaseWasm = 16 * 1024 ++ RBaseLOONG64 = 17 * 1024 + ) + + // RegisterRegister binds a pretty-printer (Rconv) for register +-- +2.38.0 + diff --git a/loongarch64/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch b/loongarch64/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch new file mode 100644 index 0000000..232db68 --- /dev/null +++ b/loongarch64/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch @@ -0,0 +1,812 @@ +From b65bebe93d3e5d0fc5449079af0eb293f45af7ac Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:25:14 +0800 +Subject: [PATCH 15/82] cmd/asm/internal: helper 
function and end-to-end test + for assembler + +Change-Id: I93d8be36e44e516df70b25e20d9c0695a05510d1 +--- + src/cmd/asm/internal/arch/arch.go | 56 +++++ + src/cmd/asm/internal/arch/loong64.go | 67 ++++++ + src/cmd/asm/internal/asm/asm.go | 18 ++ + src/cmd/asm/internal/asm/endtoend_test.go | 7 + + src/cmd/asm/internal/asm/operand_test.go | 88 ++++++++ + src/cmd/asm/internal/asm/testdata/loong64.s | 11 + + .../asm/internal/asm/testdata/loong64enc1.s | 209 ++++++++++++++++++ + .../asm/internal/asm/testdata/loong64enc2.s | 82 +++++++ + .../asm/internal/asm/testdata/loong64enc3.s | 131 +++++++++++ + 9 files changed, 669 insertions(+) + create mode 100644 src/cmd/asm/internal/arch/loong64.go + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc1.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc2.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc3.s + +diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go +index 026d8abf81..95afa1dac3 100644 +--- a/src/cmd/asm/internal/arch/arch.go ++++ b/src/cmd/asm/internal/arch/arch.go +@@ -9,6 +9,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" ++ "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" +@@ -60,6 +61,8 @@ func Set(GOARCH string) *Arch { + return archArm() + case "arm64": + return archArm64() ++ case "loong64": ++ return archLoong64(&loong64.Linkloong64) + case "mips": + return archMips(&mips.Linkmips) + case "mipsle": +@@ -534,6 +537,59 @@ func archMips64(linkArch *obj.LinkArch) *Arch { + } + } + ++func archLoong64(linkArch *obj.LinkArch) *Arch { ++ register := make(map[string]int16) ++ // Create maps for easy lookup of instruction names etc. ++ // Note that there is no list of names as there is for x86. 
++ for i := loong64.REG_R0; i <= loong64.REG_R31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_F0; i <= loong64.REG_F31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ // Pseudo-registers. ++ register["SB"] = RSB ++ register["FP"] = RFP ++ register["PC"] = RPC ++ // Avoid unintentionally clobbering g using R22. ++ delete(register, "R22") ++ register["g"] = loong64.REG_R22 ++ register["RSB"] = loong64.REG_R31 ++ registerPrefix := map[string]bool{ ++ "F": true, ++ "FCSR": true, ++ "FCC": true, ++ "R": true, ++ } ++ ++ instructions := make(map[string]obj.As) ++ for i, s := range obj.Anames { ++ instructions[s] = obj.As(i) ++ } ++ for i, s := range loong64.Anames { ++ if obj.As(i) >= obj.A_ARCHSPECIFIC { ++ instructions[s] = obj.As(i) + obj.ABaseLOONG64 ++ } ++ } ++ // Annoying alias. ++ instructions["JAL"] = loong64.AJAL ++ ++ return &Arch{ ++ LinkArch: linkArch, ++ Instructions: instructions, ++ Register: register, ++ RegisterPrefix: registerPrefix, ++ RegisterNumber: loong64RegisterNumber, ++ IsJump: jumpLOONG64, ++ } ++} ++ + func archRISCV64() *Arch { + register := make(map[string]int16) + +diff --git a/src/cmd/asm/internal/arch/loong64.go b/src/cmd/asm/internal/arch/loong64.go +new file mode 100644 +index 0000000000..e1fae1f2d7 +--- /dev/null ++++ b/src/cmd/asm/internal/arch/loong64.go +@@ -0,0 +1,67 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// This file encapsulates some of the odd characteristics of the ++// LOONG64 (LOONG64) instruction set, to minimize its interaction ++// with the core of the assembler. 
++ ++package arch ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++func jumpLOONG64(word string) bool { ++ switch word { ++ case "BEQ", "BFPF", "BFPT", "BLTZ", "BGEZ", "BLEZ", "BGTZ", "BLT", "BLTU", "JIRL", "BNE", "BGE", "BGEU", "JMP", "JAL", "CALL": ++ return true ++ } ++ return false ++} ++ ++// IsLOONG64CMP reports whether the op (as defined by an loong64.A* constant) is ++// one of the CMP instructions that require special handling. ++func IsLOONG64CMP(op obj.As) bool { ++ switch op { ++ case loong64.ACMPEQF, loong64.ACMPEQD, loong64.ACMPGEF, loong64.ACMPGED, ++ loong64.ACMPGTF, loong64.ACMPGTD: ++ return true ++ } ++ return false ++} ++ ++// IsLOONG64MUL reports whether the op (as defined by an loong64.A* constant) is ++// one of the MUL/DIV/REM instructions that require special handling. ++func IsLOONG64MUL(op obj.As) bool { ++ switch op { ++ case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU, ++ loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU, ++ loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU: ++ return true ++ } ++ return false ++} ++ ++func loong64RegisterNumber(name string, n int16) (int16, bool) { ++ switch name { ++ case "F": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_F0 + n, true ++ } ++ case "FCSR": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_FCSR0 + n, true ++ } ++ case "FCC": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_FCC0 + n, true ++ } ++ case "R": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_R0 + n, true ++ } ++ } ++ return 0, false ++} +diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go +index cf0d1550f9..1397fa87f6 100644 +--- a/src/cmd/asm/internal/asm/asm.go ++++ b/src/cmd/asm/internal/asm/asm.go +@@ -433,6 +433,14 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } ++ if p.arch.Family == sys.Loong64 { ++ // 3-operand jumps. 
++ // First two must be registers ++ target = &a[2] ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ break ++ } + if p.arch.Family == sys.S390X { + // 3-operand jumps. + target = &a[2] +@@ -593,6 +601,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } ++ } else if p.arch.Family == sys.Loong64 { ++ if arch.IsLOONG64CMP(op) { ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ break ++ } + } + prog.From = a[0] + prog.To = a[1] +@@ -602,6 +616,10 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] ++ case sys.Loong64: ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ prog.To = a[2] + case sys.ARM: + // Special cases. + if arch.IsARMSTREX(op) { +diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go +index ead8b27b01..33a4465af3 100644 +--- a/src/cmd/asm/internal/asm/endtoend_test.go ++++ b/src/cmd/asm/internal/asm/endtoend_test.go +@@ -447,6 +447,13 @@ func TestMIPSEndToEnd(t *testing.T) { + testEndToEnd(t, "mips64", "mips64") + } + ++func TestLOONG64Encoder(t *testing.T) { ++ testEndToEnd(t, "loong64", "loong64enc1") ++ testEndToEnd(t, "loong64", "loong64enc2") ++ testEndToEnd(t, "loong64", "loong64enc3") ++ testEndToEnd(t, "loong64", "loong64") ++} ++ + func TestPPC64EndToEnd(t *testing.T) { + testEndToEnd(t, "ppc64", "ppc64") + } +diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go +index 8ef02b1a0e..f8c9571378 100644 +--- a/src/cmd/asm/internal/asm/operand_test.go ++++ b/src/cmd/asm/internal/asm/operand_test.go +@@ -125,6 +125,11 @@ func TestMIPS64OperandParser(t *testing.T) { + testOperandParser(t, parser, mips64OperandTests) + } + ++func TestLOONG64OperandParser(t *testing.T) { ++ parser := newParser("loong64") ++ testOperandParser(t, 
parser, loong64OperandTests) ++} ++ + func TestS390XOperandParser(t *testing.T) { + parser := newParser("s390x") + testOperandParser(t, parser, s390xOperandTests) +@@ -143,6 +148,7 @@ func TestFuncAddress(t *testing.T) { + {"ppc64", ppc64OperandTests}, + {"mips", mipsOperandTests}, + {"mips64", mips64OperandTests}, ++ {"loong64", loong64OperandTests}, + {"s390x", s390xOperandTests}, + } { + t.Run(sub.arch, func(t *testing.T) { +@@ -845,6 +851,88 @@ var mipsOperandTests = []operandTest{ + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. + } + ++var loong64OperandTests = []operandTest{ ++ {"$((1<<63)-1)", "$9223372036854775807"}, ++ {"$(-64*1024)", "$-65536"}, ++ {"$(1024 * 8)", "$8192"}, ++ {"$-1", "$-1"}, ++ {"$-24(R4)", "$-24(R4)"}, ++ {"$0", "$0"}, ++ {"$0(R1)", "$(R1)"}, ++ {"$0.5", "$(0.5)"}, ++ {"$0x7000", "$28672"}, ++ {"$0x88888eef", "$2290650863"}, ++ {"$1", "$1"}, ++ {"$_main<>(SB)", "$_main<>(SB)"}, ++ {"$argframe(FP)", "$argframe(FP)"}, ++ {"$~3", "$-4"}, ++ {"(-288-3*8)(R1)", "-312(R1)"}, ++ {"(16)(R7)", "16(R7)"}, ++ {"(8)(g)", "8(g)"}, ++ {"(R0)", "(R0)"}, ++ {"(R3)", "(R3)"}, ++ {"(R4)", "(R4)"}, ++ {"(R5)", "(R5)"}, ++ {"-1(R4)", "-1(R4)"}, ++ {"-1(R5)", "-1(R5)"}, ++ {"6(PC)", "6(PC)"}, ++ {"F14", "F14"}, ++ {"F15", "F15"}, ++ {"F16", "F16"}, ++ {"F17", "F17"}, ++ {"F18", "F18"}, ++ {"F19", "F19"}, ++ {"F20", "F20"}, ++ {"F21", "F21"}, ++ {"F22", "F22"}, ++ {"F23", "F23"}, ++ {"F24", "F24"}, ++ {"F25", "F25"}, ++ {"F26", "F26"}, ++ {"F27", "F27"}, ++ {"F28", "F28"}, ++ {"F29", "F29"}, ++ {"F30", "F30"}, ++ {"F31", "F31"}, ++ {"R0", "R0"}, ++ {"R1", "R1"}, ++ {"R11", "R11"}, ++ {"R12", "R12"}, ++ {"R13", "R13"}, ++ {"R14", "R14"}, ++ {"R15", "R15"}, ++ {"R16", "R16"}, ++ {"R17", "R17"}, ++ {"R18", "R18"}, ++ {"R19", "R19"}, ++ {"R2", "R2"}, ++ {"R20", "R20"}, ++ {"R21", "R21"}, ++ {"R23", "R23"}, ++ {"R24", "R24"}, ++ {"R25", "R25"}, ++ {"R26", "R26"}, ++ {"R27", "R27"}, ++ {"R28", "R28"}, ++ {"R29", "R29"}, 
++ {"R30", "R30"}, ++ {"R3", "R3"}, ++ {"R4", "R4"}, ++ {"R5", "R5"}, ++ {"R6", "R6"}, ++ {"R7", "R7"}, ++ {"R8", "R8"}, ++ {"R9", "R9"}, ++ {"a(FP)", "a(FP)"}, ++ {"g", "g"}, ++ {"RSB", "R31"}, ++ {"ret+8(FP)", "ret+8(FP)"}, ++ {"runtime·abort(SB)", "runtime.abort(SB)"}, ++ {"·AddUint32(SB)", "\"\".AddUint32(SB)"}, ++ {"·trunc(SB)", "\"\".trunc(SB)"}, ++ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. ++} ++ + var s390xOperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, +diff --git a/src/cmd/asm/internal/asm/testdata/loong64.s b/src/cmd/asm/internal/asm/testdata/loong64.s +new file mode 100644 +index 0000000000..a4bf9ec103 +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64.s +@@ -0,0 +1,11 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "../../../../../runtime/textflag.h" ++//TODO ++ ++TEXT foo(SB),DUPOK|NOSPLIT,$0 ++ JAL 1(PC) //CALL 1(PC) //000c0054 ++ JAL (R4) //CALL (R4) //8100004c ++ JAL foo(SB) //CALL foo(SB) //00100054 +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +new file mode 100644 +index 0000000000..c724cf97f5 +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -0,0 +1,209 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++lable1: ++ BFPT 1(PC) // 00050048 ++ BFPT lable1 // BFPT 2 //1ffdff4b ++ ++lable2: ++ BFPF 1(PC) // 00040048 ++ BFPF lable2 // BFPF 4 // 1ffcff4b ++ ++ JMP foo(SB) // 00100050 ++ JMP (R4) // 8000004c ++ JMP 1(PC) // 00040058 ++ MOVW $65536, R4 // 04020014 ++ MOVW $4096, R4 // 24000014 ++ MOVV $65536, R4 // 04020014 ++ MOVV $4096, R4 // 24000014 ++ MOVW R4, R5 // 85001700 ++ MOVV R4, R5 // 85001500 ++ MOVBU R4, R5 // 85fc4303 ++ SUB R4, R5, R6 // a6101100 ++ SUBV R4, R5, R6 // a6901100 ++ ADD R4, R5, R6 // a6101000 ++ ADDV R4, R5, R6 // a6901000 ++ AND R4, R5, R6 // a6901400 ++ SUB R4, R5 // a5101100 ++ SUBV R4, R5 // a5901100 ++ ADD R4, R5 // a5101000 ++ ADDV R4, R5 // a5901000 ++ AND R4, R5 // a5901400 ++ NEGW R4, R5 // 05101100 ++ NEGV R4, R5 // 05901100 ++ SLL R4, R5 // a5101700 ++ SLL R4, R5, R6 // a6101700 ++ SRL R4, R5 // a5901700 ++ SRL R4, R5, R6 // a6901700 ++ SRA R4, R5 // a5101800 ++ SRA R4, R5, R6 // a6101800 ++ SLLV R4, R5 // a5901800 ++ SLLV R4, R5, R6 // a6901800 ++ CLO R4, R5 // 85100000 ++ CLZ R4, R5 // 85140000 ++ ADDF F4, F5 // a5900001 ++ ADDF F4, R5, F6 // a6900001 ++ CMPEQF F4, R5 // a010120c ++ ABSF F4, F5 // 85041401 ++ MOVVF F4, F5 // 85181d01 ++ MOVF F4, F5 // 85941401 ++ MOVD F4, F5 // 85981401 ++ MOVW R4, result+16(FP) // 64608029 ++ MOVWU R4, result+16(FP) // 64608029 ++ MOVV R4, result+16(FP) // 6460c029 ++ MOVB R4, result+16(FP) // 64600029 ++ MOVBU R4, result+16(FP) // 64600029 ++ MOVWL R4, result+16(FP) // 6460002f ++ MOVVL R4, result+16(FP) // 6460802f ++ MOVW R4, 1(R5) // a4048029 ++ MOVWU R4, 1(R5) // a4048029 ++ MOVV R4, 1(R5) // a404c029 ++ MOVB R4, 1(R5) // a4040029 ++ MOVBU R4, 1(R5) // a4040029 ++ MOVWL R4, 1(R5) // a404002f ++ MOVVL R4, 1(R5) // a404802f ++ SC R4, 1(R5) // a4040021 ++ SCV R4, 1(R5) // a4040023 ++ MOVW y+8(FP), R4 // 64408028 ++ MOVWU y+8(FP), R4 // 6440802a ++ MOVV y+8(FP), R4 // 6440c028 ++ MOVB y+8(FP), R4 // 
64400028 ++ MOVBU y+8(FP), R4 // 6440002a ++ MOVWL y+8(FP), R4 // 6440002e ++ MOVVL y+8(FP), R4 // 6440802e ++ MOVW 1(R5), R4 // a4048028 ++ MOVWU 1(R5), R4 // a404802a ++ MOVV 1(R5), R4 // a404c028 ++ MOVB 1(R5), R4 // a4040028 ++ MOVBU 1(R5), R4 // a404002a ++ MOVWL 1(R5), R4 // a404002e ++ MOVVL 1(R5), R4 // a404802e ++ LL 1(R5), R4 // a4040020 ++ LLV 1(R5), R4 // a4040022 ++ MOVW $4(R4), R5 // 8510c002 ++ MOVV $4(R4), R5 // 8510c002 ++ MOVW $-1, R4 // 04fcff02 ++ MOVV $-1, R4 // 04fcff02 ++ MOVW $1, R4 // 0404c002 ++ MOVV $1, R4 // 0404c002 ++ ADD $-1, R4, R5 // 85fcbf02 ++ ADD $-1, R4 // 84fcbf02 ++ ADDV $-1, R4, R5 // 85fcff02 ++ ADDV $-1, R4 // 84fcff02 ++ AND $1, R4, R5 // 85044003 ++ AND $1, R4 // 84044003 ++ SLL $4, R4, R5 // 85904000 ++ SLL $4, R4 // 84904000 ++ SRL $4, R4, R5 // 85904400 ++ SRL $4, R4 // 84904400 ++ SRA $4, R4, R5 // 85904800 ++ SRA $4, R4 // 84904800 ++ SLLV $4, R4, R5 // 85104100 ++ SLLV $4, R4 // 84104100 ++ SYSCALL // 00002b00 ++ BEQ R4, R5, 1(PC) // 85040058 ++ BEQ R4, 1(PC) // 80040058 ++ BLTU R4, 1(PC) // 80040068 ++ MOVW y+8(FP), F4 // 6440002b ++ MOVF y+8(FP), F4 // 6440002b ++ MOVD y+8(FP), F4 // 6440802b ++ MOVW 1(F5), F4 // a404002b ++ MOVF 1(F5), F4 // a404002b ++ MOVD 1(F5), F4 // a404802b ++ MOVW F4, result+16(FP) // 6460402b ++ MOVF F4, result+16(FP) // 6460402b ++ MOVD F4, result+16(FP) // 6460c02b ++ MOVW F4, 1(F5) // a404402b ++ MOVF F4, 1(F5) // a404402b ++ MOVD F4, 1(F5) // a404c02b ++ MOVW R4, F5 // 85a41401 ++ MOVW F4, R5 // 85b41401 ++ MOVV R4, F5 // 85a81401 ++ MOVV F4, R5 // 85b81401 ++ WORD $74565 // 45230100 ++ BREAK R4, result+16(FP) // 64600006 ++ BREAK R4, 1(R5) // a4040006 ++ BREAK // 00002a00 ++ UNDEF // 00002a00 ++ ++ // mul ++ MUL R4, R5 // a5101c00 ++ MUL R4, R5, R6 // a6101c00 ++ MULV R4, R5 // a5901d00 ++ MULV R4, R5, R6 // a6901d00 ++ MULVU R4, R5 // a5901d00 ++ MULVU R4, R5, R6 // a6901d00 ++ MULHV R4, R5 // a5101e00 ++ MULHV R4, R5, R6 // a6101e00 ++ MULHVU R4, R5 // a5901e00 ++ MULHVU R4, R5, R6 
// a6901e00 ++ REMV R4, R5 // a5902200 ++ REMV R4, R5, R6 // a6902200 ++ REMVU R4, R5 // a5902300 ++ REMVU R4, R5, R6 // a6902300 ++ DIVV R4, R5 // a5102200 ++ DIVV R4, R5, R6 // a6102200 ++ DIVVU R4, R5 // a5102300 ++ DIVVU R4, R5, R6 // a6102300 ++ ++ MOVH R4, result+16(FP) // 64604029 ++ MOVH R4, 1(R5) // a4044029 ++ MOVH y+8(FP), R4 // 64404028 ++ MOVH 1(R5), R4 // a4044028 ++ MOVHU R4, R5 // 8500cf00 ++ MOVHU R4, result+16(FP) // 64604029 ++ MOVHU R4, 1(R5) // a4044029 ++ MOVHU y+8(FP), R4 // 6440402a ++ MOVHU 1(R5), R4 // a404402a ++ MULU R4, R5 // a5101c00 ++ MULU R4, R5, R6 // a6101c00 ++ MULH R4, R5 // a5901c00 ++ MULH R4, R5, R6 // a6901c00 ++ MULHU R4, R5 // a5101d00 ++ MULHU R4, R5, R6 // a6101d00 ++ REM R4, R5 // a5902000 ++ REM R4, R5, R6 // a6902000 ++ REMU R4, R5 // a5902100 ++ REMU R4, R5, R6 // a6902100 ++ DIV R4, R5 // a5102000 ++ DIV R4, R5, R6 // a6102000 ++ DIVU R4, R5 // a5102100 ++ DIVU R4, R5, R6 // a6102100 ++ SRLV R4, R5 // a5101900 ++ SRLV R4, R5, R6 // a6101900 ++ SRLV $4, R4, R5 // 85104500 ++ SRLV $4, R4 // 84104500 ++ SRLV $32, R4, R5 // 85804500 ++ SRLV $32, R4 // 84804500 ++ ++ MOVFD F4, F5 // 85241901 ++ MOVDF F4, F5 // 85181901 ++ MOVWF F4, F5 // 85101d01 ++ MOVFW F4, F5 // 85041b01 ++ MOVWD F4, F5 // 85201d01 ++ MOVDW F4, F5 // 85081b01 ++ NEGF F4, F5 // 85141401 ++ NEGD F4, F5 // 85181401 ++ ABSD F4, F5 // 85081401 ++ TRUNCDW F4, F5 // 85881a01 ++ TRUNCFW F4, F5 // 85841a01 ++ SQRTF F4, F5 // 85441401 ++ SQRTD F4, F5 // 85481401 ++ ++ DBAR // 00007238 ++ NOOP // 00004003 ++ ++ MOVWR R4, result+16(FP) // 6460402f ++ MOVWR R4, 1(R5) // a404402f ++ MOVWR y+8(FP), R4 // 6440402e ++ MOVWR 1(R5), R4 // a404402e ++ ++ CMPGTF F4, R5 // a090110c ++ CMPGTD F4, R5 // a090210c ++ CMPGEF F4, R5 // a090130c ++ CMPGED F4, R5 // a090230c ++ CMPEQD F4, R5 // a010220c +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc2.s b/src/cmd/asm/internal/asm/testdata/loong64enc2.s +new file mode 100644 +index 0000000000..675b263acf +--- /dev/null 
++++ b/src/cmd/asm/internal/asm/testdata/loong64enc2.s +@@ -0,0 +1,82 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++ MOVB R4, R5 // 85e04000a5e04800 ++ MOVWU R4, R5 // 85804100a5804500 ++ MOVW $74565, R4 // 4402001484148d03 ++ MOVW $4097, R4 // 2400001484048003 ++ MOVV $74565, R4 // 4402001484148d03 ++ MOVV $4097, R4 // 2400001484048003 ++ AND $-1, R4, R5 // 1efcbf0285f81400 ++ AND $-1, R4 // 1efcbf0284f81400 ++ MOVW $-1, F4 // 1efcbf02c4a71401 ++ MOVW $1, F4 // 1e048002c4a71401 ++ TEQ $4, R4, R5 // 8508005c04002a00 ++ TEQ $4, R4 // 0408005c04002a00 ++ TNE $4, R4, R5 // 8508005804002a00 ++ TNE $4, R4 // 0408005804002a00 ++ ADD $65536, R4, R5 // 1e02001485781000 ++ ADD $4096, R4, R5 // 3e00001485781000 ++ ADD $65536, R4 // 1e02001484781000 ++ ADD $4096, R4 // 3e00001484781000 ++ ADDV $65536, R4, R5 // 1e02001485f81000 ++ ADDV $4096, R4, R5 // 3e00001485f81000 ++ ADDV $65536, R4 // 1e02001484f81000 ++ ADDV $4096, R4 // 3e00001484f81000 ++ AND $65536, R4, R5 // 1e02001485f81400 ++ AND $4096, R4, R5 // 3e00001485f81400 ++ AND $65536, R4 // 1e02001484f81400 ++ AND $4096, R4 // 3e00001484f81400 ++ SGT $65536, R4, R5 // 1e02001485781200 ++ SGT $4096, R4, R5 // 3e00001485781200 ++ SGT $65536, R4 // 1e02001484781200 ++ SGT $4096, R4 // 3e00001484781200 ++ SGTU $65536, R4, R5 // 1e02001485f81200 ++ SGTU $4096, R4, R5 // 3e00001485f81200 ++ SGTU $65536, R4 // 1e02001484f81200 ++ SGTU $4096, R4 // 3e00001484f81200 ++ ADDU $65536, R4, R5 // 1e02001485781000 ++ ADDU $4096, R4, R5 // 3e00001485781000 ++ ADDU $65536, R4 // 1e02001484781000 ++ ADDU $4096, R4 // 3e00001484781000 ++ ADDVU $65536, R4, R5 // 1e02001485f81000 ++ ADDVU $4096, R4, R5 // 3e00001485f81000 ++ ADDVU $65536, R4 // 1e02001484f81000 ++ ADDVU $4096, R4 // 3e00001484f81000 ++ OR $65536, R4, R5 // 
1e02001485781500 ++ OR $4096, R4, R5 // 3e00001485781500 ++ OR $65536, R4 // 1e02001484781500 ++ OR $4096, R4 // 3e00001484781500 ++ OR $-1, R4, R5 // 1efcbf0285781500 ++ OR $-1, R4 // 1efcbf0284781500 ++ XOR $65536, R4, R5 // 1e02001485f81500 ++ XOR $4096, R4, R5 // 3e00001485f81500 ++ XOR $65536, R4 // 1e02001484f81500 ++ XOR $4096, R4 // 3e00001484f81500 ++ XOR $-1, R4, R5 // 1efcbf0285f81500 ++ XOR $-1, R4 // 1efcbf0284f81500 ++ MOVH R4, R5 // 85c04000a5c04800 ++ ++ // relocation instructions ++ MOVW R4, name(SB) // 1e00001cc4038029 ++ MOVWU R4, name(SB) // 1e00001cc4038029 ++ MOVV R4, name(SB) // 1e00001cc403c029 ++ MOVB R4, name(SB) // 1e00001cc4030029 ++ MOVBU R4, name(SB) // 1e00001cc4030029 ++ MOVF F4, name(SB) // 1e00001cc403402b ++ MOVD F4, name(SB) // 1e00001cc403c02b ++ MOVW name(SB), R4 // 1e00001cc4038028 ++ MOVWU name(SB), R4 // 1e00001cc403802a ++ MOVV name(SB), R4 // 1e00001cc403c028 ++ MOVB name(SB), R4 // 1e00001cc4030028 ++ MOVBU name(SB), R4 // 1e00001cc403002a ++ MOVF name(SB), F4 // 1e00001cc403002b ++ MOVD name(SB), F4 // 1e00001cc403802b ++ MOVH R4, name(SB) // 1e00001cc4034029 ++ MOVH name(SB), R4 // 1e00001cc4034028 ++ MOVHU R4, name(SB) // 1e00001cc4034029 ++ MOVHU name(SB), R4 // 1e00001cc403402a +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc3.s b/src/cmd/asm/internal/asm/testdata/loong64enc3.s +new file mode 100644 +index 0000000000..fd6d9fedc4 +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc3.s +@@ -0,0 +1,131 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++ MOVW $65536(R4), R5 // 1e020014de03800385f81000 ++ MOVW $4096(R4), R5 // 3e000014de03800385f81000 ++ MOVV $65536(R4), R5 // 1e020014de03800385f81000 ++ MOVV $4096(R4), R5 // 3e000014de03800385f81000 ++ ADD $74565, R4 // 5e020014de178d0384781000 ++ ADD $4097, R4 // 3e000014de07800384781000 ++ ADDV $74565, R4 // 5e020014de178d0384f81000 ++ ADDV $4097, R4 // 3e000014de07800384f81000 ++ AND $74565, R4 // 5e020014de178d0384f81400 ++ AND $4097, R4 // 3e000014de07800384f81400 ++ ADD $74565, R4, R5 // 5e020014de178d0385781000 ++ ADD $4097, R4, R5 // 3e000014de07800385781000 ++ ADDV $74565, R4, R5 // 5e020014de178d0385f81000 ++ ADDV $4097, R4, R5 // 3e000014de07800385f81000 ++ AND $74565, R4, R5 // 5e020014de178d0385f81400 ++ AND $4097, R4, R5 // 3e000014de07800385f81400 ++ ++ MOVW R4, result+65540(FP) // 1e020014de8f1000c4338029 ++ MOVW R4, result+4097(FP) // 3e000014de8f1000c4278029 ++ MOVWU R4, result+65540(FP) // 1e020014de8f1000c4338029 ++ MOVWU R4, result+4097(FP) // 3e000014de8f1000c4278029 ++ MOVV R4, result+65540(FP) // 1e020014de8f1000c433c029 ++ MOVV R4, result+4097(FP) // 3e000014de8f1000c427c029 ++ MOVB R4, result+65540(FP) // 1e020014de8f1000c4330029 ++ MOVB R4, result+4097(FP) // 3e000014de8f1000c4270029 ++ MOVBU R4, result+65540(FP) // 1e020014de8f1000c4330029 ++ MOVBU R4, result+4097(FP) // 3e000014de8f1000c4270029 ++ MOVW R4, 65536(R5) // 1e020014de971000c4038029 ++ MOVW R4, 4096(R5) // 3e000014de971000c4038029 ++ MOVWU R4, 65536(R5) // 1e020014de971000c4038029 ++ MOVWU R4, 4096(R5) // 3e000014de971000c4038029 ++ MOVV R4, 65536(R5) // 1e020014de971000c403c029 ++ MOVV R4, 4096(R5) // 3e000014de971000c403c029 ++ MOVB R4, 65536(R5) // 1e020014de971000c4030029 ++ MOVB R4, 4096(R5) // 3e000014de971000c4030029 ++ MOVBU R4, 65536(R5) // 1e020014de971000c4030029 ++ MOVBU R4, 4096(R5) // 3e000014de971000c4030029 ++ SC R4, 65536(R5) // 1e020014de971000c4030021 ++ SC R4, 
4096(R5) // 3e000014de971000c4030021 ++ MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028 ++ MOVWU y+65540(FP), R4 // 1e020014de8f1000c433802a ++ MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028 ++ MOVB y+65540(FP), R4 // 1e020014de8f1000c4330028 ++ MOVBU y+65540(FP), R4 // 1e020014de8f1000c433002a ++ MOVW y+4097(FP), R4 // 3e000014de8f1000c4278028 ++ MOVWU y+4097(FP), R4 // 3e000014de8f1000c427802a ++ MOVV y+4097(FP), R4 // 3e000014de8f1000c427c028 ++ MOVB y+4097(FP), R4 // 3e000014de8f1000c4270028 ++ MOVBU y+4097(FP), R4 // 3e000014de8f1000c427002a ++ MOVW 65536(R5), R4 // 1e020014de971000c4038028 ++ MOVWU 65536(R5), R4 // 1e020014de971000c403802a ++ MOVV 65536(R5), R4 // 1e020014de971000c403c028 ++ MOVB 65536(R5), R4 // 1e020014de971000c4030028 ++ MOVBU 65536(R5), R4 // 1e020014de971000c403002a ++ MOVW 4096(R5), R4 // 3e000014de971000c4038028 ++ MOVWU 4096(R5), R4 // 3e000014de971000c403802a ++ MOVV 4096(R5), R4 // 3e000014de971000c403c028 ++ MOVB 4096(R5), R4 // 3e000014de971000c4030028 ++ MOVBU 4096(R5), R4 // 3e000014de971000c403002a ++ MOVW y+65540(FP), F4 // 1e020014de8f1000c433002b ++ MOVF y+65540(FP), F4 // 1e020014de8f1000c433002b ++ MOVD y+65540(FP), F4 // 1e020014de8f1000c433802b ++ MOVW y+4097(FP), F4 // 3e000014de8f1000c427002b ++ MOVF y+4097(FP), F4 // 3e000014de8f1000c427002b ++ MOVD y+4097(FP), F4 // 3e000014de8f1000c427802b ++ MOVW 65536(R5), F4 // 1e020014de971000c403002b ++ MOVF 65536(R5), F4 // 1e020014de971000c403002b ++ MOVD 65536(R5), F4 // 1e020014de971000c403802b ++ MOVW 4096(R5), F4 // 3e000014de971000c403002b ++ MOVF 4096(R5), F4 // 3e000014de971000c403002b ++ MOVD 4096(R5), F4 // 3e000014de971000c403802b ++ MOVW F4, result+65540(FP) // 1e020014de8f1000c433402b ++ MOVF F4, result+65540(FP) // 1e020014de8f1000c433402b ++ MOVD F4, result+65540(FP) // 1e020014de8f1000c433c02b ++ MOVW F4, result+4097(FP) // 3e000014de8f1000c427402b ++ MOVF F4, result+4097(FP) // 3e000014de8f1000c427402b ++ MOVD F4, result+4097(FP) // 3e000014de8f1000c427c02b 
++ MOVW F4, 65536(R5) // 1e020014de971000c403402b ++ MOVF F4, 65536(R5) // 1e020014de971000c403402b ++ MOVD F4, 65536(R5) // 1e020014de971000c403c02b ++ MOVW F4, 4096(R5) // 3e000014de971000c403402b ++ MOVF F4, 4096(R5) // 3e000014de971000c403402b ++ MOVD F4, 4096(R5) // 3e000014de971000c403c02b ++ ++ MOVH R4, result+65540(FP) // 1e020014de8f1000c4334029 ++ MOVH R4, 65536(R5) // 1e020014de971000c4034029 ++ MOVH y+65540(FP), R4 // 1e020014de8f1000c4334028 ++ MOVH 65536(R5), R4 // 1e020014de971000c4034028 ++ MOVH R4, result+4097(FP) // 3e000014de8f1000c4274029 ++ MOVH R4, 4096(R5) // 3e000014de971000c4034029 ++ MOVH y+4097(FP), R4 // 3e000014de8f1000c4274028 ++ MOVH 4096(R5), R4 // 3e000014de971000c4034028 ++ MOVHU R4, result+65540(FP) // 1e020014de8f1000c4334029 ++ MOVHU R4, 65536(R5) // 1e020014de971000c4034029 ++ MOVHU y+65540(FP), R4 // 1e020014de8f1000c433402a ++ MOVHU 65536(R5), R4 // 1e020014de971000c403402a ++ MOVHU R4, result+4097(FP) // 3e000014de8f1000c4274029 ++ MOVHU R4, 4096(R5) // 3e000014de971000c4034029 ++ MOVHU y+4097(FP), R4 // 3e000014de8f1000c427402a ++ MOVHU 4096(R5), R4 // 3e000014de971000c403402a ++ SGT $74565, R4 // 5e020014de178d0384781200 ++ SGT $74565, R4, R5 // 5e020014de178d0385781200 ++ SGT $4097, R4 // 3e000014de07800384781200 ++ SGT $4097, R4, R5 // 3e000014de07800385781200 ++ SGTU $74565, R4 // 5e020014de178d0384f81200 ++ SGTU $74565, R4, R5 // 5e020014de178d0385f81200 ++ SGTU $4097, R4 // 3e000014de07800384f81200 ++ SGTU $4097, R4, R5 // 3e000014de07800385f81200 ++ ADDU $74565, R4 // 5e020014de178d0384781000 ++ ADDU $74565, R4, R5 // 5e020014de178d0385781000 ++ ADDU $4097, R4 // 3e000014de07800384781000 ++ ADDU $4097, R4, R5 // 3e000014de07800385781000 ++ ADDVU $4097, R4 // 3e000014de07800384f81000 ++ ADDVU $4097, R4, R5 // 3e000014de07800385f81000 ++ ADDVU $74565, R4 // 5e020014de178d0384f81000 ++ ADDVU $74565, R4, R5 // 5e020014de178d0385f81000 ++ OR $74565, R4 // 5e020014de178d0384781500 ++ OR $74565, R4, R5 // 
5e020014de178d0385781500 ++ OR $4097, R4 // 3e000014de07800384781500 ++ OR $4097, R4, R5 // 3e000014de07800385781500 ++ XOR $74565, R4 // 5e020014de178d0384f81500 ++ XOR $74565, R4, R5 // 5e020014de178d0385f81500 ++ XOR $4097, R4 // 3e000014de07800384f81500 ++ XOR $4097, R4, R5 // 3e000014de07800385f81500 +-- +2.38.0 + diff --git a/loongarch64/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch b/loongarch64/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch new file mode 100644 index 0000000..a8d998a --- /dev/null +++ b/loongarch64/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch @@ -0,0 +1,750 @@ +From caccac630226d78485f57bb9739b836eb491784b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:25:46 +0800 +Subject: [PATCH 16/82] cmd/internal/objabi,cmd/link: support linker for + linux/loong64 + +Change-Id: I4680eb0635dd0fa3d6ea8348a2488da9c7e33d3b +--- + src/cmd/internal/objabi/reloctype.go | 22 +- + src/cmd/internal/objabi/reloctype_string.go | 124 +++++----- + src/cmd/link/internal/ld/config.go | 2 +- + src/cmd/link/internal/ld/elf.go | 9 +- + src/cmd/link/internal/ld/lib.go | 2 + + src/cmd/link/internal/ld/pcln.go | 2 +- + src/cmd/link/internal/ld/target.go | 4 + + src/cmd/link/internal/loadelf/ldelf.go | 14 ++ + src/cmd/link/internal/loong64/asm.go | 237 ++++++++++++++++++++ + src/cmd/link/internal/loong64/l.go | 17 ++ + src/cmd/link/internal/loong64/obj.go | 58 +++++ + src/cmd/link/internal/sym/reloc.go | 2 + + src/cmd/link/link_test.go | 2 + + src/cmd/link/main.go | 3 + + 14 files changed, 434 insertions(+), 64 deletions(-) + create mode 100644 src/cmd/link/internal/loong64/asm.go + create mode 100644 src/cmd/link/internal/loong64/l.go + create mode 100644 src/cmd/link/internal/loong64/obj.go + +diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go +index 52827a6dee..99bc5f7a68 100644 +--- a/src/cmd/internal/objabi/reloctype.go ++++ 
b/src/cmd/internal/objabi/reloctype.go +@@ -47,6 +47,9 @@ const ( + // R_ADDRMIPS (only used on mips/mips64) resolves to the low 16 bits of an external + // address, by encoding it into the instruction. + R_ADDRMIPS ++ // R_ADDRLOONG64 (only used on loong64) resolves to the low 12 bits of an external ++ // address, by encoding it into the instruction. ++ R_ADDRLOONG64 + // R_ADDROFF resolves to a 32-bit offset from the beginning of the section + // holding the data being relocated to the referenced symbol. + R_ADDROFF +@@ -61,6 +64,9 @@ const ( + R_CALLMIPS + // R_CALLRISCV marks RISC-V CALLs for stack checking. + R_CALLRISCV ++ // R_CALLLOONG64 (only used on loong64) resolves to non-PC-relative target address ++ // of a CALL (BL/JIRL) instruction, by encoding the address into the instruction. ++ R_CALLLOONG64 + R_CONST + R_PCREL + // R_TLS_LE, used on 386, amd64, and ARM, resolves to the offset of the +@@ -110,6 +116,9 @@ const ( + // of a JMP instruction, by encoding the address into the instruction. + // The stack nosplit check ignores this since it is not a function call. + R_JMPMIPS ++ // R_JMPLOONG64 (only used on loong64) resolves to non-PC-relative target address ++ // of a JMP instruction, by encoding the address into the instruction. ++ R_JMPLOONG64 + + // R_DWARFSECREF resolves to the offset of the symbol from its section. + // Target of relocation must be size 4 (in current implementation). +@@ -241,9 +250,18 @@ const ( + // R_ADDRMIPSU (only used on mips/mips64) resolves to the sign-adjusted "upper" 16 + // bits (bit 16-31) of an external address, by encoding it into the instruction. + R_ADDRMIPSU ++ // R_ADDRLOONG64U (only used on loong64) resolves to the sign-adjusted "upper" 20 ++ // bits (bit 12-31) of an external address, by encoding it into the instruction. ++ R_ADDRLOONG64U + // R_ADDRMIPSTLS (only used on mips64) resolves to the low 16 bits of a TLS + // address (offset from thread pointer), by encoding it into the instruction. 
+ R_ADDRMIPSTLS ++ // R_ADDRLOONG64TLS (only used on loong64) resolves to the low 12 bits of a TLS ++ // address (offset from thread pointer), by encoding it into the instruction. ++ R_ADDRLOONG64TLS ++ // R_ADDRLOONG64TLSU (only used on loong64) resolves to the high 20 bits of a TLS ++ // address (offset from thread pointer), by encoding it into the instruction. ++ R_ADDRLOONG64TLSU + + // R_ADDRCUOFF resolves to a pointer-sized offset from the start of the + // symbol's DWARF compile unit. +@@ -274,7 +292,7 @@ const ( + // the target address in register or memory. + func (r RelocType) IsDirectCall() bool { + switch r { +- case R_CALL, R_CALLARM, R_CALLARM64, R_CALLMIPS, R_CALLPOWER, R_CALLRISCV: ++ case R_CALL, R_CALLARM, R_CALLARM64, R_CALLLOONG64, R_CALLMIPS, R_CALLPOWER, R_CALLRISCV: + return true + } + return false +@@ -289,6 +307,8 @@ func (r RelocType) IsDirectJump() bool { + switch r { + case R_JMPMIPS: + return true ++ case R_JMPLOONG64: ++ return true + } + return false + } +diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go +index 4638ef14d9..9d34a0f7d5 100644 +--- a/src/cmd/internal/objabi/reloctype_string.go ++++ b/src/cmd/internal/objabi/reloctype_string.go +@@ -1,4 +1,4 @@ +-// Code generated by "stringer -type=RelocType"; DO NOT EDIT. ++// Code generated by "stringer -type RelocType reloctype.go"; DO NOT EDIT. 
+ + package objabi + +@@ -12,67 +12,73 @@ func _() { + _ = x[R_ADDRPOWER-2] + _ = x[R_ADDRARM64-3] + _ = x[R_ADDRMIPS-4] +- _ = x[R_ADDROFF-5] +- _ = x[R_SIZE-6] +- _ = x[R_CALL-7] +- _ = x[R_CALLARM-8] +- _ = x[R_CALLARM64-9] +- _ = x[R_CALLIND-10] +- _ = x[R_CALLPOWER-11] +- _ = x[R_CALLMIPS-12] +- _ = x[R_CALLRISCV-13] +- _ = x[R_CONST-14] +- _ = x[R_PCREL-15] +- _ = x[R_TLS_LE-16] +- _ = x[R_TLS_IE-17] +- _ = x[R_GOTOFF-18] +- _ = x[R_PLT0-19] +- _ = x[R_PLT1-20] +- _ = x[R_PLT2-21] +- _ = x[R_USEFIELD-22] +- _ = x[R_USETYPE-23] +- _ = x[R_USEIFACE-24] +- _ = x[R_USEIFACEMETHOD-25] +- _ = x[R_METHODOFF-26] +- _ = x[R_KEEP-27] +- _ = x[R_POWER_TOC-28] +- _ = x[R_GOTPCREL-29] +- _ = x[R_JMPMIPS-30] +- _ = x[R_DWARFSECREF-31] +- _ = x[R_DWARFFILEREF-32] +- _ = x[R_ARM64_TLS_LE-33] +- _ = x[R_ARM64_TLS_IE-34] +- _ = x[R_ARM64_GOTPCREL-35] +- _ = x[R_ARM64_GOT-36] +- _ = x[R_ARM64_PCREL-37] +- _ = x[R_ARM64_LDST8-38] +- _ = x[R_ARM64_LDST16-39] +- _ = x[R_ARM64_LDST32-40] +- _ = x[R_ARM64_LDST64-41] +- _ = x[R_ARM64_LDST128-42] +- _ = x[R_POWER_TLS_LE-43] +- _ = x[R_POWER_TLS_IE-44] +- _ = x[R_POWER_TLS-45] +- _ = x[R_ADDRPOWER_DS-46] +- _ = x[R_ADDRPOWER_GOT-47] +- _ = x[R_ADDRPOWER_PCREL-48] +- _ = x[R_ADDRPOWER_TOCREL-49] +- _ = x[R_ADDRPOWER_TOCREL_DS-50] +- _ = x[R_RISCV_PCREL_ITYPE-51] +- _ = x[R_RISCV_PCREL_STYPE-52] +- _ = x[R_RISCV_TLS_IE_ITYPE-53] +- _ = x[R_RISCV_TLS_IE_STYPE-54] +- _ = x[R_PCRELDBL-55] +- _ = x[R_ADDRMIPSU-56] +- _ = x[R_ADDRMIPSTLS-57] +- _ = x[R_ADDRCUOFF-58] +- _ = x[R_WASMIMPORT-59] +- _ = x[R_XCOFFREF-60] ++ _ = x[R_ADDRLOONG64-5] ++ _ = x[R_ADDROFF-6] ++ _ = x[R_SIZE-7] ++ _ = x[R_CALL-8] ++ _ = x[R_CALLARM-9] ++ _ = x[R_CALLARM64-10] ++ _ = x[R_CALLIND-11] ++ _ = x[R_CALLPOWER-12] ++ _ = x[R_CALLMIPS-13] ++ _ = x[R_CALLRISCV-14] ++ _ = x[R_CALLLOONG64-15] ++ _ = x[R_CONST-16] ++ _ = x[R_PCREL-17] ++ _ = x[R_TLS_LE-18] ++ _ = x[R_TLS_IE-19] ++ _ = x[R_GOTOFF-20] ++ _ = x[R_PLT0-21] ++ _ = x[R_PLT1-22] ++ _ = x[R_PLT2-23] ++ _ = 
x[R_USEFIELD-24] ++ _ = x[R_USETYPE-25] ++ _ = x[R_USEIFACE-26] ++ _ = x[R_USEIFACEMETHOD-27] ++ _ = x[R_METHODOFF-28] ++ _ = x[R_KEEP-29] ++ _ = x[R_POWER_TOC-30] ++ _ = x[R_GOTPCREL-31] ++ _ = x[R_JMPMIPS-32] ++ _ = x[R_JMPLOONG64-33] ++ _ = x[R_DWARFSECREF-34] ++ _ = x[R_DWARFFILEREF-35] ++ _ = x[R_ARM64_TLS_LE-36] ++ _ = x[R_ARM64_TLS_IE-37] ++ _ = x[R_ARM64_GOTPCREL-38] ++ _ = x[R_ARM64_GOT-39] ++ _ = x[R_ARM64_PCREL-40] ++ _ = x[R_ARM64_LDST8-41] ++ _ = x[R_ARM64_LDST16-42] ++ _ = x[R_ARM64_LDST32-43] ++ _ = x[R_ARM64_LDST64-44] ++ _ = x[R_ARM64_LDST128-45] ++ _ = x[R_POWER_TLS_LE-46] ++ _ = x[R_POWER_TLS_IE-47] ++ _ = x[R_POWER_TLS-48] ++ _ = x[R_ADDRPOWER_DS-49] ++ _ = x[R_ADDRPOWER_GOT-50] ++ _ = x[R_ADDRPOWER_PCREL-51] ++ _ = x[R_ADDRPOWER_TOCREL-52] ++ _ = x[R_ADDRPOWER_TOCREL_DS-53] ++ _ = x[R_RISCV_PCREL_ITYPE-54] ++ _ = x[R_RISCV_PCREL_STYPE-55] ++ _ = x[R_RISCV_TLS_IE_ITYPE-56] ++ _ = x[R_RISCV_TLS_IE_STYPE-57] ++ _ = x[R_PCRELDBL-58] ++ _ = x[R_ADDRMIPSU-59] ++ _ = x[R_ADDRLOONG64U-60] ++ _ = x[R_ADDRMIPSTLS-61] ++ _ = x[R_ADDRLOONG64TLS-62] ++ _ = x[R_ADDRLOONG64TLSU-63] ++ _ = x[R_ADDRCUOFF-64] ++ _ = x[R_WASMIMPORT-65] ++ _ = x[R_XCOFFREF-66] + } + +-const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" ++const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDRLOONG64R_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CALLLOONG64R_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_JMPLOONG64R_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRLOONG64UR_ADDRMIPSTLSR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" + +-var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 120, 127, 134, 142, 150, 158, 164, 170, 176, 186, 195, 205, 221, 232, 238, 249, 259, 268, 281, 295, 309, 323, 339, 350, 363, 376, 390, 404, 418, 433, 447, 461, 472, 486, 501, 518, 536, 557, 576, 595, 615, 635, 645, 656, 669, 680, 692, 702} ++var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 51, 60, 66, 72, 81, 92, 101, 112, 122, 133, 146, 153, 160, 168, 176, 184, 190, 196, 202, 212, 221, 231, 247, 258, 264, 275, 285, 294, 306, 319, 333, 347, 361, 377, 388, 401, 414, 428, 442, 456, 471, 485, 499, 510, 524, 539, 556, 574, 595, 614, 633, 653, 673, 683, 694, 708, 721, 737, 754, 765, 777, 787} + + func (i RelocType) String() string { + i -= 1 +diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go +index 20f1d0b8c1..c15aac8b6a 100644 +--- a/src/cmd/link/internal/ld/config.go ++++ b/src/cmd/link/internal/ld/config.go +@@ -196,7 +196,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { + // Internally linking cgo is incomplete on some architectures. 
+ // https://golang.org/issue/14449 + // https://golang.org/issue/21961 +- if iscgo && ctxt.Arch.InFamily(sys.MIPS64, sys.MIPS, sys.PPC64, sys.RISCV64) { ++ if iscgo && ctxt.Arch.InFamily(sys.Loong64, sys.MIPS64, sys.MIPS, sys.PPC64, sys.RISCV64) { + return true, buildcfg.GOARCH + " does not support internal cgo" + } + if iscgo && (buildcfg.GOOS == "android" || buildcfg.GOOS == "dragonfly") { +diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go +index d16539e4bf..015a698479 100644 +--- a/src/cmd/link/internal/ld/elf.go ++++ b/src/cmd/link/internal/ld/elf.go +@@ -208,7 +208,7 @@ var buildinfo []byte + func Elfinit(ctxt *Link) { + ctxt.IsELF = true + +- if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { ++ if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { + elfRelType = ".rela" + } else { + elfRelType = ".rel" +@@ -223,10 +223,13 @@ func Elfinit(ctxt *Link) { + ehdr.Flags = 2 /* Version 2 ABI */ + } + fallthrough +- case sys.AMD64, sys.ARM64, sys.MIPS64, sys.RISCV64: ++ case sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.RISCV64: + if ctxt.Arch.Family == sys.MIPS64 { + ehdr.Flags = 0x20000004 /* MIPS 3 CPIC */ + } ++ if ctxt.Arch.Family == sys.Loong64 { ++ ehdr.Flags = 0x3 /* LoongArch lp64d */ ++ } + if ctxt.Arch.Family == sys.RISCV64 { + ehdr.Flags = 0x4 /* RISCV Float ABI Double */ + } +@@ -1647,6 +1650,8 @@ func asmbElf(ctxt *Link) { + Exitf("unknown architecture in asmbelf: %v", ctxt.Arch.Family) + case sys.MIPS, sys.MIPS64: + eh.Machine = uint16(elf.EM_MIPS) ++ case sys.Loong64: ++ eh.Machine = uint16(elf.EM_LOONGARCH) + case sys.ARM: + eh.Machine = uint16(elf.EM_ARM) + case sys.AMD64: +diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go +index 644faeb2fb..7bd6193693 100644 +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -1774,6 +1774,8 @@ func hostlinkArchArgs(arch 
*sys.Arch) []string { + if buildcfg.GOOS == "darwin" { + return []string{"-arch", "arm64"} + } ++ case sys.Loong64: ++ return []string{"-mabi=lp64d"} + case sys.MIPS64: + return []string{"-mabi=64"} + case sys.MIPS: +diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go +index 05fd302369..a3897fbd3c 100644 +--- a/src/cmd/link/internal/ld/pcln.go ++++ b/src/cmd/link/internal/ld/pcln.go +@@ -148,7 +148,7 @@ func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 { + switch target.Arch.Family { + case sys.AMD64, sys.I386: + deferreturn-- +- case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: ++ case sys.PPC64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64: + // no change + case sys.RISCV64: + // TODO(jsing): The JALR instruction is marked with +diff --git a/src/cmd/link/internal/ld/target.go b/src/cmd/link/internal/ld/target.go +index f68de8fff1..69ce26a7fd 100644 +--- a/src/cmd/link/internal/ld/target.go ++++ b/src/cmd/link/internal/ld/target.go +@@ -112,6 +112,10 @@ func (t *Target) IsMIPS64() bool { + return t.Arch.Family == sys.MIPS64 + } + ++func (t *Target) IsLOONG64() bool { ++ return t.Arch.Family == sys.Loong64 ++} ++ + func (t *Target) IsPPC64() bool { + return t.Arch.Family == sys.PPC64 + } +diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go +index c6956297f6..d677bff3c9 100644 +--- a/src/cmd/link/internal/loadelf/ldelf.go ++++ b/src/cmd/link/internal/loadelf/ldelf.go +@@ -346,6 +346,10 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, + if mach != elf.EM_MIPS || class != elf.ELFCLASS64 { + return errorf("elf object but not mips64") + } ++ case sys.Loong64: ++ if mach != elf.EM_LOONGARCH || class != elf.ELFCLASS64 { ++ return errorf("elf object but not loong64") ++ } + + case sys.ARM: + if e != binary.LittleEndian || mach != elf.EM_ARM || class != elf.ELFCLASS32 { +@@ -958,6 +962,7 @@ func relSize(arch *sys.Arch, pn string, 
elftype uint32) (uint8, uint8, error) { + ARM = uint32(sys.ARM) + ARM64 = uint32(sys.ARM64) + I386 = uint32(sys.I386) ++ LOONG64 = uint32(sys.Loong64) + MIPS = uint32(sys.MIPS) + MIPS64 = uint32(sys.MIPS64) + PPC64 = uint32(sys.PPC64) +@@ -993,6 +998,15 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) { + MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16: + return 4, 4, nil + ++ case LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_PCREL)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_GPREL)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_ABSOLUTE)<<16, ++ LOONG64 | uint32(elf.R_LARCH_MARK_LA)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)<<16, ++ LOONG64 | uint32(elf.R_LARCH_64)<<16, ++ LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16: ++ return 4, 4, nil ++ + case S390X | uint32(elf.R_390_8)<<16: + return 1, 1, nil + +diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go +new file mode 100644 +index 0000000000..9c264311c4 +--- /dev/null ++++ b/src/cmd/link/internal/loong64/asm.go +@@ -0,0 +1,237 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++import ( ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "cmd/link/internal/ld" ++ "cmd/link/internal/loader" ++ "cmd/link/internal/sym" ++ "debug/elf" ++ "log" ++) ++ ++func gentext(ctxt *ld.Link, ldr *loader.Loader) {} ++ ++func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { ++ log.Fatalf("adddynrel not implemented") ++ return false ++} ++ ++func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { ++ // loong64 ELF relocation (endian neutral) ++ // offset uint64 ++ // sym uint64 ++ // addend int64 ++ ++ elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) ++ switch r.Type { ++ default: ++ return false ++ case objabi.R_ADDR, objabi.R_DWARFSECREF: ++ switch r.Size { ++ case 4: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_32) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ case 8: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_64) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ default: ++ return false ++ } ++ case objabi.R_ADDRLOONG64TLS: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_TPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xfff)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_AND)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_U_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64TLSU: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_TPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ 
out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20) | uint64(0)<<32) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_CALLLOONG64: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PLT_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x4)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x804)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SL)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SUB)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64U: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x800)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20) | uint64(0)<<32) ++ 
out.Write64(uint64(0x0)) ++ } ++ ++ return true ++} ++func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { ++ return ++} ++ ++func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { ++ return false ++} ++ ++func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { ++ rs := r.Sym() ++ rs = ldr.ResolveABIAlias(rs) ++ ++ if target.IsExternal() { ++ nExtReloc := 0 ++ switch r.Type() { ++ default: ++ return val, 0, false ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ // set up addend for eventual relocation via outer symbol. ++ rs, _ := ld.FoldSubSymbolOffset(ldr, rs) ++ rst := ldr.SymType(rs) ++ if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil { ++ ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) ++ } ++ nExtReloc = 8 // need two ELF relocations. see elfreloc1 ++ if r.Type() == objabi.R_ADDRLOONG64U { ++ nExtReloc = 4 ++ } ++ return val, nExtReloc, true ++ case objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU, ++ objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ nExtReloc = 4 ++ if r.Type() == objabi.R_CALLLOONG64 || r.Type() == objabi.R_JMPLOONG64 { ++ nExtReloc = 2 ++ } ++ return val, nExtReloc, true ++ } ++ } ++ ++ const isOk = true ++ const noExtReloc = 0 ++ ++ switch r.Type() { ++ case objabi.R_CONST: ++ return r.Add(), noExtReloc, isOk ++ case objabi.R_GOTOFF: ++ return ldr.SymValue(r.Sym()) + r.Add() - ldr.SymValue(syms.GOT), noExtReloc, isOk ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ pc := ldr.SymValue(s) + int64(r.Off()) ++ t := ldr.SymAddr(rs) + r.Add() - pc ++ if r.Type() == objabi.R_ADDRLOONG64 { ++ return int64(val&0xffc003ff | (((t + 4 - ((t + 4 + 1<<11) >> 12 << 12)) << 10) & 0x3ffc00)), noExtReloc, isOk ++ } ++ return int64(val&0xfe00001f | (((t + 1<<11) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk ++ case 
objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU: ++ t := ldr.SymAddr(rs) + r.Add() ++ if r.Type() == objabi.R_ADDRLOONG64TLS { ++ return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk ++ } ++ return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk ++ case objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ pc := ldr.SymValue(s) + int64(r.Off()) ++ t := ldr.SymAddr(rs) + r.Add() - pc ++ return int64(val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16)), noExtReloc, isOk ++ } ++ ++ return val, 0, false ++} ++ ++func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { ++ return -1 ++} ++ ++func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { ++ switch r.Type() { ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ return ld.ExtrelocViaOuterSym(ldr, r, s), true ++ ++ case objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU, ++ objabi.R_CONST, ++ objabi.R_GOTOFF, ++ objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ return ld.ExtrelocSimple(ldr, r), true ++ } ++ return loader.ExtReloc{}, false ++} +diff --git a/src/cmd/link/internal/loong64/l.go b/src/cmd/link/internal/loong64/l.go +new file mode 100644 +index 0000000000..a06090170d +--- /dev/null ++++ b/src/cmd/link/internal/loong64/l.go +@@ -0,0 +1,17 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++const ( ++ maxAlign = 32 // max data alignment ++ minAlign = 1 // min data alignment ++ funcAlign = 8 ++) ++ ++/* Used by ../../internal/ld/dwarf.go */ ++const ( ++ dwarfRegSP = 3 ++ dwarfRegLR = 1 ++) +diff --git a/src/cmd/link/internal/loong64/obj.go b/src/cmd/link/internal/loong64/obj.go +new file mode 100644 +index 0000000000..62014fabc1 +--- /dev/null ++++ b/src/cmd/link/internal/loong64/obj.go +@@ -0,0 +1,58 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "cmd/link/internal/ld" ++) ++ ++func Init() (*sys.Arch, ld.Arch) { ++ arch := sys.ArchLoong64 ++ ++ theArch := ld.Arch{ ++ Funcalign: funcAlign, ++ Maxalign: maxAlign, ++ Minalign: minAlign, ++ Dwarfregsp: dwarfRegSP, ++ Dwarfreglr: dwarfRegLR, ++ Adddynrel: adddynrel, ++ Archinit: archinit, ++ Archreloc: archreloc, ++ Archrelocvariant: archrelocvariant, ++ Extreloc: extreloc, ++ Elfreloc1: elfreloc1, ++ ElfrelocSize: 24, ++ Elfsetupplt: elfsetupplt, ++ Machoreloc1: machoreloc1, ++ Gentext: gentext, ++ ++ Linuxdynld: "/lib64/ld.so.1", ++ Freebsddynld: "XXX", ++ Openbsddynld: "XXX", ++ Netbsddynld: "XXX", ++ Dragonflydynld: "XXX", ++ Solarisdynld: "XXX", ++ } ++ ++ return arch, theArch ++} ++ ++func archinit(ctxt *ld.Link) { ++ switch ctxt.HeadType { ++ default: ++ ld.Exitf("unknown -H option: %v", ctxt.HeadType) ++ case objabi.Hlinux: /* loong64 elf */ ++ ld.Elfinit(ctxt) ++ ld.HEADR = ld.ELFRESERVE ++ if *ld.FlagTextAddr == -1 { ++ *ld.FlagTextAddr = 0x10000 + int64(ld.HEADR) ++ } ++ if *ld.FlagRound == -1 { ++ *ld.FlagRound = 0x10000 ++ } ++ } ++} +diff --git a/src/cmd/link/internal/sym/reloc.go b/src/cmd/link/internal/sym/reloc.go +index a543233a1d..a44dcdd517 100644 +--- a/src/cmd/link/internal/sym/reloc.go ++++ b/src/cmd/link/internal/sym/reloc.go +@@ -59,6 +59,8 @@ func 
RelocName(arch *sys.Arch, r objabi.RelocType) string { + return elf.R_AARCH64(nr).String() + case sys.I386: + return elf.R_386(nr).String() ++ case sys.Loong64: ++ return elf.R_LARCH(nr).String() + case sys.MIPS, sys.MIPS64: + return elf.R_MIPS(nr).String() + case sys.PPC64: +diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go +index 7230054bed..7ffdf8d398 100644 +--- a/src/cmd/link/link_test.go ++++ b/src/cmd/link/link_test.go +@@ -174,6 +174,8 @@ func TestIssue33979(t *testing.T) { + + // Skip test on platforms that do not support cgo internal linking. + switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) + } +diff --git a/src/cmd/link/main.go b/src/cmd/link/main.go +index d92478e61e..16e5a01151 100644 +--- a/src/cmd/link/main.go ++++ b/src/cmd/link/main.go +@@ -10,6 +10,7 @@ import ( + "cmd/link/internal/arm" + "cmd/link/internal/arm64" + "cmd/link/internal/ld" ++ "cmd/link/internal/loong64" + "cmd/link/internal/mips" + "cmd/link/internal/mips64" + "cmd/link/internal/ppc64" +@@ -53,6 +54,8 @@ func main() { + arch, theArch = arm.Init() + case "arm64": + arch, theArch = arm64.Init() ++ case "loong64": ++ arch, theArch = loong64.Init() + case "mips", "mipsle": + arch, theArch = mips.Init() + case "mips64", "mips64le": +-- +2.38.0 + diff --git a/loongarch64/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch b/loongarch64/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch new file mode 100644 index 0000000..91520c8 --- /dev/null +++ b/loongarch64/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch @@ -0,0 +1,989 @@ +From 9f8e488815a8fd8170cd8e2b488a2efe1dabdab0 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 15:43:08 +0800 +Subject: [PATCH 17/82] runtime: bootstrap for linux/loong64 and implement + runtime core assembly function + 
+Change-Id: I252ecd0b13580c5e71723715023b1951985045f3 +--- + src/runtime/asm_loong64.s | 823 ++++++++++++++++++++++++++++++++ + src/runtime/atomic_loong64.s | 14 + + src/runtime/cputicks.go | 4 +- + src/runtime/os_linux_loong64.go | 19 + + src/runtime/os_linux_noauxv.go | 4 +- + src/runtime/rt0_linux_loong64.s | 27 ++ + src/runtime/sys_loong64.go | 21 + + 7 files changed, 908 insertions(+), 4 deletions(-) + create mode 100644 src/runtime/asm_loong64.s + create mode 100644 src/runtime/atomic_loong64.s + create mode 100644 src/runtime/os_linux_loong64.go + create mode 100644 src/runtime/rt0_linux_loong64.s + create mode 100644 src/runtime/sys_loong64.go + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +new file mode 100644 +index 0000000000..cfc270f28b +--- /dev/null ++++ b/src/runtime/asm_loong64.s +@@ -0,0 +1,823 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "funcdata.h" ++#include "textflag.h" ++ ++#define REGCTXT R29 ++ ++TEXT runtime·rt0_go(SB),NOSPLIT,$0 ++ // R3 = stack; R4 = argc; R5 = argv ++ ++ ADDV $-24, R3 ++ MOVW R4, 8(R3) // argc ++ MOVV R5, 16(R3) // argv ++ ++ // create istack out of the given (operating system) stack. ++ // _cgo_init may update stackguard. ++ MOVV $runtime·g0(SB), g ++ MOVV $(-64*1024), R30 ++ ADDV R30, R3, R19 ++ MOVV R19, g_stackguard0(g) ++ MOVV R19, g_stackguard1(g) ++ MOVV R19, (g_stack+stack_lo)(g) ++ MOVV R3, (g_stack+stack_hi)(g) ++ ++ // if there is a _cgo_init, call it using the gcc ABI. 
++ MOVV _cgo_init(SB), R25 ++ BEQ R25, nocgo ++ ++ MOVV R0, R7 // arg 3: not used ++ MOVV R0, R6 // arg 2: not used ++ MOVV $setg_gcc<>(SB), R5 // arg 1: setg ++ MOVV g, R4 // arg 0: G ++ JAL (R25) ++ ++nocgo: ++ // update stackguard after _cgo_init ++ MOVV (g_stack+stack_lo)(g), R19 ++ ADDV $const__StackGuard, R19 ++ MOVV R19, g_stackguard0(g) ++ MOVV R19, g_stackguard1(g) ++ ++ // set the per-goroutine and per-mach "registers" ++ MOVV $runtime·m0(SB), R19 ++ ++ // save m->g0 = g0 ++ MOVV g, m_g0(R19) ++ // save m0 to g0->m ++ MOVV R19, g_m(g) ++ ++ JAL runtime·check(SB) ++ ++ // args are already prepared ++ JAL runtime·args(SB) ++ JAL runtime·osinit(SB) ++ JAL runtime·schedinit(SB) ++ ++ // create a new goroutine to start program ++ MOVV $runtime·mainPC(SB), R19 // entry ++ ADDV $-24, R3 ++ MOVV R19, 16(R3) ++ MOVV R0, 8(R3) ++ MOVV R0, 0(R3) ++ JAL runtime·newproc(SB) ++ ADDV $24, R3 ++ ++ // start this M ++ JAL runtime·mstart(SB) ++ ++ MOVV R0, 1(R0) ++ RET ++ ++DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) ++GLOBL runtime·mainPC(SB),RODATA,$8 ++ ++TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R0, 2(R0) // TODO: TD ++ RET ++ ++TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 ++ RET ++ ++TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 ++ JAL runtime·mstart0(SB) ++ RET // not reached ++ ++// void jmpdefer(fv, sp); ++// called from deferreturn. ++// 1. grab stored LR for caller ++// 2. sub 4 bytes to get back to JAL deferreturn ++// 3. 
JMP to fn ++TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 ++ MOVV 0(R3), R1 ++ //-4 because loong doesn't have delay slot ++ ADDV $-4, R1 ++ ++ MOVV fv+0(FP), REGCTXT ++ MOVV argp+8(FP), R3 ++ ADDV $-8, R3 ++ NOR R0, R0 // prevent scheduling ++ MOVV 0(REGCTXT), R4 ++ JMP (R4) ++ ++/* ++ * go-routine ++ */ ++ ++// void gosave(Gobuf*) ++// save state in Gobuf; setjmp ++TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 ++ MOVV buf+0(FP), R19 ++ MOVV R3, gobuf_sp(R19) ++ MOVV R1, gobuf_pc(R19) ++ MOVV g, gobuf_g(R19) ++ MOVV R0, gobuf_lr(R19) ++ MOVV R0, gobuf_ret(R19) ++ // Assert ctxt is zero. See func save. ++ MOVV gobuf_ctxt(R19), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·badctxt(SB) ++ RET ++ ++// void gogo(Gobuf*) ++// restore state from Gobuf; longjmp ++TEXT runtime·gogo(SB), NOSPLIT, $16-8 ++ MOVV buf+0(FP), R4 ++ MOVV gobuf_g(R4), g // make sure g is not nil ++ JAL runtime·save_g(SB) ++ ++ MOVV 0(g), R5 ++ MOVV gobuf_sp(R4), R3 ++ MOVV gobuf_lr(R4), R1 ++ MOVV gobuf_ret(R4), R19 ++ MOVV gobuf_ctxt(R4), REGCTXT ++ MOVV R0, gobuf_sp(R4) ++ MOVV R0, gobuf_ret(R4) ++ MOVV R0, gobuf_lr(R4) ++ MOVV R0, gobuf_ctxt(R4) ++ MOVV gobuf_pc(R4), R6 ++ JMP (R6) ++ ++// void mcall(fn func(*g)) ++// Switch to m->g0's stack, call fn(g). ++// Fn must never return. It should gogo(&g->sched) ++// to keep running g. ++TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 ++ // Save caller state in g->sched ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R1, (g_sched+gobuf_pc)(g) ++ MOVV R0, (g_sched+gobuf_lr)(g) ++ MOVV g, (g_sched+gobuf_g)(g) ++ ++ // Switch to m->g0 & its stack, call fn. 
++ MOVV g, R19 ++ MOVV g_m(g), R4 ++ MOVV m_g0(R4), g ++ JAL runtime·save_g(SB) ++ BNE g, R19, 2(PC) ++ JMP runtime·badmcall(SB) ++ MOVV fn+0(FP), REGCTXT // context ++ MOVV 0(REGCTXT), R5 // code pointer ++ MOVV (g_sched+gobuf_sp)(g), R3 // sp = m->g0->sched.sp ++ ADDV $-16, R3 ++ MOVV R19, 8(R3) ++ MOVV R0, 0(R3) ++ JAL (R5) ++ JMP runtime·badmcall2(SB) ++ ++// systemstack_switch is a dummy routine that systemstack leaves at the bottom ++// of the G stack. We need to distinguish the routine that ++// lives at the bottom of the G stack from the one that lives ++// at the top of the system stack because the one at the top of ++// the system stack terminates the stack walk (see topofstack()). ++TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 ++ UNDEF ++ JAL (R1) // make sure this function is not leaf ++ RET ++ ++// func systemstack(fn func()) ++TEXT runtime·systemstack(SB), NOSPLIT, $0-8 ++ MOVV fn+0(FP), R19 // R19 = fn ++ MOVV R19, REGCTXT // context ++ MOVV g_m(g), R4 // R4 = m ++ ++ MOVV m_gsignal(R4), R5 // R5 = gsignal ++ BEQ g, R5, noswitch ++ ++ MOVV m_g0(R4), R5 // R5 = g0 ++ BEQ g, R5, noswitch ++ ++ MOVV m_curg(R4), R6 ++ BEQ g, R6, switch ++ ++ // Bad: g is not gsignal, not g0, not curg. What is it? ++ // Hide call from linker nosplit analysis. ++ MOVV $runtime·badsystemstack(SB), R7 ++ JAL (R7) ++ JAL runtime·abort(SB) ++ ++switch: ++ // save our state in g->sched. Pretend to ++ // be systemstack_switch if the G stack is scanned. 
++ JAL gosave_systemstack_switch<>(SB) ++ ++ // switch to g0 ++ MOVV R5, g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R19 ++ // make it look like mstart called systemstack on g0, to stop traceback ++ ADDV $-8, R19 ++ MOVV $runtime·mstart(SB), R6 ++ MOVV R6, 0(R19) ++ MOVV R19, R3 ++ ++ // call target function ++ MOVV 0(REGCTXT), R6 // code pointer ++ JAL (R6) ++ ++ // switch back to g ++ MOVV g_m(g), R4 ++ MOVV m_curg(R4), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ MOVV R0, (g_sched+gobuf_sp)(g) ++ RET ++ ++noswitch: ++ // already on m stack, just call directly ++ // Using a tail call here cleans up tracebacks since we won't stop ++ // at an intermediate systemstack. ++ MOVV 0(REGCTXT), R4 // code pointer ++ MOVV 0(R3), R1 // restore LR ++ ADDV $8, R3 ++ JMP (R4) ++ ++/* ++ * support for morestack ++ */ ++ ++// Called during function prolog when more stack is needed. ++// Caller has already loaded: ++// loong64: R5: LR ++// ++// The traceback routines see morestack on a g0 as being ++// the top of a stack (for example, morestack calling newstack ++// calling the scheduler calling newm calling gc), so we must ++// record an argument size. For that purpose, it has no arguments. ++TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 ++ // Cannot grow scheduler stack (m->g0). ++ MOVV g_m(g), R7 ++ MOVV m_g0(R7), R8 ++ BNE g, R8, 3(PC) ++ JAL runtime·badmorestackg0(SB) ++ JAL runtime·abort(SB) ++ ++ // Cannot grow signal stack (m->gsignal). ++ MOVV m_gsignal(R7), R8 ++ BNE g, R8, 3(PC) ++ JAL runtime·badmorestackgsignal(SB) ++ JAL runtime·abort(SB) ++ ++ // Called from f. ++ // Set g->sched to context in f. ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R1, (g_sched+gobuf_pc)(g) ++ MOVV R5, (g_sched+gobuf_lr)(g) ++ MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) ++ ++ // Called from f. ++ // Set m->morebuf to f's caller. 
++ MOVV R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC ++ MOVV R3, (m_morebuf+gobuf_sp)(R7) // f's caller's SP ++ MOVV g, (m_morebuf+gobuf_g)(R7) ++ ++ // Call newstack on m->g0's stack. ++ MOVV m_g0(R7), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ // Create a stack frame on g0 to call newstack. ++ MOVV R0, -8(R3) // Zero saved LR in frame ++ ADDV $-8, R3 ++ JAL runtime·newstack(SB) ++ ++ // Not reached, but make sure the return PC from the call to newstack ++ // is still in this function, and not the beginning of the next. ++ UNDEF ++ ++TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R0, REGCTXT ++ JMP runtime·morestack(SB) ++ ++// reflectcall: call a function with the given argument list ++// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). ++// we don't have variable-sized frames, so we use a small number ++// of constant-sized-frame functions to encode a few bits of size in the pc. ++// Caution: ugly multiline assembly macros in your future! ++ ++#define DISPATCH(NAME,MAXSIZE) \ ++ MOVV $MAXSIZE, R30; \ ++ SGTU R19, R30, R30; \ ++ BNE R30, 3(PC); \ ++ MOVV $NAME(SB), R4; \ ++ JMP (R4) ++// Note: can't just "BR NAME(SB)" - bad inlining results. 
++ ++TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 ++ MOVWU stackArgsSize+24(FP), R19 ++ DISPATCH(runtime·call32, 32) ++ DISPATCH(runtime·call64, 64) ++ DISPATCH(runtime·call128, 128) ++ DISPATCH(runtime·call256, 256) ++ DISPATCH(runtime·call512, 512) ++ DISPATCH(runtime·call1024, 1024) ++ DISPATCH(runtime·call2048, 2048) ++ DISPATCH(runtime·call4096, 4096) ++ DISPATCH(runtime·call8192, 8192) ++ DISPATCH(runtime·call16384, 16384) ++ DISPATCH(runtime·call32768, 32768) ++ DISPATCH(runtime·call65536, 65536) ++ DISPATCH(runtime·call131072, 131072) ++ DISPATCH(runtime·call262144, 262144) ++ DISPATCH(runtime·call524288, 524288) ++ DISPATCH(runtime·call1048576, 1048576) ++ DISPATCH(runtime·call2097152, 2097152) ++ DISPATCH(runtime·call4194304, 4194304) ++ DISPATCH(runtime·call8388608, 8388608) ++ DISPATCH(runtime·call16777216, 16777216) ++ DISPATCH(runtime·call33554432, 33554432) ++ DISPATCH(runtime·call67108864, 67108864) ++ DISPATCH(runtime·call134217728, 134217728) ++ DISPATCH(runtime·call268435456, 268435456) ++ DISPATCH(runtime·call536870912, 536870912) ++ DISPATCH(runtime·call1073741824, 1073741824) ++ MOVV $runtime·badreflectcall(SB), R4 ++ JMP (R4) ++ ++#define CALLFN(NAME,MAXSIZE) \ ++TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ ++ NO_LOCAL_POINTERS; \ ++ /* copy arguments to stack */ \ ++ MOVV arg+16(FP), R4; \ ++ MOVWU argsize+24(FP), R5; \ ++ MOVV R3, R12; \ ++ ADDV $8, R12; \ ++ ADDV R12, R5; \ ++ BEQ R12, R5, 6(PC); \ ++ MOVBU (R4), R6; \ ++ ADDV $1, R4; \ ++ MOVBU R6, (R12); \ ++ ADDV $1, R12; \ ++ JMP -5(PC); \ ++ /* call function */ \ ++ MOVV f+8(FP), REGCTXT; \ ++ MOVV (REGCTXT), R6; \ ++ PCDATA $PCDATA_StackMapIndex, $0; \ ++ JAL (R6); \ ++ /* copy return values back */ \ ++ MOVV argtype+0(FP), R7; \ ++ MOVV arg+16(FP), R4; \ ++ MOVWU n+24(FP), R5; \ ++ MOVWU retoffset+28(FP), R6; \ ++ ADDV $8, R3, R12; \ ++ ADDV R6, R12; \ ++ ADDV R6, R4; \ ++ SUBVU R6, R5; \ ++ JAL callRet<>(SB); \ ++ RET ++ ++// callRet copies return values back at the end of call*. 
This is a ++// separate function so it can allocate stack space for the arguments ++// to reflectcallmove. It does not follow the Go ABI; it expects its ++// arguments in registers. ++TEXT callRet<>(SB), NOSPLIT, $32-0 ++ MOVV R7, 8(R3) ++ MOVV R4, 16(R3) ++ MOVV R12, 24(R3) ++ MOVV R5, 32(R3) ++ JAL runtime·reflectcallmove(SB) ++ RET ++ ++CALLFN(·call16, 16) ++CALLFN(·call32, 32) ++CALLFN(·call64, 64) ++CALLFN(·call128, 128) ++CALLFN(·call256, 256) ++CALLFN(·call512, 512) ++CALLFN(·call1024, 1024) ++CALLFN(·call2048, 2048) ++CALLFN(·call4096, 4096) ++CALLFN(·call8192, 8192) ++CALLFN(·call16384, 16384) ++CALLFN(·call32768, 32768) ++CALLFN(·call65536, 65536) ++CALLFN(·call131072, 131072) ++CALLFN(·call262144, 262144) ++CALLFN(·call524288, 524288) ++CALLFN(·call1048576, 1048576) ++CALLFN(·call2097152, 2097152) ++CALLFN(·call4194304, 4194304) ++CALLFN(·call8388608, 8388608) ++CALLFN(·call16777216, 16777216) ++CALLFN(·call33554432, 33554432) ++CALLFN(·call67108864, 67108864) ++CALLFN(·call134217728, 134217728) ++CALLFN(·call268435456, 268435456) ++CALLFN(·call536870912, 536870912) ++CALLFN(·call1073741824, 1073741824) ++ ++TEXT runtime·procyield(SB),NOSPLIT,$0-0 ++ RET ++ ++// Save state of caller into g->sched. ++// but using fake PC from systemstack_switch. ++// Must only be called from functions with no locals ($0) ++// or else unwinding from systemstack_switch is incorrect. ++// Smashes R19. ++TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $runtime·systemstack_switch(SB), R19 ++ ADDV $8, R19 ++ MOVV R19, (g_sched+gobuf_pc)(g) ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R0, (g_sched+gobuf_lr)(g) ++ MOVV R0, (g_sched+gobuf_ret)(g) ++ // Assert ctxt is zero. See func save. ++ MOVV (g_sched+gobuf_ctxt)(g), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·abort(SB) ++ RET ++ ++// func asmcgocall(fn, arg unsafe.Pointer) int32 ++// Call fn(arg) on the scheduler stack, ++// aligned appropriately for the gcc ABI. ++// See cgocall.go for more details. 
++TEXT ·asmcgocall(SB),NOSPLIT,$0-20 ++ MOVV fn+0(FP), R25 ++ MOVV arg+8(FP), R4 ++ ++ MOVV R3, R12 // save original stack pointer ++ MOVV g, R13 ++ ++ // Figure out if we need to switch to m->g0 stack. ++ // We get called to create new OS threads too, and those ++ // come in on the m->g0 stack already. ++ MOVV g_m(g), R5 ++ MOVV m_gsignal(R5), R6 ++ BEQ R6, g, g0 ++ MOVV m_g0(R5), R6 ++ BEQ R6, g, g0 ++ ++ JAL gosave_systemstack_switch<>(SB) ++ MOVV R6, g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ ++ // Now on a scheduling stack (a pthread-created stack). ++g0: ++ // Save room for two of our pointers. ++ ADDV $-16, R3 ++ MOVV R13, 0(R3) // save old g on stack ++ MOVV (g_stack+stack_hi)(R13), R13 ++ SUBVU R12, R13 ++ MOVV R13, 8(R3) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) ++ JAL (R25) ++ ++ // Restore g, stack pointer. R4 is return value. ++ MOVV 0(R3), g ++ JAL runtime·save_g(SB) ++ MOVV (g_stack+stack_hi)(g), R5 ++ MOVV 8(R3), R6 ++ SUBVU R6, R5 ++ MOVV R5, R3 ++ ++ MOVW R4, ret+16(FP) ++ RET ++ ++// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) ++// See cgocall.go for more details. ++TEXT ·cgocallback(SB),NOSPLIT,$24-24 ++ NO_LOCAL_POINTERS ++ ++ // Load m and g from thread-local storage. ++ MOVB runtime·iscgo(SB), R19 ++ BEQ R19, nocgo ++ JAL runtime·load_g(SB) ++nocgo: ++ ++ // If g is nil, Go did not create the current thread. ++ // Call needm to obtain one for temporary use. ++ // In this case, we're running on the thread stack, so there's ++ // lots of space, but the linker doesn't know. Hide the call from ++ // the linker analysis by using an indirect call. ++ BEQ g, needm ++ ++ MOVV g_m(g), R12 ++ MOVV R12, savedm-8(SP) ++ JMP havem ++ ++needm: ++ MOVV g, savedm-8(SP) // g is zero, so is m. 
++ MOVV $runtime·needm(SB), R4 ++ JAL (R4) ++ ++ // Set m->sched.sp = SP, so that if a panic happens ++ // during the function we are about to execute, it will ++ // have a valid SP to run on the g0 stack. ++ // The next few lines (after the havem label) ++ // will save this SP onto the stack and then write ++ // the same SP back to m->sched.sp. That seems redundant, ++ // but if an unrecovered panic happens, unwindm will ++ // restore the g->sched.sp from the stack location ++ // and then systemstack will try to use it. If we don't set it here, ++ // that restored SP will be uninitialized (typically 0) and ++ // will not be usable. ++ MOVV g_m(g), R12 ++ MOVV m_g0(R12), R19 ++ MOVV R3, (g_sched+gobuf_sp)(R19) ++ ++havem: ++ // Now there's a valid m, and we're running on its m->g0. ++ // Save current m->g0->sched.sp on stack and then set it to SP. ++ // Save current sp in m->g0->sched.sp in preparation for ++ // switch back to m->curg stack. ++ // NOTE: unwindm knows that the saved g->sched.sp is at 8(R29) aka savedsp-16(SP). ++ MOVV m_g0(R12), R19 ++ MOVV (g_sched+gobuf_sp)(R19), R13 ++ MOVV R13, savedsp-24(SP) // must match frame size ++ MOVV R3, (g_sched+gobuf_sp)(R19) ++ ++ // Switch to m->curg stack and call runtime.cgocallbackg. ++ // Because we are taking over the execution of m->curg ++ // but *not* resuming what had been running, we need to ++ // save that information (m->curg->sched) so we can restore it. ++ // We can restore m->curg->sched.sp easily, because calling ++ // runtime.cgocallbackg leaves SP unchanged upon return. ++ // To save m->curg->sched.pc, we push it onto the stack. ++ // This has the added benefit that it looks to the traceback ++ // routine like cgocallbackg is going to return to that ++ // PC (because the frame we allocate below has the same ++ // size as cgocallback_gofunc's frame declared above) ++ // so that the traceback will seamlessly trace back into ++ // the earlier calls. 
++ MOVV m_curg(R12), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R13 // prepare stack as R13 ++ MOVV (g_sched+gobuf_pc)(g), R4 ++ MOVV R4, -(24+8)(R13) // "saved LR"; must match frame size ++ MOVV fn+0(FP), R5 ++ MOVV frame+8(FP), R6 ++ MOVV ctxt+16(FP), R7 ++ MOVV $-(24+8)(R13), R3 ++ MOVV R5, 8(R3) ++ MOVV R6, 16(R3) ++ MOVV R7, 24(R3) ++ JAL runtime·cgocallbackg(SB) ++ ++ // Restore g->sched (== m->curg->sched) from saved values. ++ MOVV 0(R3), R4 ++ MOVV R4, (g_sched+gobuf_pc)(g) ++ MOVV $(24+8)(R3), R13 // must match frame size ++ MOVV R13, (g_sched+gobuf_sp)(g) ++ ++ // Switch back to m->g0's stack and restore m->g0->sched.sp. ++ // (Unlike m->curg, the g0 goroutine never uses sched.pc, ++ // so we do not have to restore it.) ++ MOVV g_m(g), R12 ++ MOVV m_g0(R12), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ MOVV savedsp-24(SP), R13 // must match frame size ++ MOVV R13, (g_sched+gobuf_sp)(g) ++ ++ // If the m on entry was nil, we called needm above to borrow an m ++ // for the duration of the call. Since the call is over, return it with dropm. ++ MOVV savedm-8(SP), R12 ++ BNE R12, droppedm ++ MOVV $runtime·dropm(SB), R4 ++ JAL (R4) ++droppedm: ++ ++ // Done! ++ RET ++ ++// void setg(G*); set g. for use by needm. 
++TEXT runtime·setg(SB), NOSPLIT, $0-8 ++ MOVV gg+0(FP), g ++ // This only happens if iscgo, so jump straight to save_g ++ JAL runtime·save_g(SB) ++ RET ++ ++// void setg_gcc(G*); set g called from gcc with g in R19 ++TEXT setg_gcc<>(SB),NOSPLIT,$0-0 ++ MOVV R19, g ++ JAL runtime·save_g(SB) ++ RET ++ ++TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVW (R0), R0 ++ UNDEF ++ ++// AES hashing not implemented for loong64 ++TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 ++ JMP runtime·memhashFallback(SB) ++TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·strhashFallback(SB) ++TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·memhash32Fallback(SB) ++TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·memhash64Fallback(SB) ++ ++TEXT runtime·return0(SB), NOSPLIT, $0 ++ MOVW $0, R19 ++ RET ++ ++// Called from cgo wrappers, this function returns g->m->curg.stack.hi. ++// Must obey the gcc calling convention. ++TEXT _cgo_topofstack(SB),NOSPLIT,$16 ++ // g (R22) and REGTMP (R30) might be clobbered by load_g. They ++ // are callee-save in the gcc calling convention, so save them. ++ MOVV R30, savedREGTMP-16(SP) ++ MOVV g, savedG-8(SP) ++ ++ JAL runtime·load_g(SB) ++ MOVV g_m(g), R19 ++ MOVV m_curg(R19), R19 ++ MOVV (g_stack+stack_hi)(R19), R4 // return value in R4 ++ ++ MOVV savedG-8(SP), g ++ MOVV savedREGTMP-16(SP), R30 ++ RET ++ ++// The top-most function running on a goroutine ++// returns to goexit+PCQuantum. ++TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 ++ NOR R0, R0 // NOP ++ JAL runtime·goexit1(SB) // does not return ++ // traceback from goexit1 must hit code range of goexit ++ NOR R0, R0 // NOP ++ ++TEXT ·checkASM(SB),NOSPLIT,$0-1 ++ MOVW $1, R19 ++ MOVB R19, ret+0(FP) ++ RET ++ ++// gcWriteBarrier performs a heap pointer write and informs the GC. ++// ++// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments: ++// - R27 is the destination of the write ++// - R28 is the value being written at R27. 
++// It clobbers R30 (the linker temp register). ++// The act of CALLing gcWriteBarrier will clobber R1 (LR). ++// It does not clobber any other general-purpose registers, ++// but may clobber others (e.g., floating point registers). ++TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$216 ++ // Save the registers clobbered by the fast path. ++ MOVV R19, 208(R3) ++ MOVV R13, 216(R3) ++ MOVV g_m(g), R19 ++ MOVV m_p(R19), R19 ++ MOVV (p_wbBuf+wbBuf_next)(R19), R13 ++ // Increment wbBuf.next position. ++ ADDV $16, R13 ++ MOVV R13, (p_wbBuf+wbBuf_next)(R19) ++ MOVV (p_wbBuf+wbBuf_end)(R19), R19 ++ MOVV R19, R30 // R30 is linker temp register ++ // Record the write. ++ MOVV R28, -16(R13) // Record value ++ MOVV (R27), R19 // TODO: This turns bad writes into bad reads. ++ MOVV R19, -8(R13) // Record *slot ++ // Is the buffer full? ++ BEQ R13, R30, flush ++ret: ++ MOVV 208(R3), R19 ++ MOVV 216(R3), R13 ++ // Do the write. ++ MOVV R28, (R27) ++ RET ++ ++flush: ++ // Save all general purpose registers since these could be ++ // clobbered by wbBufFlush and were not saved by the caller. ++ MOVV R27, 8(R3) // Also first argument to wbBufFlush ++ MOVV R28, 16(R3) // Also second argument to wbBufFlush ++ // R1 is LR, which was saved by the prologue. ++ MOVV R2, 24(R3) ++ // R3 is SP. ++ MOVV R4, 32(R3) ++ MOVV R5, 40(R3) ++ MOVV R6, 48(R3) ++ MOVV R7, 56(R3) ++ MOVV R8, 64(R3) ++ MOVV R9, 72(R3) ++ MOVV R10, 80(R3) ++ MOVV R11, 88(R3) ++ MOVV R12, 96(R3) ++ // R13 already saved ++ MOVV R14, 104(R3) ++ MOVV R15, 112(R3) ++ MOVV R16, 120(R3) ++ MOVV R17, 128(R3) ++ MOVV R18, 136(R3) ++ // R19 already saved ++ MOVV R20, 144(R3) ++ MOVV R21, 152(R3) ++ // R22 is g. ++ MOVV R23, 160(R3) ++ MOVV R24, 168(R3) ++ MOVV R25, 176(R3) ++ MOVV R26, 184(R3) ++ // R27 already saved ++ // R28 already saved. ++ MOVV R29, 192(R3) ++ // R30 is tmp register. ++ MOVV R31, 200(R3) ++ ++ ++ // This takes arguments R27 and R28. 
++ CALL runtime·wbBufFlush(SB) ++ ++ MOVV 8(R3), R27 ++ MOVV 16(R3), R28 ++ MOVV 24(R3), R2 ++ MOVV 32(R3), R4 ++ MOVV 40(R3), R5 ++ MOVV 48(R3), R6 ++ MOVV 56(R3), R7 ++ MOVV 64(R3), R8 ++ MOVV 72(R3), R9 ++ MOVV 80(R3), R10 ++ MOVV 88(R3), R11 ++ MOVV 96(R3), R12 ++ MOVV 104(R3), R14 ++ MOVV 112(R3), R15 ++ MOVV 120(R3), R16 ++ MOVV 128(R3), R17 ++ MOVV 136(R3), R18 ++ MOVV 144(R3), R20 ++ MOVV 152(R3), R21 ++ MOVV 160(R3), R23 ++ MOVV 168(R3), R24 ++ MOVV 176(R3), R25 ++ MOVV 184(R3), R26 ++ MOVV 192(R3), R29 ++ MOVV 200(R3), R31 ++ JMP ret ++ ++// Note: these functions use a special calling convention to save generated code space. ++// Arguments are passed in registers, but the space for those arguments are allocated ++// in the caller's stack frame. These stubs write the args into that stack space and ++// then tail call to the corresponding runtime handler. ++// The tail call makes these stubs disappear in backtraces. ++TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicIndex(SB) ++TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicIndexU(SB) ++TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAlen(SB) ++TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAlenU(SB) ++TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAcap(SB) ++TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAcapU(SB) ++TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSliceB(SB) ++TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSliceBU(SB) ++TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ 
MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3Alen(SB) ++TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3AlenU(SB) ++TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3Acap(SB) ++TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3AcapU(SB) ++TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSlice3B(SB) ++TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSlice3BU(SB) ++TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSlice3C(SB) ++TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSlice3CU(SB) ++TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSliceConvert(SB) +diff --git a/src/runtime/atomic_loong64.s b/src/runtime/atomic_loong64.s +new file mode 100644 +index 0000000000..b661ec2534 +--- /dev/null ++++ b/src/runtime/atomic_loong64.s +@@ -0,0 +1,14 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// DBAR sync load/store operation ++#define DBAR WORD $0x38720000 ++ ++TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 ++ DBAR ++ RET +diff --git a/src/runtime/cputicks.go b/src/runtime/cputicks.go +index 7c926f4a2b..79ddcdc8d6 100644 +--- a/src/runtime/cputicks.go ++++ b/src/runtime/cputicks.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build !arm && !arm64 && !mips64 && !mips64le && !mips && !mipsle && !wasm +-// +build !arm,!arm64,!mips64,!mips64le,!mips,!mipsle,!wasm ++//go:build !arm && !arm64 && !loong64 && !mips64 && !mips64le && !mips && !mipsle && !wasm ++// +build !arm,!arm64,!loong64,!mips64,!mips64le,!mips,!mipsle,!wasm + + package runtime + +diff --git a/src/runtime/os_linux_loong64.go b/src/runtime/os_linux_loong64.go +new file mode 100644 +index 0000000000..e9a8728445 +--- /dev/null ++++ b/src/runtime/os_linux_loong64.go +@@ -0,0 +1,19 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++func archauxv(tag, val uintptr) {} ++ ++func osArchInit() {} ++ ++//go:nosplit ++func cputicks() int64 { ++ // Currently cputicks() is used in blocking profiler and to seed fastrand(). ++ // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. ++ return nanotime() ++} +diff --git a/src/runtime/os_linux_noauxv.go b/src/runtime/os_linux_noauxv.go +index 59b5aacaeb..22833e4c22 100644 +--- a/src/runtime/os_linux_noauxv.go ++++ b/src/runtime/os_linux_noauxv.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build linux && !arm && !arm64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le +-// +build linux,!arm,!arm64,!mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,!ppc64le ++//go:build linux && !arm && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le ++// +build linux,!arm,!arm64,!loong64,!mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,!ppc64le + + package runtime + +diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s +new file mode 100644 +index 0000000000..840c8b134e +--- /dev/null ++++ b/src/runtime/rt0_linux_loong64.s +@@ -0,0 +1,27 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build linux ++// +build loong64 ++ ++#include "textflag.h" ++ ++TEXT _rt0_loong64_linux(SB),NOSPLIT,$0 ++ JMP _main<>(SB) ++ ++TEXT _main<>(SB),NOSPLIT|NOFRAME,$0 ++ // In a statically linked binary, the stack contains argc, ++ // argv as argc string pointers followed by a NULL, envv as a ++ // sequence of string pointers followed by a NULL, and auxv. ++ // There is no TLS base pointer. ++ MOVW 0(R3), R4 // argc ++ ADDV $8, R3, R5 // argv ++ JMP main(SB) ++ ++TEXT main(SB),NOSPLIT|NOFRAME,$0 ++ // in external linking, glibc jumps to main with argc in R4 ++ // and argv in R5 ++ ++ MOVV $runtime·rt0_go(SB), R19 ++ JMP (R19) +diff --git a/src/runtime/sys_loong64.go b/src/runtime/sys_loong64.go +new file mode 100644 +index 0000000000..650843ee5f +--- /dev/null ++++ b/src/runtime/sys_loong64.go +@@ -0,0 +1,21 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package runtime ++ ++import "unsafe" ++ ++// adjust Gobuf as if it executed a call to fn with context ctxt ++// and then did an immediate Gosave. 
++func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) { ++ if buf.lr != 0 { ++ throw("invalid use of gostartcall") ++ } ++ buf.lr = buf.pc ++ buf.pc = uintptr(fn) ++ buf.ctxt = ctxt ++} +-- +2.38.0 + diff --git a/loongarch64/0018-runtime-load-save-TLS-variable-g-on-loong64.patch b/loongarch64/0018-runtime-load-save-TLS-variable-g-on-loong64.patch new file mode 100644 index 0000000..c834259 --- /dev/null +++ b/loongarch64/0018-runtime-load-save-TLS-variable-g-on-loong64.patch @@ -0,0 +1,68 @@ +From e56e1f86831844abd14808ed6fb6b506c38669bc Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 15:48:53 +0800 +Subject: [PATCH 18/82] runtime: load/save TLS variable g on loong64 + +Change-Id: I5e09759ce9201596e89a01fc4a6f7fd7e205449f +--- + src/runtime/stubs_loong64.go | 12 ++++++++++++ + src/runtime/tls_loong64.s | 28 ++++++++++++++++++++++++++++ + 2 files changed, 40 insertions(+) + create mode 100644 src/runtime/stubs_loong64.go + create mode 100644 src/runtime/tls_loong64.s + +diff --git a/src/runtime/stubs_loong64.go b/src/runtime/stubs_loong64.go +new file mode 100644 +index 0000000000..39e752067e +--- /dev/null ++++ b/src/runtime/stubs_loong64.go +@@ -0,0 +1,12 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package runtime ++ ++// Called from assembly only; declared for go vet. ++func load_g() ++func save_g() +diff --git a/src/runtime/tls_loong64.s b/src/runtime/tls_loong64.s +new file mode 100644 +index 0000000000..30627d849b +--- /dev/null ++++ b/src/runtime/tls_loong64.s +@@ -0,0 +1,28 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "funcdata.h" ++#include "textflag.h" ++ ++// If !iscgo, this is a no-op. ++// ++// NOTE: mcall() assumes this clobbers only R30 (REGTMP). ++TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVB runtime·iscgo(SB), R30 ++ BEQ R30, nocgo ++ ++ MOVV g, runtime·tls_g(SB) ++ ++nocgo: ++ RET ++ ++TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV runtime·tls_g(SB), g ++ RET ++ ++GLOBL runtime·tls_g(SB), TLSBSS, $8 +-- +2.38.0 + diff --git a/loongarch64/0019-runtime-implement-signal-for-linux-loong64.patch b/loongarch64/0019-runtime-implement-signal-for-linux-loong64.patch new file mode 100644 index 0000000..3dc261e --- /dev/null +++ b/loongarch64/0019-runtime-implement-signal-for-linux-loong64.patch @@ -0,0 +1,454 @@ +From 21b1135606226badc77689aff94d6c14df8d7894 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:06:43 +0800 +Subject: [PATCH 19/82] runtime: implement signal for linux/loong64 + +Change-Id: Ifa0229d2044dd53683de4a2b3ab965b16263f267 +--- + src/runtime/defs_linux_loong64.go | 212 ++++++++++++++++++++++++++++ + src/runtime/export_linux_test.go | 3 + + src/runtime/os_linux.go | 5 + + src/runtime/signal_linux_loong64.go | 76 ++++++++++ + src/runtime/signal_loong64.go | 98 +++++++++++++ + 5 files changed, 394 insertions(+) + create mode 100644 src/runtime/defs_linux_loong64.go + create mode 100644 src/runtime/signal_linux_loong64.go + create mode 100644 src/runtime/signal_loong64.go + +diff --git a/src/runtime/defs_linux_loong64.go b/src/runtime/defs_linux_loong64.go +new file mode 100644 +index 0000000000..3e0fac0298 +--- /dev/null ++++ b/src/runtime/defs_linux_loong64.go +@@ -0,0 +1,212 @@ ++// Generated using cgo, then manually converted into appropriate naming and code ++// for the Go runtime. 
++// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go ++ ++package runtime ++ ++import "unsafe" ++ ++const ( ++ _EINTR = 0x4 ++ _EAGAIN = 0xb ++ _ENOMEM = 0xc ++ _ENOSYS = 0x26 ++ ++ _PROT_NONE = 0x0 ++ _PROT_READ = 0x1 ++ _PROT_WRITE = 0x2 ++ _PROT_EXEC = 0x4 ++ ++ _MAP_ANON = 0x20 ++ _MAP_PRIVATE = 0x2 ++ _MAP_FIXED = 0x10 ++ ++ _MADV_DONTNEED = 0x4 ++ _MADV_FREE = 0x8 ++ _MADV_HUGEPAGE = 0xe ++ _MADV_NOHUGEPAGE = 0xf ++ ++ _SA_RESTART = 0x10000000 ++ _SA_ONSTACK = 0x8000000 ++ _SA_SIGINFO = 0x4 ++ _SA_RESTORER = 0x0 ++ ++ _SI_KERNEL = 0x80 ++ _SI_TIMER = -0x2 ++ ++ _SIGHUP = 0x1 ++ _SIGINT = 0x2 ++ _SIGQUIT = 0x3 ++ _SIGILL = 0x4 ++ _SIGTRAP = 0x5 ++ _SIGABRT = 0x6 ++ _SIGBUS = 0x7 ++ _SIGFPE = 0x8 ++ _SIGKILL = 0x9 ++ _SIGUSR1 = 0xa ++ _SIGSEGV = 0xb ++ _SIGUSR2 = 0xc ++ _SIGPIPE = 0xd ++ _SIGALRM = 0xe ++ _SIGSTKFLT = 0x10 ++ _SIGCHLD = 0x11 ++ _SIGCONT = 0x12 ++ _SIGSTOP = 0x13 ++ _SIGTSTP = 0x14 ++ _SIGTTIN = 0x15 ++ _SIGTTOU = 0x16 ++ _SIGURG = 0x17 ++ _SIGXCPU = 0x18 ++ _SIGXFSZ = 0x19 ++ _SIGVTALRM = 0x1a ++ _SIGPROF = 0x1b ++ _SIGWINCH = 0x1c ++ _SIGIO = 0x1d ++ _SIGPWR = 0x1e ++ _SIGSYS = 0x1f ++ ++ _SIGRTMIN = 0x20 ++ ++ _FPE_INTDIV = 0x1 ++ _FPE_INTOVF = 0x2 ++ _FPE_FLTDIV = 0x3 ++ _FPE_FLTOVF = 0x4 ++ _FPE_FLTUND = 0x5 ++ _FPE_FLTRES = 0x6 ++ _FPE_FLTINV = 0x7 ++ _FPE_FLTSUB = 0x8 ++ ++ _BUS_ADRALN = 0x1 ++ _BUS_ADRERR = 0x2 ++ _BUS_OBJERR = 0x3 ++ ++ _SEGV_MAPERR = 0x1 ++ _SEGV_ACCERR = 0x2 ++ ++ _ITIMER_REAL = 0x0 ++ _ITIMER_VIRTUAL = 0x1 ++ _ITIMER_PROF = 0x2 ++ ++ _CLOCK_THREAD_CPUTIME_ID = 0x3 ++ ++ _SIGEV_THREAD_ID = 0x4 ++ ++ _EPOLLIN = 0x1 ++ _EPOLLOUT = 0x4 ++ _EPOLLERR = 0x8 ++ _EPOLLHUP = 0x10 ++ _EPOLLRDHUP = 0x2000 ++ _EPOLLET = 0x80000000 ++ _EPOLL_CLOEXEC = 0x80000 ++ _EPOLL_CTL_ADD = 0x1 ++ _EPOLL_CTL_DEL = 0x2 ++ _EPOLL_CTL_MOD = 0x3 ++) ++ ++type timespec struct { ++ tv_sec int64 ++ tv_nsec int64 ++} ++ ++//go:nosplit ++func (ts *timespec) setNsec(ns int64) { ++ ts.tv_sec = ns / 1e9 ++ ts.tv_nsec = ns % 1e9 ++} ++ 
++type timeval struct { ++ tv_sec int64 ++ tv_usec int64 ++} ++ ++func (tv *timeval) set_usec(x int32) { ++ tv.tv_usec = int64(x) ++} ++ ++type itimerspec struct { ++ it_interval timespec ++ it_value timespec ++} ++ ++type itimerval struct { ++ it_interval timeval ++ it_value timeval ++} ++ ++type sigeventFields struct { ++ value uintptr ++ signo int32 ++ notify int32 ++ // below here is a union; sigev_notify_thread_id is the only field we use ++ sigev_notify_thread_id int32 ++} ++ ++type sigevent struct { ++ sigeventFields ++ // Pad struct to the max size in the kernel. ++ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte ++} ++ ++type epollevent struct { ++ events uint32 ++ pad_cgo_0 [4]byte ++ data [8]byte // unaligned uintptr ++} ++ ++const ( ++ _O_RDONLY = 0x0 ++ _O_NONBLOCK = 0x800 ++ _O_CLOEXEC = 0x80000 ++) ++ ++type sigactiont struct { ++ sa_handler uintptr ++ sa_flags uint64 ++ sa_mask uint64 ++ // Linux on loong64 does not have the sa_restorer field, but the setsig ++ // function references it (for x86). Not much harm to include it at the end. ++ sa_restorer uintptr ++} ++ ++type siginfoFields struct { ++ si_signo int32 ++ si_errno int32 ++ si_code int32 ++ __pad0 [1]int32 ++ // below here is a union; si_addr is the only field we use ++ si_addr uint64 ++} ++ ++type siginfo struct { ++ siginfoFields ++ // Pad struct to the max size in the kernel. 
++ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte ++} ++ ++type usigset struct { ++ val [16]uint64 ++} ++ ++type stackt struct { ++ ss_sp *byte ++ ss_flags int32 ++ pad_cgo_0 [4]byte ++ ss_size uintptr ++} ++ ++type sigcontext struct { ++ sc_pc uint64 ++ sc_regs [32]uint64 ++ sc_flags uint32 ++ sc_extcontext [0]uint64 ++} ++ ++type ucontext struct { ++ uc_flags uint64 ++ uc_link *ucontext ++ uc_stack stackt ++ uc_sigmask usigset ++ uc_x_unused [0]uint8 ++ uc_pad_cgo_0 [8]byte ++ uc_mcontext sigcontext ++} +diff --git a/src/runtime/export_linux_test.go b/src/runtime/export_linux_test.go +index b7c901f238..057f4b6918 100644 +--- a/src/runtime/export_linux_test.go ++++ b/src/runtime/export_linux_test.go +@@ -8,6 +8,9 @@ package runtime + + import "unsafe" + ++const SiginfoMaxSize = _si_max_size ++const SigeventMaxSize = _sigev_max_size ++ + var NewOSProc0 = newosproc0 + var Mincore = mincore + var Add = add +diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go +index c8b29e396c..34c3cee108 100644 +--- a/src/runtime/os_linux.go ++++ b/src/runtime/os_linux.go +@@ -454,6 +454,11 @@ func setsigstack(i uint32) { + sigaction(i, &sa, nil) + } + ++const ( ++ _si_max_size = 128 ++ _sigev_max_size = 64 ++) ++ + //go:nosplit + //go:nowritebarrierrec + func getsig(i uint32) uintptr { +diff --git a/src/runtime/signal_linux_loong64.go b/src/runtime/signal_linux_loong64.go +new file mode 100644 +index 0000000000..8f978c045b +--- /dev/null ++++ b/src/runtime/signal_linux_loong64.go +@@ -0,0 +1,76 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++import ( ++ "runtime/internal/sys" ++ "unsafe" ++) ++ ++type sigctxt struct { ++ info *siginfo ++ ctxt unsafe.Pointer ++} ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext } ++ ++func (c *sigctxt) r0() uint64 { return c.regs().sc_regs[0] } ++func (c *sigctxt) r1() uint64 { return c.regs().sc_regs[1] } ++func (c *sigctxt) r2() uint64 { return c.regs().sc_regs[2] } ++func (c *sigctxt) r3() uint64 { return c.regs().sc_regs[3] } ++func (c *sigctxt) r4() uint64 { return c.regs().sc_regs[4] } ++func (c *sigctxt) r5() uint64 { return c.regs().sc_regs[5] } ++func (c *sigctxt) r6() uint64 { return c.regs().sc_regs[6] } ++func (c *sigctxt) r7() uint64 { return c.regs().sc_regs[7] } ++func (c *sigctxt) r8() uint64 { return c.regs().sc_regs[8] } ++func (c *sigctxt) r9() uint64 { return c.regs().sc_regs[9] } ++func (c *sigctxt) r10() uint64 { return c.regs().sc_regs[10] } ++func (c *sigctxt) r11() uint64 { return c.regs().sc_regs[11] } ++func (c *sigctxt) r12() uint64 { return c.regs().sc_regs[12] } ++func (c *sigctxt) r13() uint64 { return c.regs().sc_regs[13] } ++func (c *sigctxt) r14() uint64 { return c.regs().sc_regs[14] } ++func (c *sigctxt) r15() uint64 { return c.regs().sc_regs[15] } ++func (c *sigctxt) r16() uint64 { return c.regs().sc_regs[16] } ++func (c *sigctxt) r17() uint64 { return c.regs().sc_regs[17] } ++func (c *sigctxt) r18() uint64 { return c.regs().sc_regs[18] } ++func (c *sigctxt) r19() uint64 { return c.regs().sc_regs[19] } ++func (c *sigctxt) r20() uint64 { return c.regs().sc_regs[20] } ++func (c *sigctxt) r21() uint64 { return c.regs().sc_regs[21] } ++func (c *sigctxt) r22() uint64 { return c.regs().sc_regs[22] } ++func (c *sigctxt) r23() uint64 { return c.regs().sc_regs[23] } ++func (c *sigctxt) r24() uint64 { return c.regs().sc_regs[24] } ++func (c *sigctxt) r25() uint64 { return 
c.regs().sc_regs[25] } ++func (c *sigctxt) r26() uint64 { return c.regs().sc_regs[26] } ++func (c *sigctxt) r27() uint64 { return c.regs().sc_regs[27] } ++func (c *sigctxt) r28() uint64 { return c.regs().sc_regs[28] } ++func (c *sigctxt) r29() uint64 { return c.regs().sc_regs[29] } ++func (c *sigctxt) r30() uint64 { return c.regs().sc_regs[30] } ++func (c *sigctxt) r31() uint64 { return c.regs().sc_regs[31] } ++func (c *sigctxt) sp() uint64 { return c.regs().sc_regs[3] } ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) pc() uint64 { return c.regs().sc_pc } ++ ++func (c *sigctxt) link() uint64 { return c.regs().sc_regs[1] } ++ ++func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } ++func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr } ++ ++func (c *sigctxt) set_r31(x uint64) { c.regs().sc_regs[31] = x } ++func (c *sigctxt) set_r22(x uint64) { c.regs().sc_regs[22] = x } ++func (c *sigctxt) set_pc(x uint64) { c.regs().sc_pc = x } ++func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs[3] = x } ++func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[1] = x } ++ ++func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } ++func (c *sigctxt) set_sigaddr(x uint64) { ++ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x) ++} +diff --git a/src/runtime/signal_loong64.go b/src/runtime/signal_loong64.go +new file mode 100644 +index 0000000000..e65ec58ebf +--- /dev/null ++++ b/src/runtime/signal_loong64.go +@@ -0,0 +1,98 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++import ( ++ "runtime/internal/sys" ++ "unsafe" ++) ++ ++func dumpregs(c *sigctxt) { ++ print("r0 ", hex(c.r0()), "\t") ++ print("r1 ", hex(c.r1()), "\n") ++ print("r2 ", hex(c.r2()), "\t") ++ print("r3 ", hex(c.r3()), "\n") ++ print("r4 ", hex(c.r4()), "\t") ++ print("r5 ", hex(c.r5()), "\n") ++ print("r6 ", hex(c.r6()), "\t") ++ print("r7 ", hex(c.r7()), "\n") ++ print("r8 ", hex(c.r8()), "\t") ++ print("r9 ", hex(c.r9()), "\n") ++ print("r10 ", hex(c.r10()), "\t") ++ print("r11 ", hex(c.r11()), "\n") ++ print("r12 ", hex(c.r12()), "\t") ++ print("r13 ", hex(c.r13()), "\n") ++ print("r14 ", hex(c.r14()), "\t") ++ print("r15 ", hex(c.r15()), "\n") ++ print("r16 ", hex(c.r16()), "\t") ++ print("r17 ", hex(c.r17()), "\n") ++ print("r18 ", hex(c.r18()), "\t") ++ print("r19 ", hex(c.r19()), "\n") ++ print("r20 ", hex(c.r20()), "\t") ++ print("r21 ", hex(c.r21()), "\n") ++ print("r22 ", hex(c.r22()), "\t") ++ print("r23 ", hex(c.r23()), "\n") ++ print("r24 ", hex(c.r24()), "\t") ++ print("r25 ", hex(c.r25()), "\n") ++ print("r26 ", hex(c.r26()), "\t") ++ print("r27 ", hex(c.r27()), "\n") ++ print("r28 ", hex(c.r28()), "\t") ++ print("r29 ", hex(c.r29()), "\n") ++ print("r30 ", hex(c.r30()), "\t") ++ print("r31 ", hex(c.r31()), "\n") ++ print("pc ", hex(c.pc()), "\t") ++ print("link ", hex(c.link()), "\n") ++} ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) } ++ ++func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) } ++func (c *sigctxt) siglr() uintptr { return uintptr(c.link()) } ++func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) } ++ ++// preparePanic sets up the stack to look like a call to sigpanic. ++func (c *sigctxt) preparePanic(sig uint32, gp *g) { ++ // We arrange link, and pc to pretend the panicking ++ // function calls sigpanic directly. 
++ // Always save LINK to stack so that panics in leaf ++ // functions are correctly handled. This smashes ++ // the stack frame but we're not going back there ++ // anyway. ++ sp := c.sp() - sys.PtrSize ++ c.set_sp(sp) ++ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() ++ ++ pc := gp.sigpc ++ ++ if shouldPushSigpanic(gp, pc, uintptr(c.link())) { ++ // Make it look the like faulting PC called sigpanic. ++ c.set_link(uint64(pc)) ++ } ++ ++ // In case we are panicking from external C code ++ sigpanicPC := uint64(funcPC(sigpanic)) ++ c.set_r31(sigpanicPC >> 32 << 32) // RSB register ++ c.set_r22(uint64(uintptr(unsafe.Pointer(gp)))) ++ c.set_pc(sigpanicPC) ++} ++ ++func (c *sigctxt) pushCall(targetPC, resumePC uintptr) { ++ // Push the LR to stack, as we'll clobber it in order to ++ // push the call. The function being pushed is responsible ++ // for restoring the LR and setting the SP back. ++ // This extra slot is known to gentraceback. ++ sp := c.sp() - 8 ++ c.set_sp(sp) ++ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() ++ // Set up PC and LR to pretend the function being signaled ++ // calls targetPC at resumePC. 
++ c.set_link(uint64(resumePC)) ++ c.set_pc(uint64(targetPC)) ++} +-- +2.38.0 + diff --git a/loongarch64/0020-runtime-support-vdso-for-linux-loong64.patch b/loongarch64/0020-runtime-support-vdso-for-linux-loong64.patch new file mode 100644 index 0000000..6bf330a --- /dev/null +++ b/loongarch64/0020-runtime-support-vdso-for-linux-loong64.patch @@ -0,0 +1,114 @@ +From 01837b2d7e1167f51bdc90111367627f389510b1 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:12:05 +0800 +Subject: [PATCH 20/82] runtime: support vdso for linux/loong64 + +Change-Id: Ie9bb5ccfc28e65036e2088c232bb333dcb259a60 +--- + src/runtime/os_linux_novdso.go | 4 ++-- + src/runtime/vdso_elf64.go | 4 ++-- + src/runtime/vdso_in_none.go | 4 ++-- + src/runtime/vdso_linux.go | 4 ++-- + src/runtime/vdso_linux_loong64.go | 28 ++++++++++++++++++++++++++++ + 5 files changed, 36 insertions(+), 8 deletions(-) + create mode 100644 src/runtime/vdso_linux_loong64.go + +diff --git a/src/runtime/os_linux_novdso.go b/src/runtime/os_linux_novdso.go +index 8104f63627..9783076d56 100644 +--- a/src/runtime/os_linux_novdso.go ++++ b/src/runtime/os_linux_novdso.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le +-// +build linux,!386,!amd64,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le ++//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le ++// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips64,!mips64le,!ppc64,!ppc64le + + package runtime + +diff --git a/src/runtime/vdso_elf64.go b/src/runtime/vdso_elf64.go +index 9923bd4697..c744f7b77f 100644 +--- a/src/runtime/vdso_elf64.go ++++ b/src/runtime/vdso_elf64.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build linux && (amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le) ++//go:build linux && (amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le) + // +build linux +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le + + package runtime + +diff --git a/src/runtime/vdso_in_none.go b/src/runtime/vdso_in_none.go +index c66fbf8216..28bf27ab9a 100644 +--- a/src/runtime/vdso_in_none.go ++++ b/src/runtime/vdso_in_none.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build (linux && !386 && !amd64 && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le) || !linux +-// +build linux,!386,!amd64,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le !linux ++//go:build (linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le) || !linux ++// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips64,!mips64le,!ppc64,!ppc64le !linux + + package runtime + +diff --git a/src/runtime/vdso_linux.go b/src/runtime/vdso_linux.go +index ae211f96b1..4f6381ff4c 100644 +--- a/src/runtime/vdso_linux.go ++++ b/src/runtime/vdso_linux.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && (386 || amd64 || arm || arm64 || mips64 || mips64le || ppc64 || ppc64le) ++//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le) + // +build linux +-// +build 386 amd64 arm arm64 mips64 mips64le ppc64 ppc64le ++// +build 386 amd64 arm arm64 loong64 mips64 mips64le ppc64 ppc64le + + package runtime + +diff --git a/src/runtime/vdso_linux_loong64.go b/src/runtime/vdso_linux_loong64.go +new file mode 100644 +index 0000000000..e8afdd4f1e +--- /dev/null ++++ b/src/runtime/vdso_linux_loong64.go +@@ -0,0 +1,28 @@ ++// Copyright 2021 The Go Authors. 
All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++const ( ++ // vdsoArrayMax is the byte-size of a maximally sized array on this architecture. ++ // See cmd/compile/internal/loong64/galign.go arch.MAXWIDTH initialization. ++ vdsoArrayMax = 1<<50 - 1 ++) ++ ++// see man 7 vdso : loong64 ++var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6} ++ ++// The symbol name is not __kernel_clock_gettime as suggested by the manpage; ++// according to Linux source code it should be __vdso_clock_gettime instead. ++var vdsoSymbolKeys = []vdsoSymbolKey{ ++ {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, ++} ++ ++// initialize to fall back to syscall ++var ( ++ vdsoClockgettimeSym uintptr = 0 ++) +-- +2.38.0 + diff --git a/loongarch64/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch b/loongarch64/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch new file mode 100644 index 0000000..c8c4f9b --- /dev/null +++ b/loongarch64/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch @@ -0,0 +1,979 @@ +From 9d24c3e1699e423d6379e766e0e4f77e597af1d3 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:13:22 +0800 +Subject: [PATCH 21/82] runtime: implement duffzero/duffcopy for linux/loong64 + +Change-Id: Ida040e76dc8172f60e6aee1ea2b5bce13ab3581e +--- + src/runtime/duff_loong64.s | 909 +++++++++++++++++++++++++++++++++++++ + src/runtime/mkduff.go | 31 ++ + 2 files changed, 940 insertions(+) + create mode 100644 src/runtime/duff_loong64.s + +diff --git a/src/runtime/duff_loong64.s b/src/runtime/duff_loong64.s +new file mode 100644 +index 0000000000..f070916dce +--- /dev/null ++++ b/src/runtime/duff_loong64.s +@@ -0,0 +1,909 @@ ++// Code generated by mkduff.go; DO NOT EDIT. ++// Run go generate from src/runtime to update. 
++// See mkduff.go for comments. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV 
R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 
8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ RET ++ ++TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ 
MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, 
R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ 
ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV 
(R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 
++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ RET +diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go +index da191cc594..f12d984e02 100644 +--- a/src/runtime/mkduff.go ++++ b/src/runtime/mkduff.go +@@ -37,6 +37,7 @@ func main() { + gen("386", notags, zero386, copy386) + gen("arm", notags, zeroARM, copyARM) + gen("arm64", notags, zeroARM64, copyARM64) ++ gen("loong64", tagsLOONG64, zeroLOONG64, copyLOONG64) + gen("ppc64x", tagsPPC64x, zeroPPC64x, copyPPC64x) + gen("mips64x", tagsMIPS64x, zeroMIPS64x, copyMIPS64x) + gen("riscv64", notags, zeroRISCV64, copyRISCV64) +@@ -177,6 +178,36 @@ func copyARM64(w io.Writer) { + fmt.Fprintln(w, "\tRET") + } + ++func tagsLOONG64(w io.Writer) { ++ fmt.Fprintln(w) ++ fmt.Fprintln(w, "// +build loong64") ++ fmt.Fprintln(w) ++} ++ ++func zeroLOONG64(w io.Writer) { ++ // R0: always zero ++ // R19 (aka REGRT1): ptr to memory to be zeroed - 8 ++ // On return, R19 points to the last zeroed dword. ++ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0") ++ for i := 0; i < 128; i++ { ++ fmt.Fprintln(w, "\tMOVV\tR0, 8(R19)") ++ fmt.Fprintln(w, "\tADDV\t$8, R19") ++ } ++ fmt.Fprintln(w, "\tRET") ++} ++ ++func copyLOONG64(w io.Writer) { ++ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0") ++ for i := 0; i < 128; i++ { ++ fmt.Fprintln(w, "\tMOVV\t(R19), R30") ++ fmt.Fprintln(w, "\tADDV\t$8, R19") ++ fmt.Fprintln(w, "\tMOVV\tR30, (R20)") ++ fmt.Fprintln(w, "\tADDV\t$8, R20") ++ fmt.Fprintln(w) ++ } ++ fmt.Fprintln(w, "\tRET") ++} ++ + func tagsPPC64x(w io.Writer) { + fmt.Fprintln(w) + fmt.Fprintln(w, "//go:build ppc64 || ppc64le") +-- +2.38.0 + diff --git a/loongarch64/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch b/loongarch64/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch new file mode 100644 index 0000000..94a1f5e --- /dev/null +++ b/loongarch64/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch @@ -0,0 +1,231 @@ +From 3d66e789ed194e56b6dc1ae0b2000a2c85159c47 Mon Sep 17 
00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:15:16 +0800 +Subject: [PATCH 22/82] runtime: implement asyncPreempt for linux/loong64 + +Change-Id: I7a64e38b15a99816bd74262c02f62dad021cc166 +--- + src/runtime/mkpreempt.go | 49 +++++++++++++ + src/runtime/preempt.go | 2 +- + src/runtime/preempt_loong64.s | 129 ++++++++++++++++++++++++++++++++++ + 3 files changed, 179 insertions(+), 1 deletion(-) + create mode 100644 src/runtime/preempt_loong64.s + +diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go +index 6c980540f5..1bad20d60b 100644 +--- a/src/runtime/mkpreempt.go ++++ b/src/runtime/mkpreempt.go +@@ -81,6 +81,7 @@ var arches = map[string]func(){ + "amd64": genAMD64, + "arm": genARM, + "arm64": genARM64, ++ "loong64": genLOONG64, + "mips64x": func() { genMIPS(true) }, + "mipsx": func() { genMIPS(false) }, + "ppc64x": genPPC64, +@@ -449,6 +450,54 @@ func genMIPS(_64bit bool) { + p("JMP (R23)") + } + ++func genLOONG64() { ++ mov := "MOVV" ++ movf := "MOVD" ++ add := "ADDV" ++ sub := "SUBV" ++ r31 := "RSB" ++ regsize := 8 ++ ++ // Add integer registers r4-r21 r23-r29 r31 ++ // R0 (zero), R30 (REGTMP), R2(tp),R3 (SP), R22 (g), R1 (LR) are special, ++ var l = layout{sp: "R3", stack: regsize} // add slot to save PC of interrupted instruction (in LR) ++ for i := 4; i <= 29; i++ { ++ if i == 22 { ++ continue //R3 is REGSP R22 is g ++ } ++ reg := fmt.Sprintf("R%d", i) ++ l.add(mov, reg, regsize) ++ } ++ l.add(mov, r31, regsize) ++ ++ // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) ++ var lfp = layout{sp: "R3", stack: l.stack} ++ // lfp.addSpecial( ++ // mov+" FCR31, R1\n"+mov+" R1, %d(R29)", ++ // mov+" %d(R29), R1\n"+mov+" R1, FCR31", ++ // regsize) ++ // Add floating point registers F0-F31. 
++ for i := 0; i <= 31; i++ { ++ reg := fmt.Sprintf("F%d", i) ++ lfp.add(movf, reg, regsize) ++ } ++ ++ // allocate frame, save PC of interrupted instruction (in LR) ++ p(mov+" R1, -%d(R3)", lfp.stack) ++ p(sub+" $%d, R3", lfp.stack) ++ ++ l.save() ++ lfp.save() ++ p("CALL ·asyncPreempt2(SB)") ++ lfp.restore() ++ l.restore() ++ ++ p(mov+" %d(R3), R1", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it ++ p(mov + " (R3), R30") // load PC to REGTMP ++ p(add+" $%d, R3", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) ++ p("JMP (R30)") ++} ++ + func genPPC64() { + // Add integer registers R3-R29 + // R0 (zero), R1 (SP), R30 (g) are special and not saved here. +diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go +index 1d5aae1363..92a05d227d 100644 +--- a/src/runtime/preempt.go ++++ b/src/runtime/preempt.go +@@ -386,7 +386,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { + // Not Go code. + return false, 0 + } +- if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { ++ if (GOARCH == "loong64" || GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { + // We probably stopped at a half-executed CALL instruction, + // where the LR is updated but the PC has not. If we preempt + // here we'll see a seemingly self-recursive call, which is in +diff --git a/src/runtime/preempt_loong64.s b/src/runtime/preempt_loong64.s +new file mode 100644 +index 0000000000..363959eeb4 +--- /dev/null ++++ b/src/runtime/preempt_loong64.s +@@ -0,0 +1,129 @@ ++// Code generated by mkpreempt.go; DO NOT EDIT. 
++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R1, -472(R3) ++ SUBV $472, R3 ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV R7, 32(R3) ++ MOVV R8, 40(R3) ++ MOVV R9, 48(R3) ++ MOVV R10, 56(R3) ++ MOVV R11, 64(R3) ++ MOVV R12, 72(R3) ++ MOVV R13, 80(R3) ++ MOVV R14, 88(R3) ++ MOVV R15, 96(R3) ++ MOVV R16, 104(R3) ++ MOVV R17, 112(R3) ++ MOVV R18, 120(R3) ++ MOVV R19, 128(R3) ++ MOVV R20, 136(R3) ++ MOVV R21, 144(R3) ++ MOVV R23, 152(R3) ++ MOVV R24, 160(R3) ++ MOVV R25, 168(R3) ++ MOVV R26, 176(R3) ++ MOVV R27, 184(R3) ++ MOVV R28, 192(R3) ++ MOVV R29, 200(R3) ++ MOVV RSB, 208(R3) ++ MOVD F0, 216(R3) ++ MOVD F1, 224(R3) ++ MOVD F2, 232(R3) ++ MOVD F3, 240(R3) ++ MOVD F4, 248(R3) ++ MOVD F5, 256(R3) ++ MOVD F6, 264(R3) ++ MOVD F7, 272(R3) ++ MOVD F8, 280(R3) ++ MOVD F9, 288(R3) ++ MOVD F10, 296(R3) ++ MOVD F11, 304(R3) ++ MOVD F12, 312(R3) ++ MOVD F13, 320(R3) ++ MOVD F14, 328(R3) ++ MOVD F15, 336(R3) ++ MOVD F16, 344(R3) ++ MOVD F17, 352(R3) ++ MOVD F18, 360(R3) ++ MOVD F19, 368(R3) ++ MOVD F20, 376(R3) ++ MOVD F21, 384(R3) ++ MOVD F22, 392(R3) ++ MOVD F23, 400(R3) ++ MOVD F24, 408(R3) ++ MOVD F25, 416(R3) ++ MOVD F26, 424(R3) ++ MOVD F27, 432(R3) ++ MOVD F28, 440(R3) ++ MOVD F29, 448(R3) ++ MOVD F30, 456(R3) ++ MOVD F31, 464(R3) ++ CALL ·asyncPreempt2(SB) ++ MOVD 464(R3), F31 ++ MOVD 456(R3), F30 ++ MOVD 448(R3), F29 ++ MOVD 440(R3), F28 ++ MOVD 432(R3), F27 ++ MOVD 424(R3), F26 ++ MOVD 416(R3), F25 ++ MOVD 408(R3), F24 ++ MOVD 400(R3), F23 ++ MOVD 392(R3), F22 ++ MOVD 384(R3), F21 ++ MOVD 376(R3), F20 ++ MOVD 368(R3), F19 ++ MOVD 360(R3), F18 ++ MOVD 352(R3), F17 ++ MOVD 344(R3), F16 ++ MOVD 336(R3), F15 ++ MOVD 328(R3), F14 ++ MOVD 320(R3), F13 ++ MOVD 312(R3), F12 ++ MOVD 304(R3), F11 ++ MOVD 296(R3), F10 ++ MOVD 288(R3), F9 ++ MOVD 280(R3), F8 ++ MOVD 272(R3), F7 ++ MOVD 264(R3), F6 ++ MOVD 256(R3), F5 ++ MOVD 248(R3), F4 ++ MOVD 240(R3), F3 ++ MOVD 232(R3), F2 ++ MOVD 224(R3), F1 ++ 
MOVD 216(R3), F0 ++ MOVV 208(R3), RSB ++ MOVV 200(R3), R29 ++ MOVV 192(R3), R28 ++ MOVV 184(R3), R27 ++ MOVV 176(R3), R26 ++ MOVV 168(R3), R25 ++ MOVV 160(R3), R24 ++ MOVV 152(R3), R23 ++ MOVV 144(R3), R21 ++ MOVV 136(R3), R20 ++ MOVV 128(R3), R19 ++ MOVV 120(R3), R18 ++ MOVV 112(R3), R17 ++ MOVV 104(R3), R16 ++ MOVV 96(R3), R15 ++ MOVV 88(R3), R14 ++ MOVV 80(R3), R13 ++ MOVV 72(R3), R12 ++ MOVV 64(R3), R11 ++ MOVV 56(R3), R10 ++ MOVV 48(R3), R9 ++ MOVV 40(R3), R8 ++ MOVV 32(R3), R7 ++ MOVV 24(R3), R6 ++ MOVV 16(R3), R5 ++ MOVV 8(R3), R4 ++ MOVV 472(R3), R1 ++ MOVV (R3), R30 ++ ADDV $480, R3 ++ JMP (R30) +-- +2.38.0 + diff --git a/loongarch64/0023-runtime-support-memclr-memmove-for-linux-loong64.patch b/loongarch64/0023-runtime-support-memclr-memmove-for-linux-loong64.patch new file mode 100644 index 0000000..2d336b6 --- /dev/null +++ b/loongarch64/0023-runtime-support-memclr-memmove-for-linux-loong64.patch @@ -0,0 +1,178 @@ +From f46aec3b8089fcef6aff3ba87b8db45072d49d96 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:25:19 +0800 +Subject: [PATCH 23/82] runtime: support memclr/memmove for linux/loong64 + +Change-Id: I7c1f39670034db6714630d479bc41b6620ba2b1a +--- + src/runtime/memclr_loong64.s | 43 ++++++++++++++ + src/runtime/memmove_loong64.s | 107 ++++++++++++++++++++++++++++++++++ + 2 files changed, 150 insertions(+) + create mode 100644 src/runtime/memclr_loong64.s + create mode 100644 src/runtime/memmove_loong64.s + +diff --git a/src/runtime/memclr_loong64.s b/src/runtime/memclr_loong64.s +new file mode 100644 +index 0000000000..c486e848d3 +--- /dev/null ++++ b/src/runtime/memclr_loong64.s +@@ -0,0 +1,43 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) ++TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 ++ MOVV ptr+0(FP), R6 ++ MOVV n+8(FP), R7 ++ ADDV R6, R7, R4 ++ ++ // if less than 8 bytes, do one byte at a time ++ SGTU $8, R7, R8 ++ BNE R8, out ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R6, R8 ++ BEQ R8, words ++ MOVB R0, (R6) ++ ADDV $1, R6 ++ JMP -4(PC) ++ ++words: ++ // do 8 bytes at a time if there is room ++ ADDV $-7, R4, R7 ++ ++ SGTU R7, R6, R8 ++ BEQ R8, out ++ MOVV R0, (R6) ++ ADDV $8, R6 ++ JMP -4(PC) ++ ++out: ++ BEQ R6, R4, done ++ MOVB R0, (R6) ++ ADDV $1, R6 ++ JMP -3(PC) ++done: ++ RET +diff --git a/src/runtime/memmove_loong64.s b/src/runtime/memmove_loong64.s +new file mode 100644 +index 0000000000..339e83b49e +--- /dev/null ++++ b/src/runtime/memmove_loong64.s +@@ -0,0 +1,107 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// See memmove Go doc for important implementation constraints. 
++ ++// func memmove(to, from unsafe.Pointer, n uintptr) ++TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 ++ MOVV to+0(FP), R4 ++ MOVV from+8(FP), R5 ++ MOVV n+16(FP), R6 ++ BNE R6, check ++ RET ++ ++check: ++ SGTU R4, R5, R7 ++ BNE R7, backward ++ ++ ADDV R4, R6, R9 // end pointer ++ ++ // if the two pointers are not of same alignments, do byte copying ++ SUBVU R5, R4, R7 ++ AND $7, R7 ++ BNE R7, out ++ ++ // if less than 8 bytes, do byte copying ++ SGTU $8, R6, R7 ++ BNE R7, out ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R4, R8 ++ BEQ R8, words ++ MOVB (R5), R7 ++ ADDV $1, R5 ++ MOVB R7, (R4) ++ ADDV $1, R4 ++ JMP -6(PC) ++ ++words: ++ // do 8 bytes at a time if there is room ++ ADDV $-7, R9, R6 // R6 is end pointer-7 ++ ++ SGTU R6, R4, R8 ++ BEQ R8, out ++ MOVV (R5), R7 ++ ADDV $8, R5 ++ MOVV R7, (R4) ++ ADDV $8, R4 ++ JMP -6(PC) ++ ++out: ++ BEQ R4, R9, done ++ MOVB (R5), R7 ++ ADDV $1, R5 ++ MOVB R7, (R4) ++ ADDV $1, R4 ++ JMP -5(PC) ++done: ++ RET ++ ++backward: ++ ADDV R6, R5 // from-end pointer ++ ADDV R4, R6, R9 // to-end pointer ++ ++ // if the two pointers are not of same alignments, do byte copying ++ SUBVU R9, R5, R7 ++ AND $7, R7 ++ BNE R7, out1 ++ ++ // if less than 8 bytes, do byte copying ++ SGTU $8, R6, R7 ++ BNE R7, out1 ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R9, R8 ++ BEQ R8, words1 ++ ADDV $-1, R5 ++ MOVB (R5), R7 ++ ADDV $-1, R9 ++ MOVB R7, (R9) ++ JMP -6(PC) ++ ++words1: ++ // do 8 bytes at a time if there is room ++ ADDV $7, R4, R6 // R6 is start pointer+7 ++ ++ SGTU R9, R6, R8 ++ BEQ R8, out1 ++ ADDV $-8, R5 ++ MOVV (R5), R7 ++ ADDV $-8, R9 ++ MOVV R7, (R9) ++ JMP -6(PC) ++ ++out1: ++ BEQ R4, R9, done1 ++ ADDV $-1, R5 ++ MOVB (R5), R7 ++ ADDV $-1, R9 ++ MOVB R7, (R9) ++ JMP -5(PC) ++done1: ++ RET +-- +2.38.0 + diff --git a/loongarch64/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch b/loongarch64/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch new file mode 100644 index 
0000000..ebd9883 --- /dev/null +++ b/loongarch64/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch @@ -0,0 +1,626 @@ +From c19bb517445a8f9c884b7c9d2f92ae46b6a5f145 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:27:29 +0800 +Subject: [PATCH 24/82] runtime: implement syscalls for runtime bootstrap on + linux/loong64 + +Change-Id: I848608267932717895d5cff9e33040029c3f3c4b +--- + src/runtime/sys_linux_loong64.s | 605 ++++++++++++++++++++++++++++++++ + 1 file changed, 605 insertions(+) + create mode 100644 src/runtime/sys_linux_loong64.s + +diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +new file mode 100644 +index 0000000000..1496d922c5 +--- /dev/null ++++ b/src/runtime/sys_linux_loong64.s +@@ -0,0 +1,605 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build linux ++// +build loong64 ++ ++// ++// System calls and other sys.stuff for loong64, Linux ++// ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "textflag.h" ++ ++#define AT_FDCWD -100 ++ ++#define SYS_exit 93 ++#define SYS_read 63 ++#define SYS_write 64 ++#define SYS_close 57 ++#define SYS_getpid 172 ++#define SYS_kill 129 ++#define SYS_fcntl 25 ++#define SYS_mmap 222 ++#define SYS_munmap 215 ++#define SYS_setitimer 103 ++#define SYS_clone 220 ++#define SYS_nanosleep 101 ++#define SYS_sched_yield 124 ++#define SYS_rt_sigreturn 139 ++#define SYS_rt_sigaction 134 ++#define SYS_rt_sigprocmask 135 ++#define SYS_sigaltstack 132 ++#define SYS_madvise 233 ++#define SYS_mincore 232 ++#define SYS_gettid 178 ++#define SYS_futex 98 ++#define SYS_sched_getaffinity 123 ++#define SYS_exit_group 94 ++#define SYS_epoll_ctl 21 ++#define SYS_tgkill 131 ++#define SYS_openat 56 ++#define SYS_epoll_pwait 22 ++#define SYS_clock_gettime 113 ++#define SYS_epoll_create1 20 ++#define SYS_brk 214 ++#define SYS_pipe2 59 ++ ++TEXT 
runtime·exit(SB),NOSPLIT|NOFRAME,$0-4 ++ MOVW code+0(FP), R4 ++ MOVV $SYS_exit_group, R11 ++ SYSCALL ++ RET ++ ++// func exitThread(wait *uint32) ++TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8 ++ MOVV wait+0(FP), R19 ++ // We're done using the stack. ++ MOVW $0, R11 ++ DBAR ++ MOVW R11, (R19) ++ DBAR ++ MOVW $0, R4 // exit code ++ MOVV $SYS_exit, R11 ++ SYSCALL ++ JMP 0(PC) ++ ++TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20 ++ MOVW $AT_FDCWD, R4 // AT_FDCWD, so this acts like open ++ MOVV name+0(FP), R5 ++ MOVW mode+8(FP), R6 ++ MOVW perm+12(FP), R7 ++ MOVV $SYS_openat, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVW $-1, R4 ++ MOVW R4, ret+16(FP) ++ RET ++ ++TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVW fd+0(FP), R4 ++ MOVV $SYS_close, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVW $-1, R4 ++ MOVW R4, ret+8(FP) ++ RET ++ ++TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVV fd+0(FP), R4 ++ MOVV p+8(FP), R5 ++ MOVW n+16(FP), R6 ++ MOVV $SYS_write, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVW fd+0(FP), R4 ++ MOVV p+8(FP), R5 ++ MOVW n+16(FP), R6 ++ MOVV $SYS_read, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// func pipe() (r, w int32, errno int32) ++TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVV $r+0(FP), R4 ++ MOVV R0, R5 ++ MOVV $SYS_pipe2, R11 ++ SYSCALL ++ MOVW R4, errno+8(FP) ++ RET ++ ++// func pipe2(flags int32) (r, w int32, errno int32) ++TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 ++ MOVV $r+8(FP), R4 ++ MOVW flags+0(FP), R5 ++ MOVV $SYS_pipe2, R11 ++ SYSCALL ++ MOVW R4, errno+16(FP) ++ RET ++ ++TEXT runtime·usleep(SB),NOSPLIT,$16-4 ++ MOVWU usec+0(FP), R6 ++ MOVV R6, R5 ++ MOVW $1000000, R4 ++ DIVVU R4, R6, R6 ++ MOVV R6, 8(R3) ++ MOVW $1000, R4 ++ MULVU R6, R4, R4 ++ SUBVU R4, R5 ++ MOVV R5, 16(R3) ++ ++ // nanosleep(&ts, 0) ++ ADDV $8, R3, R4 ++ MOVW $0, R5 ++ MOVV $SYS_nanosleep, R11 ++ SYSCALL ++ RET ++ ++TEXT 
runtime·gettid(SB),NOSPLIT,$0-4 ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ MOVW R4, ret+0(FP) ++ RET ++ ++TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ MOVW R4, R23 ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ MOVW R4, R5 // arg 2 tid ++ MOVW R23, R4 // arg 1 pid ++ MOVW sig+0(FP), R6 // arg 3 ++ MOVV $SYS_tgkill, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ //MOVW R4, R4 // arg 1 pid ++ MOVW sig+0(FP), R5 // arg 2 ++ MOVV $SYS_kill, R11 ++ SYSCALL ++ RET ++ ++TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ MOVV R4, ret+0(FP) ++ RET ++ ++TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24 ++ MOVV tgid+0(FP), R4 ++ MOVV tid+8(FP), R5 ++ MOVV sig+16(FP), R6 ++ MOVV $SYS_tgkill, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24 ++ MOVW mode+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVV $SYS_setitimer, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVV dst+16(FP), R6 ++ MOVV $SYS_mincore, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// func walltime() (sec int64, nsec int32) ++TEXT runtime·walltime(SB),NOSPLIT,$16-12 ++ MOVV R3, R23 // R23 is unchanged by C code ++ MOVV R3, R25 ++ ++ MOVV g_m(g), R24 // R24 = m ++ ++ // Set vdsoPC and vdsoSP for SIGPROF traceback. ++ // Save the old values on stack and restore them on exit, ++ // so this function is reentrant. 
++ MOVV m_vdsoPC(R24), R11 ++ MOVV m_vdsoSP(R24), R7 ++ MOVV R11, 8(R3) ++ MOVV R7, 16(R3) ++ ++ MOVV $ret-8(FP), R11 // caller's SP ++ MOVV R1, m_vdsoPC(R24) ++ MOVV R11, m_vdsoSP(R24) ++ ++ MOVV m_curg(R24), R4 ++ MOVV g, R5 ++ BNE R4, R5, noswitch ++ ++ MOVV m_g0(R24), R4 ++ MOVV (g_sched+gobuf_sp)(R4), R25 // Set SP to g0 stack ++ ++noswitch: ++ SUBV $16, R25 ++ AND $~15, R25 // Align for C code ++ MOVV R25, R3 ++ ++ MOVW $0, R4 // CLOCK_REALTIME=0 ++ MOVV $0(R3), R5 ++ ++ MOVV runtime·vdsoClockgettimeSym(SB), R20 ++ BEQ R20, fallback ++ ++ JAL (R20) ++ ++finish: ++ MOVV 0(R3), R7 // sec ++ MOVV 8(R3), R5 // nsec ++ ++ MOVV R23, R3 // restore SP ++ // Restore vdsoPC, vdsoSP ++ // We don't worry about being signaled between the two stores. ++ // If we are not in a signal handler, we'll restore vdsoSP to 0, ++ // and no one will care about vdsoPC. If we are in a signal handler, ++ // we cannot receive another signal. ++ MOVV 16(R3), R25 ++ MOVV R25, m_vdsoSP(R24) ++ MOVV 8(R3), R25 ++ MOVV R25, m_vdsoPC(R24) ++ ++ MOVV R7, sec+0(FP) ++ MOVW R5, nsec+8(FP) ++ RET ++ ++fallback: ++ MOVV $SYS_clock_gettime, R11 ++ SYSCALL ++ JMP finish ++ ++TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 ++ MOVV R3, R23 // R23 is unchanged by C code ++ MOVV R3, R25 ++ ++ MOVV g_m(g), R24 // R24 = m ++ ++ // Set vdsoPC and vdsoSP for SIGPROF traceback. ++ // Save the old values on stack and restore them on exit, ++ // so this function is reentrant. 
++ MOVV m_vdsoPC(R24), R11 ++ MOVV m_vdsoSP(R24), R7 ++ MOVV R11, 8(R3) ++ MOVV R7, 16(R3) ++ ++ MOVV $ret-8(FP), R11 // caller's SP ++ MOVV R1, m_vdsoPC(R24) ++ MOVV R11, m_vdsoSP(R24) ++ ++ MOVV m_curg(R24), R4 ++ MOVV g, R5 ++ BNE R4, R5, noswitch ++ ++ MOVV m_g0(R24), R4 ++ MOVV (g_sched+gobuf_sp)(R4), R25 // Set SP to g0 stack ++ ++noswitch: ++ SUBV $16, R25 ++ AND $~15, R25 // Align for C code ++ MOVV R25, R3 ++ ++ MOVW $1, R4 // CLOCK_MONOTONIC=1 ++ MOVV $0(R3), R5 ++ ++ MOVV runtime·vdsoClockgettimeSym(SB), R20 ++ BEQ R20, fallback ++ ++ JAL (R20) ++ ++finish: ++ MOVV 0(R3), R7 // sec ++ MOVV 8(R3), R5 // nsec ++ ++ MOVV R23, R3 // restore SP ++ // Restore vdsoPC, vdsoSP ++ // We don't worry about being signaled between the two stores. ++ // If we are not in a signal handler, we'll restore vdsoSP to 0, ++ // and no one will care about vdsoPC. If we are in a signal handler, ++ // we cannot receive another signal. ++ MOVV 16(R3), R25 ++ MOVV R25, m_vdsoSP(R24) ++ MOVV 8(R3), R25 ++ MOVV R25, m_vdsoPC(R24) ++ ++ // sec is in R7, nsec in R5 ++ // return nsec in R7 ++ MOVV $1000000000, R4 ++ MULVU R4, R7, R7 ++ ADDVU R5, R7 ++ MOVV R7, ret+0(FP) ++ RET ++ ++fallback: ++ MOVV $SYS_clock_gettime, R11 ++ SYSCALL ++ JMP finish ++ ++TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVW how+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVW size+24(FP), R7 ++ MOVV $SYS_rt_sigprocmask, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf1(R0) // crash ++ RET ++ ++TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36 ++ MOVV sig+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVV size+24(FP), R7 ++ MOVV $SYS_rt_sigaction, R11 ++ SYSCALL ++ MOVW R4, ret+32(FP) ++ RET ++ ++TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 ++ MOVW sig+8(FP), R4 ++ MOVV info+16(FP), R5 ++ MOVV ctx+24(FP), R6 ++ MOVV fn+0(FP), R20 ++ JAL (R20) ++ RET ++ ++TEXT runtime·sigtramp(SB),NOSPLIT,$64 ++ // this might be called in external code context, ++ // where 
g is not set. ++ MOVB runtime·iscgo(SB), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·load_g(SB) ++ ++ MOVW R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV $runtime·sigtrampgo(SB), R19 ++ JAL (R19) ++ RET ++ ++TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0 ++ JMP runtime·sigtramp(SB) ++ ++TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVW prot+16(FP), R6 ++ MOVW flags+20(FP), R7 ++ MOVW fd+24(FP), R8 ++ MOVW off+28(FP), R9 ++ ++ MOVV $SYS_mmap, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, ok ++ MOVV $0, p+32(FP) ++ SUBVU R4, R0, R4 ++ MOVV R4, err+40(FP) ++ RET ++ok: ++ MOVV R4, p+32(FP) ++ MOVV $0, err+40(FP) ++ RET ++ ++TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVV $SYS_munmap, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf3(R0) // crash ++ RET ++ ++TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVW flags+16(FP), R6 ++ MOVV $SYS_madvise, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int64 futex(int32 *uaddr, int32 op, int32 val, ++// struct timespec *timeout, int32 *uaddr2, int32 val2); ++TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVW op+8(FP), R5 ++ MOVW val+12(FP), R6 ++ MOVV ts+16(FP), R7 ++ MOVV addr2+24(FP), R8 ++ MOVW val3+32(FP), R9 ++ MOVV $SYS_futex, R11 ++ SYSCALL ++ MOVW R4, ret+40(FP) ++ RET ++ ++// int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void)); ++TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0 ++ MOVW flags+0(FP), R4 ++ MOVV stk+8(FP), R5 ++ ++ // Copy mp, gp, fn off parent stack for use by child. ++ // Careful: Linux system call clobbers ???. ++ MOVV mp+16(FP), R23 ++ MOVV gp+24(FP), R24 ++ MOVV fn+32(FP), R25 ++ ++ MOVV R23, -8(R5) ++ MOVV R24, -16(R5) ++ MOVV R25, -24(R5) ++ MOVV $1234, R23 ++ MOVV R23, -32(R5) ++ ++ MOVV $SYS_clone, R11 ++ SYSCALL ++ ++ // In parent, return. 
++ BEQ R4, 3(PC) ++ MOVW R4, ret+40(FP) ++ RET ++ ++ // In child, on new stack. ++ MOVV -32(R3), R23 ++ MOVV $1234, R19 ++ BEQ R23, R19, 2(PC) ++ MOVV R0, 0(R0) ++ ++ // Initialize m->procid to Linux tid ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ ++ MOVV -24(R3), R25 // fn ++ MOVV -16(R3), R24 // g ++ MOVV -8(R3), R23 // m ++ ++ BEQ R23, nog ++ BEQ R24, nog ++ ++ MOVV R4, m_procid(R23) ++ ++ // TODO: setup TLS. ++ ++ // In child, set up new stack ++ MOVV R23, g_m(R24) ++ MOVV R24, g ++ //CALL runtime·stackcheck(SB) ++ ++nog: ++ // Call fn ++ JAL (R25) ++ ++ // It shouldn't return. If it does, exit that thread. ++ MOVW $111, R4 ++ MOVV $SYS_exit, R11 ++ SYSCALL ++ JMP -3(PC) // keep exiting ++ ++TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 ++ MOVV new+0(FP), R4 ++ MOVV old+8(FP), R5 ++ MOVV $SYS_sigaltstack, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf1(R0) // crash ++ RET ++ ++TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_sched_yield, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 ++ MOVV pid+0(FP), R4 ++ MOVV len+8(FP), R5 ++ MOVV buf+16(FP), R6 ++ MOVV $SYS_sched_getaffinity, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int32 runtime·epollcreate(int32 size); ++TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0 ++ MOVW size+0(FP), R4 ++ MOVV $SYS_epoll_create1, R11 ++ SYSCALL ++ MOVW R4, ret+8(FP) ++ RET ++ ++// int32 runtime·epollcreate1(int32 flags); ++TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0 ++ MOVW flags+0(FP), R4 ++ MOVV $SYS_epoll_create1, R11 ++ SYSCALL ++ MOVW R4, ret+8(FP) ++ RET ++ ++// func epollctl(epfd, op, fd int32, ev *epollEvent) int ++TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0 ++ MOVW epfd+0(FP), R4 ++ MOVW op+4(FP), R5 ++ MOVW fd+8(FP), R6 ++ MOVV ev+16(FP), R7 ++ MOVV $SYS_epoll_ctl, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout); ++TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0 
++ MOVW epfd+0(FP), R4 ++ MOVV ev+8(FP), R5 ++ MOVW nev+16(FP), R6 ++ MOVW timeout+20(FP), R7 ++ MOVV $0, R8 ++ MOVV $SYS_epoll_pwait, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// void runtime·closeonexec(int32 fd); ++TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $2, R5 // F_SETFD ++ MOVV $1, R6 // FD_CLOEXEC ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ RET ++ ++// func runtime·setNonblock(int32 fd) ++TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $3, R5 // F_GETFL ++ MOVV $0, R6 ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ MOVW $0x800, R6 // O_NONBLOCK ++ OR R4, R6 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $4, R5 // F_SETFL ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ RET ++ ++// func sbrk0() uintptr ++TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8 ++ // Implemented as brk(NULL). ++ MOVV $0, R4 ++ MOVV $SYS_brk, R11 ++ SYSCALL ++ MOVV R4, ret+0(FP) ++ RET ++ ++TEXT runtime·access(SB),$0-20 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+16(FP) // for vet ++ RET ++ ++TEXT runtime·connect(SB),$0-28 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+24(FP) // for vet ++ RET ++ ++TEXT runtime·socket(SB),$0-20 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+16(FP) // for vet ++ RET +-- +2.38.0 + diff --git a/loongarch64/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch b/loongarch64/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch new file mode 100644 index 0000000..01ce98b --- /dev/null +++ b/loongarch64/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch @@ -0,0 +1,61 @@ +From c7dda73ee659068c2fcd1768789a7e1c2a4e9705 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:42:46 +0800 +Subject: [PATCH 25/82] runtime: add build tag for common support on + linux/loong64 + +Change-Id: 
Ide01fb8a39fe3e890f6cbc5d28f4a1d47eb5d79b +--- + src/runtime/hash64.go | 4 ++-- + src/runtime/lfstack_64bit.go | 4 ++-- + src/runtime/mpagealloc_64bit.go | 4 ++-- + 3 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/src/runtime/hash64.go b/src/runtime/hash64.go +index 5f7d00bf7f..62ce67ac7e 100644 +--- a/src/runtime/hash64.go ++++ b/src/runtime/hash64.go +@@ -5,8 +5,8 @@ + // Hashing algorithm inspired by + // wyhash: https://github.com/wangyi-fudan/wyhash + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm ++//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm + + package runtime + +diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go +index 4812dd1156..71bd2fc0d4 100644 +--- a/src/runtime/lfstack_64bit.go ++++ b/src/runtime/lfstack_64bit.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm ++//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm + + package runtime + +diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go +index 16577346a7..1710ca97e0 100644 +--- a/src/runtime/mpagealloc_64bit.go ++++ b/src/runtime/mpagealloc_64bit.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build amd64 || (!ios && arm64) || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x +-// +build amd64 !ios,arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x ++//go:build amd64 || (!ios && arm64) || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x ++// +build amd64 !ios,arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x + + // See mpagealloc_32bit.go for why ios/arm64 is excluded here. + +-- +2.38.0 + diff --git a/loongarch64/0026-runtime-fix-runtime-test-error-for-loong64.patch b/loongarch64/0026-runtime-fix-runtime-test-error-for-loong64.patch new file mode 100644 index 0000000..beccbfa --- /dev/null +++ b/loongarch64/0026-runtime-fix-runtime-test-error-for-loong64.patch @@ -0,0 +1,26 @@ +From 5b6184fe7e55aea4199a630a7f26a7a46fd5907c Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:43:12 +0800 +Subject: [PATCH 26/82] runtime: fix runtime test error for loong64 + +Change-Id: I61bef32b38ab07543a147cf172b169eae21b26cf +--- + src/runtime/gcinfo_test.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go +index 0808b416f0..daa4ec58ba 100644 +--- a/src/runtime/gcinfo_test.go ++++ b/src/runtime/gcinfo_test.go +@@ -164,7 +164,7 @@ func infoBigStruct() []byte { + typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 + typePointer, typeScalar, // i string + } +- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm": ++ case "arm64", "amd64", "loong64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm": + return []byte{ + typePointer, // q *int + typeScalar, typeScalar, typeScalar, // w byte; e [17]byte +-- +2.38.0 + diff --git a/loongarch64/0027-runtime-internal-add-atomic-support-for-loong64.patch b/loongarch64/0027-runtime-internal-add-atomic-support-for-loong64.patch new file mode 100644 index 0000000..b0889d2 --- /dev/null +++ 
b/loongarch64/0027-runtime-internal-add-atomic-support-for-loong64.patch @@ -0,0 +1,414 @@ +From 9d91c70512565f069157aa1c32acea676c63a67b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:55:35 +0800 +Subject: [PATCH 27/82] runtime/internal: add atomic support for loong64 + +Change-Id: I0333503db044c6f39df2d7f8d9dff213b1361d6c +--- + src/runtime/internal/atomic/atomic_loong64.go | 84 +++++ + src/runtime/internal/atomic/atomic_loong64.s | 302 ++++++++++++++++++ + 2 files changed, 386 insertions(+) + create mode 100644 src/runtime/internal/atomic/atomic_loong64.go + create mode 100644 src/runtime/internal/atomic/atomic_loong64.s + +diff --git a/src/runtime/internal/atomic/atomic_loong64.go b/src/runtime/internal/atomic/atomic_loong64.go +new file mode 100644 +index 0000000000..563122693b +--- /dev/null ++++ b/src/runtime/internal/atomic/atomic_loong64.go +@@ -0,0 +1,84 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build loong64 ++// +build loong64 ++ ++package atomic ++ ++import "unsafe" ++ ++//go:noescape ++func Xadd(ptr *uint32, delta int32) uint32 ++ ++//go:noescape ++func Xadd64(ptr *uint64, delta int64) uint64 ++ ++//go:noescape ++func Xadduintptr(ptr *uintptr, delta uintptr) uintptr ++ ++//go:noescape ++func Xchg(ptr *uint32, new uint32) uint32 ++ ++//go:noescape ++func Xchg64(ptr *uint64, new uint64) uint64 ++ ++//go:noescape ++func Xchguintptr(ptr *uintptr, new uintptr) uintptr ++ ++//go:noescape ++func Load(ptr *uint32) uint32 ++ ++//go:noescape ++func Load8(ptr *uint8) uint8 ++ ++//go:noescape ++func Load64(ptr *uint64) uint64 ++ ++// NO go:noescape annotation; *ptr escapes if result escapes (#31525) ++func Loadp(ptr unsafe.Pointer) unsafe.Pointer ++ ++//go:noescape ++func LoadAcq(ptr *uint32) uint32 ++ ++//go:noescape ++func LoadAcquintptr(ptr *uintptr) uintptr ++ ++//go:noescape ++func And8(ptr *uint8, val uint8) ++ ++//go:noescape ++func And(ptr *uint32, val uint32) ++ ++//go:noescape ++func Or8(ptr *uint8, val uint8) ++ ++//go:noescape ++func Or(ptr *uint32, val uint32) ++ ++// NOTE: Do not add atomicxor8 (XOR is not idempotent). ++ ++//go:noescape ++func Cas64(ptr *uint64, old, new uint64) bool ++ ++//go:noescape ++func CasRel(ptr *uint32, old, new uint32) bool ++ ++//go:noescape ++func Store(ptr *uint32, val uint32) ++ ++//go:noescape ++func Store8(ptr *uint8, val uint8) ++ ++//go:noescape ++func Store64(ptr *uint64, val uint64) ++ ++// NO go:noescape annotation; see atomic_pointer.go. ++func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) ++ ++//go:noescape ++func StoreRel(ptr *uint32, val uint32) ++ ++//go:noescape ++func StoreReluintptr(ptr *uintptr, val uintptr) +diff --git a/src/runtime/internal/atomic/atomic_loong64.s b/src/runtime/internal/atomic/atomic_loong64.s +new file mode 100644 +index 0000000000..fef7931968 +--- /dev/null ++++ b/src/runtime/internal/atomic/atomic_loong64.s +@@ -0,0 +1,302 @@ ++// Copyright 2021 The Go Authors. 
All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "textflag.h" ++ ++// DBAR sync load/store operation ++#define DBAR WORD $0x38720000 ++ ++// bool cas(uint32 *ptr, uint32 old, uint32 new) ++// Atomically: ++// if(*ptr == old){ ++// *ptr = new; ++// return 1; ++// } else ++// return 0; ++TEXT ·Cas(SB), NOSPLIT, $0-17 ++ MOVV ptr+0(FP), R4 ++ MOVW old+8(FP), R5 ++ MOVW new+12(FP), R6 ++ DBAR ++cas_again: ++ MOVV R6, R7 ++ LL (R4), R8 ++ BNE R5, R8, cas_fail ++ SC R7, (R4) ++ BEQ R7, cas_again ++ MOVV $1, R4 ++ MOVB R4, ret+16(FP) ++ DBAR ++ RET ++cas_fail: ++ MOVV $0, R4 ++ JMP -4(PC) ++ ++// bool cas64(uint64 *ptr, uint64 old, uint64 new) ++// Atomically: ++// if(*ptr == old){ ++// *ptr = new; ++// return 1; ++// } else { ++// return 0; ++// } ++TEXT ·Cas64(SB), NOSPLIT, $0-25 ++ MOVV ptr+0(FP), R4 ++ MOVV old+8(FP), R5 ++ MOVV new+16(FP), R6 ++ DBAR ++cas64_again: ++ MOVV R6, R7 ++ LLV (R4), R8 ++ BNE R5, R8, cas64_fail ++ SCV R7, (R4) ++ BEQ R7, cas64_again ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ DBAR ++ RET ++cas64_fail: ++ MOVV $0, R4 ++ JMP -4(PC) ++ ++TEXT ·Casuintptr(SB), NOSPLIT, $0-25 ++ JMP ·Cas64(SB) ++ ++TEXT ·CasRel(SB), NOSPLIT, $0-17 ++ JMP ·Cas(SB) ++ ++TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 ++ JMP ·Xadd64(SB) ++ ++TEXT ·Loadint64(SB), NOSPLIT, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Xaddint64(SB), NOSPLIT, $0-24 ++ JMP ·Xadd64(SB) ++ ++// bool casp(void **val, void *old, void *new) ++// Atomically: ++// if(*val == old){ ++// *val = new; ++// return 1; ++// } else ++// return 0; ++TEXT ·Casp1(SB), NOSPLIT, $0-25 ++ JMP runtime∕internal∕atomic·Cas64(SB) ++ ++// uint32 xadd(uint32 volatile *ptr, int32 delta) ++// Atomically: ++// *val += delta; ++// return *val; 
++TEXT ·Xadd(SB), NOSPLIT, $0-20 ++ MOVV ptr+0(FP), R4 ++ MOVW delta+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ ADDU R6, R5, R7 ++ MOVV R7, R6 ++ SC R7, (R4) ++ BEQ R7, -4(PC) ++ MOVW R6, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xadd64(SB), NOSPLIT, $0-24 ++ MOVV ptr+0(FP), R4 ++ MOVV delta+8(FP), R5 ++ DBAR ++ LLV (R4), R6 ++ ADDVU R6, R5, R7 ++ MOVV R7, R6 ++ SCV R7, (R4) ++ BEQ R7, -4(PC) ++ MOVV R6, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchg(SB), NOSPLIT, $0-20 ++ MOVV ptr+0(FP), R4 ++ MOVW new+8(FP), R5 ++ ++ DBAR ++ MOVV R5, R6 ++ LL (R4), R7 ++ SC R6, (R4) ++ BEQ R6, -3(PC) ++ MOVW R7, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchg64(SB), NOSPLIT, $0-24 ++ MOVV ptr+0(FP), R4 ++ MOVV new+8(FP), R5 ++ ++ DBAR ++ MOVV R5, R6 ++ LLV (R4), R7 ++ SCV R6, (R4) ++ BEQ R6, -3(PC) ++ MOVV R7, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 ++ JMP ·Xchg64(SB) ++ ++TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·StoreRel(SB), NOSPLIT, $0-12 ++ JMP ·Store(SB) ++ ++TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·Store(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ MOVW R5, 0(R4) ++ DBAR ++ RET ++ ++TEXT ·Store8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVB val+8(FP), R5 ++ DBAR ++ MOVB R5, 0(R4) ++ DBAR ++ RET ++ ++TEXT ·Store64(SB), NOSPLIT, $0-16 ++ MOVV ptr+0(FP), R4 ++ MOVV val+8(FP), R5 ++ DBAR ++ MOVV R5, 0(R4) ++ DBAR ++ RET ++ ++// void Or8(byte volatile*, byte); ++TEXT ·Or8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVBU val+8(FP), R5 ++ // Align ptr down to 4 bytes so we can use 32-bit load/store. ++ MOVV $~3, R6 ++ AND R4, R6 ++ // R7 = ((ptr & 3) * 8) ++ AND $3, R4, R7 ++ SLLV $3, R7 ++ // Shift val for aligned ptr. 
R5 = val << R4 ++ SLLV R7, R5 ++ ++ DBAR ++ LL (R6), R7 ++ OR R5, R7 ++ SC R7, (R6) ++ BEQ R7, -4(PC) ++ DBAR ++ RET ++ ++// void And8(byte volatile*, byte); ++TEXT ·And8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVBU val+8(FP), R5 ++ // Align ptr down to 4 bytes so we can use 32-bit load/store. ++ MOVV $~3, R6 ++ AND R4, R6 ++ // R7 = ((ptr & 3) * 8) ++ AND $3, R4, R7 ++ SLLV $3, R7 ++ // Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7) ++ MOVV $0xFF, R8 ++ SLLV R7, R5 ++ SLLV R7, R8 ++ NOR R0, R8 ++ OR R8, R5 ++ ++ DBAR ++ LL (R6), R7 ++ AND R5, R7 ++ SC R7, (R6) ++ BEQ R7, -4(PC) ++ DBAR ++ RET ++ ++// func Or(addr *uint32, v uint32) ++TEXT ·Or(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ OR R5, R6 ++ SC R6, (R4) ++ BEQ R6, -4(PC) ++ DBAR ++ RET ++ ++// func And(addr *uint32, v uint32) ++TEXT ·And(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ AND R5, R6 ++ SC R6, (R4) ++ BEQ R6, -4(PC) ++ DBAR ++ RET ++ ++// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr) ++TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVWU 0(R19), R19 ++ DBAR ++ MOVW R19, ret+8(FP) ++ RET ++ ++// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr) ++TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVBU 0(R19), R19 ++ DBAR ++ MOVB R19, ret+8(FP) ++ RET ++ ++// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr) ++TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVV 0(R19), R19 ++ DBAR ++ MOVV R19, ret+8(FP) ++ RET ++ ++// void *runtime∕internal∕atomic·Loadp(void *volatile *ptr) ++TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVV 0(R19), R19 ++ DBAR ++ MOVV R19, ret+8(FP) ++ RET ++ ++// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr) ++TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 ++ JMP atomic·Load(SB) ++ ++// uintptr ·LoadAcquintptr(uintptr volatile* ptr) ++TEXT 
·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 ++ JMP atomic·Load64(SB) ++ +-- +2.38.0 + diff --git a/loongarch64/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch b/loongarch64/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch new file mode 100644 index 0000000..ca5d9c8 --- /dev/null +++ b/loongarch64/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch @@ -0,0 +1,47 @@ +From 009328b65d649f1bbabf9bd6384aae8e1cf2717d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:32:16 +0800 +Subject: [PATCH 28/82] cmd/cgo: configure cgo tool for loong64 + +Change-Id: I9699fd9af0112e72193ac24b736b85c580887a0f +--- + src/cmd/cgo/gcc.go | 2 ++ + src/cmd/cgo/main.go | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go +index a73e998877..b9250d55b7 100644 +--- a/src/cmd/cgo/gcc.go ++++ b/src/cmd/cgo/gcc.go +@@ -1593,6 +1593,8 @@ func (p *Package) gccMachine() []string { + } else if gomips == "softfloat" { + return []string{"-mabi=32", "-msoft-float"} + } ++ case "loong64": ++ return []string{"-mabi=lp64d"} + } + return nil + } +diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go +index c6a0c525e6..46bd91b051 100644 +--- a/src/cmd/cgo/main.go ++++ b/src/cmd/cgo/main.go +@@ -176,6 +176,7 @@ var ptrSizeMap = map[string]int64{ + "amd64": 8, + "arm": 4, + "arm64": 8, ++ "loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, +@@ -201,6 +202,7 @@ var intSizeMap = map[string]int64{ + "amd64": 8, + "arm": 4, + "arm64": 8, ++ "loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, +-- +2.38.0 + diff --git a/loongarch64/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch b/loongarch64/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch new file mode 100644 index 0000000..cb3401c --- /dev/null +++ b/loongarch64/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch @@ -0,0 +1,250 @@ +From ffd6f8cba89848528c8b52499b3992530675bac2 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:53:45 
+0800 +Subject: [PATCH 29/82] runtime/cgo: add cgo function call support for loong64 + +Change-Id: I8ef0e7f17d6ada3d2f07c81524136b78457e7795 +--- + src/runtime/cgo/asm_loong64.s | 69 +++++++++++++++++++++++++ + src/runtime/cgo/gcc_linux_loong64.c | 78 +++++++++++++++++++++++++++++ + src/runtime/cgo/gcc_loong64.S | 67 +++++++++++++++++++++++++ + 3 files changed, 214 insertions(+) + create mode 100644 src/runtime/cgo/asm_loong64.s + create mode 100644 src/runtime/cgo/gcc_linux_loong64.c + create mode 100644 src/runtime/cgo/gcc_loong64.S + +diff --git a/src/runtime/cgo/asm_loong64.s b/src/runtime/cgo/asm_loong64.s +new file mode 100644 +index 0000000000..aa5a4ca200 +--- /dev/null ++++ b/src/runtime/cgo/asm_loong64.s +@@ -0,0 +1,69 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// Called by C code generated by cmd/cgo. ++// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) ++// Saves C callee-saved registers and calls cgocallback with three arguments. ++// fn is the PC of a func(a unsafe.Pointer) function. ++TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 ++ /* ++ * We still need to save all callee save register as before, and then ++ * push 3 args for fn (R4, R5, R7), skipping R6. ++ * Also note that at procedure entry in gc world, 8(R29) will be the ++ * first arg. 
++ */ ++ ++ ADDV $(-8*22), R3 ++ MOVV R4, (8*1)(R3) // fn unsafe.Pointer ++ MOVV R5, (8*2)(R3) // a unsafe.Pointer ++ MOVV R7, (8*3)(R3) // ctxt uintptr ++ MOVV R23, (8*4)(R3) ++ MOVV R24, (8*5)(R3) ++ MOVV R25, (8*6)(R3) ++ MOVV R26, (8*7)(R3) ++ MOVV R27, (8*8)(R3) ++ MOVV R28, (8*9)(R3) ++ MOVV R29, (8*10)(R3) ++ MOVV R30, (8*11)(R3) ++ MOVV g, (8*12)(R3) ++ MOVV R1, (8*13)(R3) ++ MOVD F24, (8*14)(R3) ++ MOVD F25, (8*15)(R3) ++ MOVD F26, (8*16)(R3) ++ MOVD F27, (8*17)(R3) ++ MOVD F28, (8*18)(R3) ++ MOVD F29, (8*19)(R3) ++ MOVD F30, (8*20)(R3) ++ MOVD F31, (8*21)(R3) ++ ++ // Initialize Go ABI environment ++ JAL runtime·load_g(SB) ++ ++ JAL runtime·cgocallback(SB) ++ ++ MOVV (8*4)(R3), R23 ++ MOVV (8*5)(R3), R24 ++ MOVV (8*6)(R3), R25 ++ MOVV (8*7)(R3), R26 ++ MOVV (8*8)(R3), R27 ++ MOVV (8*9)(R3), R28 ++ MOVV (8*10)(R3), R29 ++ MOVV (8*11)(R3), R30 ++ MOVV (8*12)(R3), g ++ MOVV (8*13)(R3), R1 ++ MOVD (8*14)(R3), F24 ++ MOVD (8*15)(R3), F25 ++ MOVD (8*16)(R3), F26 ++ MOVD (8*17)(R3), F27 ++ MOVD (8*18)(R3), F28 ++ MOVD (8*19)(R3), F29 ++ MOVD (8*20)(R3), F30 ++ MOVD (8*21)(R3), F31 ++ ADDV $(8*22), R3 ++ ++ RET +diff --git a/src/runtime/cgo/gcc_linux_loong64.c b/src/runtime/cgo/gcc_linux_loong64.c +new file mode 100644 +index 0000000000..c8c5fda889 +--- /dev/null ++++ b/src/runtime/cgo/gcc_linux_loong64.c +@@ -0,0 +1,78 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build cgo ++// +build linux ++// +build loong64 ++ ++#include ++#include ++#include ++#include "libcgo.h" ++#include "libcgo_unix.h" ++ ++static void *threadentry(void*); ++ ++void (*x_cgo_inittls)(void **tlsg, void **tlsbase); ++static void (*setg_gcc)(void*); ++ ++void ++_cgo_sys_thread_start(ThreadStart *ts) ++{ ++ pthread_attr_t attr; ++ sigset_t ign, oset; ++ pthread_t p; ++ size_t size; ++ int err; ++ ++ sigfillset(&ign); ++ pthread_sigmask(SIG_SETMASK, &ign, &oset); ++ ++ // Not sure why the memset is necessary here, ++ // but without it, we get a bogus stack size ++ // out of pthread_attr_getstacksize. C'est la Linux. ++ memset(&attr, 0, sizeof attr); ++ pthread_attr_init(&attr); ++ size = 0; ++ pthread_attr_getstacksize(&attr, &size); ++ // Leave stacklo=0 and set stackhi=size; mstart will do the rest. ++ ts->g->stackhi = size; ++ err = _cgo_try_pthread_create(&p, &attr, threadentry, ts); ++ ++ pthread_sigmask(SIG_SETMASK, &oset, nil); ++ ++ if (err != 0) { ++ fatalf("pthread_create failed: %s", strerror(err)); ++ } ++} ++ ++extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g); ++static void* ++threadentry(void *v) ++{ ++ ThreadStart ts; ++ ++ ts = *(ThreadStart*)v; ++ free(v); ++ ++ crosscall1(ts.fn, setg_gcc, (void*)ts.g); ++ return nil; ++} ++ ++void ++x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase) ++{ ++ pthread_attr_t attr; ++ size_t size; ++ ++ setg_gcc = setg; ++ pthread_attr_init(&attr); ++ pthread_attr_getstacksize(&attr, &size); ++ g->stacklo = (uintptr)&attr - size + 4096; ++ pthread_attr_destroy(&attr); ++ ++ if (x_cgo_inittls) { ++ x_cgo_inittls(tlsg, tlsbase); ++ } ++} +diff --git a/src/runtime/cgo/gcc_loong64.S b/src/runtime/cgo/gcc_loong64.S +new file mode 100644 +index 0000000000..e294164e9d +--- /dev/null ++++ b/src/runtime/cgo/gcc_loong64.S +@@ -0,0 +1,67 @@ ++// Copyright 2021 The Go Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++/* ++ * void crosscall1(void (*fn)(void), void (*setg_gcc)(void *g), void *g) ++ * ++ * Calling into the gc tool chain, where all registers are caller save. ++ * Called from standard lp64d ABI, where $r1, $r3, $r23-$r30, and $f24-$f31 ++ * are callee-save, so they must be saved explicitly, along with $r1 (LR). ++ */ ++.globl crosscall1 ++crosscall1: ++ addi.d $r3, $r3, -160 ++ st.d $r1, $r3, 0 ++ st.d $r23, $r3, 8 ++ st.d $r24, $r3, 16 ++ st.d $r25, $r3, 24 ++ st.d $r26, $r3, 32 ++ st.d $r27, $r3, 40 ++ st.d $r28, $r3, 48 ++ st.d $r29, $r3, 56 ++ st.d $r30, $r3, 64 ++ st.d $r2, $r3, 72 ++ st.d $r22, $r3, 80 ++ fst.d $f24, $r3, 88 ++ fst.d $f25, $r3, 96 ++ fst.d $f26, $r3, 104 ++ fst.d $f27, $r3, 112 ++ fst.d $f28, $r3, 120 ++ fst.d $f29, $r3, 128 ++ fst.d $f30, $r3, 136 ++ fst.d $f31, $r3, 144 ++ ++ move $r18, $r4 // save R4 ++ move $r19, $r6 ++ jirl $r1, $r5, 0 // call setg_gcc (clobbers R4) ++ jirl $r1, $r18, 0 // call fn ++ ++ ld.d $r23, $r3, 8 ++ ld.d $r24, $r3, 16 ++ ld.d $r25, $r3, 24 ++ ld.d $r26, $r3, 32 ++ ld.d $r27, $r3, 40 ++ ld.d $r28, $r3, 48 ++ ld.d $r29, $r3, 56 ++ ld.d $r30, $r3, 64 ++ ld.d $r2, $r3, 72 ++ ld.d $r22, $r3, 80 ++ fld.d $f24, $r3, 88 ++ fld.d $f25, $r3, 96 ++ fld.d $f26, $r3, 104 ++ fld.d $f27, $r3, 112 ++ fld.d $f28, $r3, 120 ++ fld.d $f29, $r3, 128 ++ fld.d $f30, $r3, 136 ++ fld.d $f31, $r3, 144 ++ ld.d $r1, $r3, 0 ++ addi.d $r3, $r3, 160 ++ jirl $r0, $r1, 0 ++ ++ ++#ifdef __ELF__ ++.section .note.GNU-stack,"",%progbits ++#endif +-- +2.38.0 + diff --git a/loongarch64/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch b/loongarch64/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch new file mode 100644 index 0000000..1f54729 --- /dev/null +++ b/loongarch64/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch @@ -0,0 +1,55 @@ +From 
f2f4abf4c76b87753d81c03bdd2a6f910916021d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:06:29 +0800 +Subject: [PATCH 30/82] cmd/nm, cmd/objdump, cmd/pprof: disassembly is not + supported on loong64 + +Change-Id: Ic96e4f0c46d9a6b8cd020e899f32c40681daf9c5 +--- + src/cmd/nm/nm_cgo_test.go | 2 +- + src/cmd/objdump/objdump_test.go | 2 ++ + src/cmd/pprof/pprof_test.go | 2 ++ + 3 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/nm/nm_cgo_test.go b/src/cmd/nm/nm_cgo_test.go +index 1544be041a..24f4321802 100644 +--- a/src/cmd/nm/nm_cgo_test.go ++++ b/src/cmd/nm/nm_cgo_test.go +@@ -25,7 +25,7 @@ func canInternalLink() bool { + } + case "linux": + switch runtime.GOARCH { +- case "arm64", "mips64", "mips64le", "mips", "mipsle", "ppc64", "ppc64le", "riscv64": ++ case "arm64", "loong64", "mips64", "mips64le", "mips", "mipsle", "ppc64", "ppc64le", "riscv64": + return false + } + case "openbsd": +diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go +index f231a7c6e0..40fe21bb45 100644 +--- a/src/cmd/objdump/objdump_test.go ++++ b/src/cmd/objdump/objdump_test.go +@@ -107,6 +107,8 @@ var ppcGnuNeed = []string{ + + func mustHaveDisasm(t *testing.T) { + switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("skipping on %s", runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) + case "riscv64": +diff --git a/src/cmd/pprof/pprof_test.go b/src/cmd/pprof/pprof_test.go +index 11e251bfde..9a37b97286 100644 +--- a/src/cmd/pprof/pprof_test.go ++++ b/src/cmd/pprof/pprof_test.go +@@ -72,6 +72,8 @@ func mustHaveCPUProfiling(t *testing.T) { + + func mustHaveDisasm(t *testing.T) { + switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("skipping on %s.", runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) + case "riscv64": +-- +2.38.0 + diff --git 
a/loongarch64/0031-cmd-dist-support-dist-tool-for-loong64.patch b/loongarch64/0031-cmd-dist-support-dist-tool-for-loong64.patch new file mode 100644 index 0000000..0d9a42a --- /dev/null +++ b/loongarch64/0031-cmd-dist-support-dist-tool-for-loong64.patch @@ -0,0 +1,88 @@ +From 84657c124d7a932d089068bf7917efe4d7230f97 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:53:05 +0800 +Subject: [PATCH 31/82] cmd/dist: support dist tool for loong64 + +Change-Id: I61dca43680d8e5bd3198a38577450a53f405a987 +--- + src/cmd/dist/build.go | 2 ++ + src/cmd/dist/main.go | 4 +++- + src/cmd/dist/test.go | 6 +++--- + 3 files changed, 8 insertions(+), 4 deletions(-) + +diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go +index bec17696f3..c18ca7517a 100644 +--- a/src/cmd/dist/build.go ++++ b/src/cmd/dist/build.go +@@ -65,6 +65,7 @@ var okgoarch = []string{ + "amd64", + "arm", + "arm64", ++ "loong64", + "mips", + "mipsle", + "mips64", +@@ -1571,6 +1572,7 @@ var cgoEnabled = map[string]bool{ + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, ++ "linux/loong64": true, + "linux/ppc64": false, + "linux/ppc64le": true, + "linux/mips": true, +diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go +index 37de1acc31..f8ef99f5d0 100644 +--- a/src/cmd/dist/main.go ++++ b/src/cmd/dist/main.go +@@ -125,6 +125,8 @@ func main() { + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mipsle" + } ++ case strings.Contains(out, "loongarch64"): ++ gohostarch = "loong64" + case strings.Contains(out, "riscv64"): + gohostarch = "riscv64" + case strings.Contains(out, "s390x"): +@@ -142,7 +144,7 @@ func main() { + } + } + +- if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" { ++ if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" || gohostarch == "loong64" { + maxbg = min(maxbg, runtime.NumCPU()) + } + bginit() +diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go +index f40fa926df..485fe6b17b 100644 +--- 
a/src/cmd/dist/test.go ++++ b/src/cmd/dist/test.go +@@ -166,7 +166,7 @@ func (t *tester) run() { + switch goarch { + case "arm": + t.timeoutScale = 2 +- case "mips", "mipsle", "mips64", "mips64le": ++ case "loong64", "mips", "mipsle", "mips64", "mips64le": + t.timeoutScale = 4 + } + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { +@@ -983,7 +983,7 @@ func (t *tester) extLink() bool { + "darwin-amd64", "darwin-arm64", + "dragonfly-amd64", + "freebsd-386", "freebsd-amd64", "freebsd-arm", +- "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-riscv64", "linux-s390x", ++ "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-loong64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-riscv64", "linux-s390x", + "netbsd-386", "netbsd-amd64", + "openbsd-386", "openbsd-amd64", + "windows-386", "windows-amd64": +@@ -1014,7 +1014,7 @@ func (t *tester) internalLink() bool { + // Internally linking cgo is incomplete on some architectures. 
+ // https://golang.org/issue/10373 + // https://golang.org/issue/14449 +- if goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" { ++ if goarch == "loong64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" { + return false + } + if goos == "aix" { +-- +2.38.0 + diff --git a/loongarch64/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch b/loongarch64/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch new file mode 100644 index 0000000..4e3765f --- /dev/null +++ b/loongarch64/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch @@ -0,0 +1,2740 @@ +From 847485b6ab3d07d450b64c5d6c4ee9f12a1cf06e Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:00:41 +0800 +Subject: [PATCH 32/82] cmd/vendor: update vendored golang.org/x/sys to support + syscall on loong64 + +Change-Id: Id247072a416c9e3da9de801a3daacf1b60ff3f24 +--- + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 ++ + .../golang.org/x/sys/unix/endian_little.go | 4 +- + .../golang.org/x/sys/unix/syscall_linux.go | 2 +- + .../x/sys/unix/syscall_linux_loong64.go | 221 +++++ + .../x/sys/unix/zerrors_linux_loong64.go | 831 ++++++++++++++++++ + .../x/sys/unix/zsyscall_linux_loong64.go | 563 ++++++++++++ + .../x/sys/unix/zsysnum_linux_loong64.go | 313 +++++++ + .../x/sys/unix/ztypes_linux_loong64.go | 667 ++++++++++++++ + 8 files changed, 2652 insertions(+), 3 deletions(-) + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go + create mode 100644 
src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +new file mode 100644 +index 0000000000..1ccfa5ded5 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 && gc ++// +build linux ++// +build loong64 ++// +build gc ++ ++#include "textflag.h" ++ ++ ++// Just jump to package syscall's implementation for all these functions. ++// The runtime may know about them. ++ ++TEXT ·Syscall(SB),NOSPLIT,$0-56 ++ JMP syscall·Syscall(SB) ++ ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ JMP syscall·Syscall6(SB) ++ ++TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·RawSyscall(SB),NOSPLIT,$0-56 ++ JMP syscall·RawSyscall(SB) ++ ++TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 ++ JMP syscall·RawSyscall6(SB) ++ ++TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ RET +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go +index 4362f47e2c..b0f2bc4ae3 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go ++++ b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE 
file. + // +-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +-// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh ++//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh ++// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh + + package unix + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +index 2dd7c8e34a..d40e0117ae 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go ++++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +@@ -1732,7 +1732,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e + + func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. +- if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { ++ if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +new file mode 100644 +index 0000000000..0714ce6141 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +@@ -0,0 +1,221 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++import "unsafe" ++ ++//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT ++//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 ++//sys Fchown(fd int, uid int, gid int) (err error) ++//sys Fstat(fd int, stat *Stat_t) (err error) ++//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) ++//sys Fstatfs(fd int, buf *Statfs_t) (err error) ++//sys Ftruncate(fd int, length int64) (err error) ++//sysnb Getegid() (egid int) ++//sysnb Geteuid() (euid int) ++//sysnb Getgid() (gid int) ++//sysnb Getuid() (uid int) ++//sys Listen(s int, n int) (err error) ++//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 ++//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 ++//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK ++ ++func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { ++ var ts *Timespec ++ if timeout != nil { ++ ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} ++ } ++ return Pselect(nfd, r, w, e, ts, nil) ++} ++ ++//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) ++//sys setfsgid(gid int) (prev int, err error) ++//sys setfsuid(uid int) (prev int, err error) ++//sysnb Setregid(rgid int, egid int) (err error) ++//sysnb Setresgid(rgid int, egid int, sgid int) (err error) ++//sysnb Setresuid(ruid int, euid int, suid int) (err error) ++//sysnb Setreuid(ruid int, euid int) (err error) ++//sys Shutdown(fd int, how int) (err error) ++//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) ++ ++func Stat(path string, stat *Stat_t) (err error) { ++ return Fstatat(AT_FDCWD, path, stat, 0) ++} ++ ++func Lchown(path string, uid int, gid int) (err error) { ++ return Fchownat(AT_FDCWD, path, uid, gid, 
AT_SYMLINK_NOFOLLOW) ++} ++ ++func Lstat(path string, stat *Stat_t) (err error) { ++ return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) ++} ++ ++//sys Statfs(path string, buf *Statfs_t) (err error) ++//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) ++//sys Truncate(path string, length int64) (err error) ++ ++func Ustat(dev int, ubuf *Ustat_t) (err error) { ++ return ENOSYS ++} ++ ++//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) ++//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) ++//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) ++//sysnb setgroups(n int, list *_Gid_t) (err error) ++//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) ++//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) ++//sysnb socket(domain int, typ int, proto int) (fd int, err error) ++//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) ++//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) ++//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) ++//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) ++ ++//sysnb Gettimeofday(tv *Timeval) (err error) ++ ++func setTimespec(sec, nsec int64) Timespec { ++ return Timespec{Sec: sec, Nsec: nsec} ++} ++ ++func setTimeval(sec, usec int64) Timeval { ++ return 
Timeval{Sec: sec, Usec: usec} ++} ++ ++func Pipe(p []int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, 0) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return err ++} ++ ++//sysnb pipe2(p *[2]_C_int, flags int) (err error) ++ ++func Pipe2(p []int, flags int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, flags) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return err ++} ++ ++func Getrlimit(resource int, rlim *Rlimit) (err error) { ++ err = prlimit(0, resource, nil, rlim) ++ return ++} ++ ++func Setrlimit(resource int, rlim *Rlimit) (err error) { ++ err = prlimit(0, resource, rlim, nil) ++ return ++} ++ ++func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(dirfd, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func Time(t *Time_t) (Time_t, error) { ++ var tv Timeval ++ err := Gettimeofday(&tv) ++ if err != nil { ++ return 0, err ++ } ++ if t != nil { ++ *t = Time_t(tv.Sec) ++ } ++ return Time_t(tv.Sec), nil ++} ++ ++func Utime(path string, buf *Utimbuf) error { ++ tv := []Timeval{ ++ {Sec: buf.Actime}, ++ {Sec: buf.Modtime}, ++ } ++ return Utimes(path, tv) ++} ++ ++func utimes(path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(AT_FDCWD, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func (r *PtraceRegs) GetEra() uint64 { return r.Era } ++ ++func (r *PtraceRegs) SetEra(era uint64) { r.Era = era } ++ ++func (iov *Iovec) SetLen(length int) { ++ iov.Len = uint64(length) ++} ++ ++func (msghdr *Msghdr) SetControllen(length int) { ++ msghdr.Controllen = 
uint64(length) ++} ++ ++func (msghdr *Msghdr) SetIovlen(length int) { ++ msghdr.Iovlen = uint64(length) ++} ++ ++func (cmsg *Cmsghdr) SetLen(length int) { ++ cmsg.Len = uint64(length) ++} ++ ++func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { ++ rsa.Service_name_len = uint64(length) ++} ++ ++func Pause() error { ++ _, err := ppoll(nil, 0, nil, nil) ++ return err ++} ++ ++func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { ++ return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) ++} ++ ++//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) ++ ++func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { ++ cmdlineLen := len(cmdline) ++ if cmdlineLen > 0 { ++ // Account for the additional NULL byte added by ++ // BytePtrFromString in kexecFileLoad. The kexec_file_load ++ // syscall expects a NULL-terminated string. ++ cmdlineLen++ ++ } ++ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) ++} ++ ++// dup2 exists because func Dup3 in syscall_linux.go references ++// it in an unreachable path. dup2 isn't available on arm64. ++func dup2(oldfd int, newfd int) error ++ +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +new file mode 100644 +index 0000000000..0b9303175a +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +@@ -0,0 +1,831 @@ ++// mkerrors.sh -Wall -Werror -static -I/tmp/include ++// Code generated by the command above; see README.md. DO NOT EDIT. ++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
++// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go ++ ++package unix ++ ++import "syscall" ++ ++const ( ++ B1000000 = 0x1008 ++ B115200 = 0x1002 ++ B1152000 = 0x1009 ++ B1500000 = 0x100a ++ B2000000 = 0x100b ++ B230400 = 0x1003 ++ B2500000 = 0x100c ++ B3000000 = 0x100d ++ B3500000 = 0x100e ++ B4000000 = 0x100f ++ B460800 = 0x1004 ++ B500000 = 0x1005 ++ B57600 = 0x1001 ++ B576000 = 0x1006 ++ B921600 = 0x1007 ++ BLKBSZGET = 0x80081270 ++ BLKBSZSET = 0x40081271 ++ BLKFLSBUF = 0x1261 ++ BLKFRAGET = 0x1265 ++ BLKFRASET = 0x1264 ++ BLKGETSIZE = 0x1260 ++ BLKGETSIZE64 = 0x80081272 ++ BLKPBSZGET = 0x127b ++ BLKRAGET = 0x1263 ++ BLKRASET = 0x1262 ++ BLKROGET = 0x125e ++ BLKROSET = 0x125d ++ BLKRRPART = 0x125f ++ BLKSECTGET = 0x1267 ++ BLKSECTSET = 0x1266 ++ BLKSSZGET = 0x1268 ++ BOTHER = 0x1000 ++ BS1 = 0x2000 ++ BSDLY = 0x2000 ++ CBAUD = 0x100f ++ CBAUDEX = 0x1000 ++ CEPH_SUPER_MAGIC = 0xc36400 ++ CIBAUD = 0x100f0000 ++ CIFS_SUPER_MAGIC = 0xff534d42 ++ CLOCAL = 0x800 ++ CR1 = 0x200 ++ CR2 = 0x400 ++ CR3 = 0x600 ++ CRDLY = 0x600 ++ CREAD = 0x80 ++ CS6 = 0x10 ++ CS7 = 0x20 ++ CS8 = 0x30 ++ CSIZE = 0x30 ++ CSTOPB = 0x40 ++ ECCGETLAYOUT = 0x81484d11 ++ ECCGETSTATS = 0x80104d12 ++ ECHOCTL = 0x200 ++ ECHOE = 0x10 ++ ECHOK = 0x20 ++ ECHOKE = 0x800 ++ ECHONL = 0x40 ++ ECHOPRT = 0x400 ++ EFD_CLOEXEC = 0x80000 ++ EFD_NONBLOCK = 0x800 ++ EPOLL_CLOEXEC = 0x80000 ++ EXFAT_SUPER_MAGIC = 0x2011bab0 ++ EXTPROC = 0x10000 ++ FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc ++ FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa ++ FAN_RENAME = 0x10000000 ++ FAN_REPORT_DFID_NAME_TARGET = 0x1e00 ++ FAN_REPORT_TARGET_FID = 0x1000 ++ FF1 = 0x8000 ++ FFDLY = 0x8000 ++ FICLONE = 0x40049409 ++ FICLONERANGE = 0x4020940d ++ FLUSHO = 0x1000 ++ FS_IOC_ENABLE_VERITY = 0x40806685 ++ FS_IOC_GETFLAGS = 0x80086601 ++ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b ++ FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 ++ FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 ++ FS_IOC_SETFLAGS = 0x40086602 ++ FS_IOC_SET_ENCRYPTION_POLICY = 
0x800c6613 ++ F_GETLK = 0x5 ++ F_GETLK64 = 0x5 ++ F_GETOWN = 0x9 ++ F_RDLCK = 0x0 ++ F_SETLK = 0x6 ++ F_SETLK64 = 0x6 ++ F_SETLKW = 0x7 ++ F_SETLKW64 = 0x7 ++ F_SETOWN = 0x8 ++ F_UNLCK = 0x2 ++ F_WRLCK = 0x1 ++ HIDIOCGRAWINFO = 0x80084803 ++ HIDIOCGRDESC = 0x90044802 ++ HIDIOCGRDESCSIZE = 0x80044801 ++ HUPCL = 0x400 ++ ICANON = 0x2 ++ IEXTEN = 0x8000 ++ IN_CLOEXEC = 0x80000 ++ IN_NONBLOCK = 0x800 ++ IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 ++ ISIG = 0x1 ++ IUCLC = 0x200 ++ IXOFF = 0x1000 ++ IXON = 0x400 ++ KEXEC_ARCH_LOONGARCH = 0x1020000 ++ MAP_ANON = 0x20 ++ MAP_ANONYMOUS = 0x20 ++ MAP_DENYWRITE = 0x800 ++ MAP_EXECUTABLE = 0x1000 ++ MAP_GROWSDOWN = 0x100 ++ MAP_HUGETLB = 0x40000 ++ MAP_LOCKED = 0x2000 ++ MAP_NONBLOCK = 0x10000 ++ MAP_NORESERVE = 0x4000 ++ MAP_POPULATE = 0x8000 ++ MAP_STACK = 0x20000 ++ MAP_SYNC = 0x80000 ++ MCL_CURRENT = 0x1 ++ MCL_FUTURE = 0x2 ++ MCL_ONFAULT = 0x4 ++ MEMERASE = 0x40084d02 ++ MEMERASE64 = 0x40104d14 ++ MEMGETBADBLOCK = 0x40084d0b ++ MEMGETINFO = 0x80204d01 ++ MEMGETOOBSEL = 0x80c84d0a ++ MEMGETREGIONCOUNT = 0x80044d07 ++ MEMISLOCKED = 0x80084d17 ++ MEMLOCK = 0x40084d05 ++ MEMREADOOB = 0xc0104d04 ++ MEMSETBADBLOCK = 0x40084d0c ++ MEMUNLOCK = 0x40084d06 ++ MEMWRITEOOB = 0xc0104d03 ++ MODULE_INIT_COMPRESSED_FILE = 0x4 ++ MTDFILEMODE = 0x4d13 ++ NFDBITS = 0x40 ++ NLDLY = 0x100 ++ NOFLSH = 0x80 ++ NS_GET_NSTYPE = 0xb703 ++ NS_GET_OWNER_UID = 0xb704 ++ NS_GET_PARENT = 0xb702 ++ NS_GET_USERNS = 0xb701 ++ OLCUC = 0x2 ++ ONLCR = 0x4 ++ OTPERASE = 0x400c4d19 ++ OTPGETREGIONCOUNT = 0x40044d0e ++ OTPGETREGIONINFO = 0x400c4d0f ++ OTPLOCK = 0x800c4d10 ++ OTPSELECT = 0x80044d0d ++ O_APPEND = 0x400 ++ O_ASYNC = 0x2000 ++ O_CLOEXEC = 0x80000 ++ O_CREAT = 0x40 ++ O_DIRECT = 0x4000 ++ O_DIRECTORY = 0x10000 ++ O_DSYNC = 0x1000 ++ O_EXCL = 0x80 ++ O_FSYNC = 0x101000 ++ O_LARGEFILE = 0x0 ++ O_NDELAY = 0x800 ++ O_NOATIME = 0x40000 ++ O_NOCTTY = 0x100 ++ O_NOFOLLOW = 0x20000 ++ O_NONBLOCK = 0x800 ++ O_PATH = 0x200000 ++ O_RSYNC = 0x101000 ++ O_SYNC = 
0x101000 ++ O_TMPFILE = 0x410000 ++ O_TRUNC = 0x200 ++ PARENB = 0x100 ++ PARODD = 0x200 ++ PENDIN = 0x4000 ++ PERF_EVENT_IOC_DISABLE = 0x2401 ++ PERF_EVENT_IOC_ENABLE = 0x2400 ++ PERF_EVENT_IOC_ID = 0x80082407 ++ PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b ++ PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 ++ PERF_EVENT_IOC_PERIOD = 0x40082404 ++ PERF_EVENT_IOC_QUERY_BPF = 0xc008240a ++ PERF_EVENT_IOC_REFRESH = 0x2402 ++ PERF_EVENT_IOC_RESET = 0x2403 ++ PERF_EVENT_IOC_SET_BPF = 0x40042408 ++ PERF_EVENT_IOC_SET_FILTER = 0x40082406 ++ PERF_EVENT_IOC_SET_OUTPUT = 0x2405 ++ PERF_MEM_HOPS_1 = 0x2 ++ PERF_MEM_HOPS_2 = 0x3 ++ PERF_MEM_HOPS_3 = 0x4 ++ PPPIOCATTACH = 0x4004743d ++ PPPIOCATTCHAN = 0x40047438 ++ PPPIOCBRIDGECHAN = 0x40047435 ++ PPPIOCCONNECT = 0x4004743a ++ PPPIOCDETACH = 0x4004743c ++ PPPIOCDISCONN = 0x7439 ++ PPPIOCGASYNCMAP = 0x80047458 ++ PPPIOCGCHAN = 0x80047437 ++ PPPIOCGDEBUG = 0x80047441 ++ PPPIOCGFLAGS = 0x8004745a ++ PPPIOCGIDLE = 0x8010743f ++ PPPIOCGIDLE32 = 0x8008743f ++ PPPIOCGIDLE64 = 0x8010743f ++ PPPIOCGL2TPSTATS = 0x80487436 ++ PPPIOCGMRU = 0x80047453 ++ PPPIOCGRASYNCMAP = 0x80047455 ++ PPPIOCGUNIT = 0x80047456 ++ PPPIOCGXASYNCMAP = 0x80207450 ++ PPPIOCSACTIVE = 0x40107446 ++ PPPIOCSASYNCMAP = 0x40047457 ++ PPPIOCSCOMPRESS = 0x4010744d ++ PPPIOCSDEBUG = 0x40047440 ++ PPPIOCSFLAGS = 0x40047459 ++ PPPIOCSMAXCID = 0x40047451 ++ PPPIOCSMRRU = 0x4004743b ++ PPPIOCSMRU = 0x40047452 ++ PPPIOCSNPMODE = 0x4008744b ++ PPPIOCSPASS = 0x40107447 ++ PPPIOCSRASYNCMAP = 0x40047454 ++ PPPIOCSXASYNCMAP = 0x4020744f ++ PPPIOCUNBRIDGECHAN = 0x7434 ++ PPPIOCXFERUNIT = 0x744e ++ PR_SET_PTRACER_ANY = 0xffffffffffffffff ++ PR_SET_VMA = 0x53564d41 ++ PR_SET_VMA_ANON_NAME = 0x0 ++ PTRACE_SYSEMU = 0x1f ++ PTRACE_SYSEMU_SINGLESTEP = 0x20 ++ RLIMIT_AS = 0x9 ++ RLIMIT_MEMLOCK = 0x8 ++ RLIMIT_NOFILE = 0x7 ++ RLIMIT_NPROC = 0x6 ++ RLIMIT_RSS = 0x5 ++ RNDADDENTROPY = 0x40085203 ++ RNDADDTOENTCNT = 0x40045201 ++ RNDCLEARPOOL = 0x5206 ++ RNDGETENTCNT = 0x80045200 ++ RNDGETPOOL = 
0x80085202 ++ RNDRESEEDCRNG = 0x5207 ++ RNDZAPENTCNT = 0x5204 ++ RTC_AIE_OFF = 0x7002 ++ RTC_AIE_ON = 0x7001 ++ RTC_ALM_READ = 0x80247008 ++ RTC_ALM_SET = 0x40247007 ++ RTC_EPOCH_READ = 0x8008700d ++ RTC_EPOCH_SET = 0x4008700e ++ RTC_IRQP_READ = 0x8008700b ++ RTC_IRQP_SET = 0x4008700c ++ RTC_PARAM_GET = 0x40187013 ++ RTC_PARAM_SET = 0x40187014 ++ RTC_PIE_OFF = 0x7006 ++ RTC_PIE_ON = 0x7005 ++ RTC_PLL_GET = 0x80207011 ++ RTC_PLL_SET = 0x40207012 ++ RTC_RD_TIME = 0x80247009 ++ RTC_SET_TIME = 0x4024700a ++ RTC_UIE_OFF = 0x7004 ++ RTC_UIE_ON = 0x7003 ++ RTC_VL_CLR = 0x7014 ++ RTC_VL_READ = 0x80047013 ++ RTC_WIE_OFF = 0x7010 ++ RTC_WIE_ON = 0x700f ++ RTC_WKALM_RD = 0x80287010 ++ RTC_WKALM_SET = 0x4028700f ++ SCM_TIMESTAMPING = 0x25 ++ SCM_TIMESTAMPING_OPT_STATS = 0x36 ++ SCM_TIMESTAMPING_PKTINFO = 0x3a ++ SCM_TIMESTAMPNS = 0x23 ++ SCM_TXTIME = 0x3d ++ SCM_WIFI_STATUS = 0x29 ++ SFD_CLOEXEC = 0x80000 ++ SFD_NONBLOCK = 0x800 ++ SIOCATMARK = 0x8905 ++ SIOCGPGRP = 0x8904 ++ SIOCGSTAMPNS_NEW = 0x80108907 ++ SIOCGSTAMP_NEW = 0x80108906 ++ SIOCINQ = 0x541b ++ SIOCOUTQ = 0x5411 ++ SIOCSPGRP = 0x8902 ++ SMB2_SUPER_MAGIC = 0xfe534d42 ++ SOCK_CLOEXEC = 0x80000 ++ SOCK_DGRAM = 0x2 ++ SOCK_NONBLOCK = 0x800 ++ SOCK_STREAM = 0x1 ++ SOL_SOCKET = 0x1 ++ SO_ACCEPTCONN = 0x1e ++ SO_ATTACH_BPF = 0x32 ++ SO_ATTACH_REUSEPORT_CBPF = 0x33 ++ SO_ATTACH_REUSEPORT_EBPF = 0x34 ++ SO_BINDTODEVICE = 0x19 ++ SO_BINDTOIFINDEX = 0x3e ++ SO_BPF_EXTENSIONS = 0x30 ++ SO_BROADCAST = 0x6 ++ SO_BSDCOMPAT = 0xe ++ SO_BUF_LOCK = 0x48 ++ SO_BUSY_POLL = 0x2e ++ SO_BUSY_POLL_BUDGET = 0x46 ++ SO_CNX_ADVICE = 0x35 ++ SO_COOKIE = 0x39 ++ SO_DETACH_REUSEPORT_BPF = 0x44 ++ SO_DOMAIN = 0x27 ++ SO_DONTROUTE = 0x5 ++ SO_ERROR = 0x4 ++ SO_INCOMING_CPU = 0x31 ++ SO_INCOMING_NAPI_ID = 0x38 ++ SO_KEEPALIVE = 0x9 ++ SO_LINGER = 0xd ++ SO_LOCK_FILTER = 0x2c ++ SO_MARK = 0x24 ++ SO_MAX_PACING_RATE = 0x2f ++ SO_MEMINFO = 0x37 ++ SO_NETNS_COOKIE = 0x47 ++ SO_NOFCS = 0x2b ++ SO_OOBINLINE = 0xa ++ SO_PASSCRED = 0x10 ++ SO_PASSSEC = 
0x22 ++ SO_PEEK_OFF = 0x2a ++ SO_PEERCRED = 0x11 ++ SO_PEERGROUPS = 0x3b ++ SO_PEERSEC = 0x1f ++ SO_PREFER_BUSY_POLL = 0x45 ++ SO_PROTOCOL = 0x26 ++ SO_RCVBUF = 0x8 ++ SO_RCVBUFFORCE = 0x21 ++ SO_RCVLOWAT = 0x12 ++ SO_RCVTIMEO = 0x14 ++ SO_RCVTIMEO_NEW = 0x42 ++ SO_RCVTIMEO_OLD = 0x14 ++ SO_RESERVE_MEM = 0x49 ++ SO_REUSEADDR = 0x2 ++ SO_REUSEPORT = 0xf ++ SO_RXQ_OVFL = 0x28 ++ SO_SECURITY_AUTHENTICATION = 0x16 ++ SO_SECURITY_ENCRYPTION_NETWORK = 0x18 ++ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 ++ SO_SELECT_ERR_QUEUE = 0x2d ++ SO_SNDBUF = 0x7 ++ SO_SNDBUFFORCE = 0x20 ++ SO_SNDLOWAT = 0x13 ++ SO_SNDTIMEO = 0x15 ++ SO_SNDTIMEO_NEW = 0x43 ++ SO_SNDTIMEO_OLD = 0x15 ++ SO_TIMESTAMPING = 0x25 ++ SO_TIMESTAMPING_NEW = 0x41 ++ SO_TIMESTAMPING_OLD = 0x25 ++ SO_TIMESTAMPNS = 0x23 ++ SO_TIMESTAMPNS_NEW = 0x40 ++ SO_TIMESTAMPNS_OLD = 0x23 ++ SO_TIMESTAMP_NEW = 0x3f ++ SO_TXTIME = 0x3d ++ SO_TYPE = 0x3 ++ SO_WIFI_STATUS = 0x29 ++ SO_ZEROCOPY = 0x3c ++ TAB1 = 0x800 ++ TAB2 = 0x1000 ++ TAB3 = 0x1800 ++ TABDLY = 0x1800 ++ TCFLSH = 0x540b ++ TCGETA = 0x5405 ++ TCGETS = 0x5401 ++ TCGETS2 = 0x802c542a ++ TCGETX = 0x5432 ++ TCSAFLUSH = 0x2 ++ TCSBRK = 0x5409 ++ TCSBRKP = 0x5425 ++ TCSETA = 0x5406 ++ TCSETAF = 0x5408 ++ TCSETAW = 0x5407 ++ TCSETS = 0x5402 ++ TCSETS2 = 0x402c542b ++ TCSETSF = 0x5404 ++ TCSETSF2 = 0x402c542d ++ TCSETSW = 0x5403 ++ TCSETSW2 = 0x402c542c ++ TCSETX = 0x5433 ++ TCSETXF = 0x5434 ++ TCSETXW = 0x5435 ++ TCXONC = 0x540a ++ TFD_CLOEXEC = 0x80000 ++ TFD_NONBLOCK = 0x800 ++ TIOCCBRK = 0x5428 ++ TIOCCONS = 0x541d ++ TIOCEXCL = 0x540c ++ TIOCGDEV = 0x80045432 ++ TIOCGETD = 0x5424 ++ TIOCGEXCL = 0x80045440 ++ TIOCGICOUNT = 0x545d ++ TIOCGISO7816 = 0x80285442 ++ TIOCGLCKTRMIOS = 0x5456 ++ TIOCGPGRP = 0x540f ++ TIOCGPKT = 0x80045438 ++ TIOCGPTLCK = 0x80045439 ++ TIOCGPTN = 0x80045430 ++ TIOCGPTPEER = 0x5441 ++ TIOCGRS485 = 0x542e ++ TIOCGSERIAL = 0x541e ++ TIOCGSID = 0x5429 ++ TIOCGSOFTCAR = 0x5419 ++ TIOCGWINSZ = 0x5413 ++ TIOCINQ = 0x541b ++ TIOCLINUX = 0x541c ++ 
TIOCMBIC = 0x5417 ++ TIOCMBIS = 0x5416 ++ TIOCMGET = 0x5415 ++ TIOCMIWAIT = 0x545c ++ TIOCMSET = 0x5418 ++ TIOCM_CAR = 0x40 ++ TIOCM_CD = 0x40 ++ TIOCM_CTS = 0x20 ++ TIOCM_DSR = 0x100 ++ TIOCM_RI = 0x80 ++ TIOCM_RNG = 0x80 ++ TIOCM_SR = 0x10 ++ TIOCM_ST = 0x8 ++ TIOCNOTTY = 0x5422 ++ TIOCNXCL = 0x540d ++ TIOCOUTQ = 0x5411 ++ TIOCPKT = 0x5420 ++ TIOCSBRK = 0x5427 ++ TIOCSCTTY = 0x540e ++ TIOCSERCONFIG = 0x5453 ++ TIOCSERGETLSR = 0x5459 ++ TIOCSERGETMULTI = 0x545a ++ TIOCSERGSTRUCT = 0x5458 ++ TIOCSERGWILD = 0x5454 ++ TIOCSERSETMULTI = 0x545b ++ TIOCSERSWILD = 0x5455 ++ TIOCSER_TEMT = 0x1 ++ TIOCSETD = 0x5423 ++ TIOCSIG = 0x40045436 ++ TIOCSISO7816 = 0xc0285443 ++ TIOCSLCKTRMIOS = 0x5457 ++ TIOCSPGRP = 0x5410 ++ TIOCSPTLCK = 0x40045431 ++ TIOCSRS485 = 0x542f ++ TIOCSSERIAL = 0x541f ++ TIOCSSOFTCAR = 0x541a ++ TIOCSTI = 0x5412 ++ TIOCSWINSZ = 0x5414 ++ TIOCVHANGUP = 0x5437 ++ TOSTOP = 0x100 ++ TUNATTACHFILTER = 0x401054d5 ++ TUNDETACHFILTER = 0x401054d6 ++ TUNGETDEVNETNS = 0x54e3 ++ TUNGETFEATURES = 0x800454cf ++ TUNGETFILTER = 0x801054db ++ TUNGETIFF = 0x800454d2 ++ TUNGETSNDBUF = 0x800454d3 ++ TUNGETVNETBE = 0x800454df ++ TUNGETVNETHDRSZ = 0x800454d7 ++ TUNGETVNETLE = 0x800454dd ++ TUNSETCARRIER = 0x400454e2 ++ TUNSETDEBUG = 0x400454c9 ++ TUNSETFILTEREBPF = 0x800454e1 ++ TUNSETGROUP = 0x400454ce ++ TUNSETIFF = 0x400454ca ++ TUNSETIFINDEX = 0x400454da ++ TUNSETLINK = 0x400454cd ++ TUNSETNOCSUM = 0x400454c8 ++ TUNSETOFFLOAD = 0x400454d0 ++ TUNSETOWNER = 0x400454cc ++ TUNSETPERSIST = 0x400454cb ++ TUNSETQUEUE = 0x400454d9 ++ TUNSETSNDBUF = 0x400454d4 ++ TUNSETSTEERINGEBPF = 0x800454e0 ++ TUNSETTXFILTER = 0x400454d1 ++ TUNSETVNETBE = 0x400454de ++ TUNSETVNETHDRSZ = 0x400454d8 ++ TUNSETVNETLE = 0x400454dc ++ UBI_IOCATT = 0x40186f40 ++ UBI_IOCDET = 0x40046f41 ++ UBI_IOCEBCH = 0x40044f02 ++ UBI_IOCEBER = 0x40044f01 ++ UBI_IOCEBISMAP = 0x80044f05 ++ UBI_IOCEBMAP = 0x40084f03 ++ UBI_IOCEBUNMAP = 0x40044f04 ++ UBI_IOCMKVOL = 0x40986f00 ++ UBI_IOCRMVOL = 0x40046f01 ++ 
UBI_IOCRNVOL = 0x51106f03 ++ UBI_IOCRPEB = 0x40046f04 ++ UBI_IOCRSVOL = 0x400c6f02 ++ UBI_IOCSETVOLPROP = 0x40104f06 ++ UBI_IOCSPEB = 0x40046f05 ++ UBI_IOCVOLCRBLK = 0x40804f07 ++ UBI_IOCVOLRMBLK = 0x4f08 ++ UBI_IOCVOLUP = 0x40084f00 ++ VDISCARD = 0xd ++ VEOF = 0x4 ++ VEOL = 0xb ++ VEOL2 = 0x10 ++ VMIN = 0x6 ++ VREPRINT = 0xc ++ VSTART = 0x8 ++ VSTOP = 0x9 ++ VSUSP = 0xa ++ VSWTC = 0x7 ++ VT1 = 0x4000 ++ VTDLY = 0x4000 ++ VTIME = 0x5 ++ VWERASE = 0xe ++ WDIOC_GETBOOTSTATUS = 0x80045702 ++ WDIOC_GETPRETIMEOUT = 0x80045709 ++ WDIOC_GETSTATUS = 0x80045701 ++ WDIOC_GETSUPPORT = 0x80285700 ++ WDIOC_GETTEMP = 0x80045703 ++ WDIOC_GETTIMELEFT = 0x8004570a ++ WDIOC_GETTIMEOUT = 0x80045707 ++ WDIOC_KEEPALIVE = 0x80045705 ++ WDIOC_SETOPTIONS = 0x80045704 ++ WORDSIZE = 0x40 ++ XCASE = 0x4 ++ XTABS = 0x1800 ++ _HIDIOCGRAWNAME = 0x80804804 ++ _HIDIOCGRAWPHYS = 0x80404805 ++ _HIDIOCGRAWUNIQ = 0x80404808 ++) ++ ++// Errors ++const ( ++ EADDRINUSE = syscall.Errno(0x62) ++ EADDRNOTAVAIL = syscall.Errno(0x63) ++ EADV = syscall.Errno(0x44) ++ EAFNOSUPPORT = syscall.Errno(0x61) ++ EALREADY = syscall.Errno(0x72) ++ EBADE = syscall.Errno(0x34) ++ EBADFD = syscall.Errno(0x4d) ++ EBADMSG = syscall.Errno(0x4a) ++ EBADR = syscall.Errno(0x35) ++ EBADRQC = syscall.Errno(0x38) ++ EBADSLT = syscall.Errno(0x39) ++ EBFONT = syscall.Errno(0x3b) ++ ECANCELED = syscall.Errno(0x7d) ++ ECHRNG = syscall.Errno(0x2c) ++ ECOMM = syscall.Errno(0x46) ++ ECONNABORTED = syscall.Errno(0x67) ++ ECONNREFUSED = syscall.Errno(0x6f) ++ ECONNRESET = syscall.Errno(0x68) ++ EDEADLK = syscall.Errno(0x23) ++ EDEADLOCK = syscall.Errno(0x23) ++ EDESTADDRREQ = syscall.Errno(0x59) ++ EDOTDOT = syscall.Errno(0x49) ++ EDQUOT = syscall.Errno(0x7a) ++ EHOSTDOWN = syscall.Errno(0x70) ++ EHOSTUNREACH = syscall.Errno(0x71) ++ EHWPOISON = syscall.Errno(0x85) ++ EIDRM = syscall.Errno(0x2b) ++ EILSEQ = syscall.Errno(0x54) ++ EINPROGRESS = syscall.Errno(0x73) ++ EISCONN = syscall.Errno(0x6a) ++ EISNAM = syscall.Errno(0x78) ++ 
EKEYEXPIRED = syscall.Errno(0x7f) ++ EKEYREJECTED = syscall.Errno(0x81) ++ EKEYREVOKED = syscall.Errno(0x80) ++ EL2HLT = syscall.Errno(0x33) ++ EL2NSYNC = syscall.Errno(0x2d) ++ EL3HLT = syscall.Errno(0x2e) ++ EL3RST = syscall.Errno(0x2f) ++ ELIBACC = syscall.Errno(0x4f) ++ ELIBBAD = syscall.Errno(0x50) ++ ELIBEXEC = syscall.Errno(0x53) ++ ELIBMAX = syscall.Errno(0x52) ++ ELIBSCN = syscall.Errno(0x51) ++ ELNRNG = syscall.Errno(0x30) ++ ELOOP = syscall.Errno(0x28) ++ EMEDIUMTYPE = syscall.Errno(0x7c) ++ EMSGSIZE = syscall.Errno(0x5a) ++ EMULTIHOP = syscall.Errno(0x48) ++ ENAMETOOLONG = syscall.Errno(0x24) ++ ENAVAIL = syscall.Errno(0x77) ++ ENETDOWN = syscall.Errno(0x64) ++ ENETRESET = syscall.Errno(0x66) ++ ENETUNREACH = syscall.Errno(0x65) ++ ENOANO = syscall.Errno(0x37) ++ ENOBUFS = syscall.Errno(0x69) ++ ENOCSI = syscall.Errno(0x32) ++ ENODATA = syscall.Errno(0x3d) ++ ENOKEY = syscall.Errno(0x7e) ++ ENOLCK = syscall.Errno(0x25) ++ ENOLINK = syscall.Errno(0x43) ++ ENOMEDIUM = syscall.Errno(0x7b) ++ ENOMSG = syscall.Errno(0x2a) ++ ENONET = syscall.Errno(0x40) ++ ENOPKG = syscall.Errno(0x41) ++ ENOPROTOOPT = syscall.Errno(0x5c) ++ ENOSR = syscall.Errno(0x3f) ++ ENOSTR = syscall.Errno(0x3c) ++ ENOSYS = syscall.Errno(0x26) ++ ENOTCONN = syscall.Errno(0x6b) ++ ENOTEMPTY = syscall.Errno(0x27) ++ ENOTNAM = syscall.Errno(0x76) ++ ENOTRECOVERABLE = syscall.Errno(0x83) ++ ENOTSOCK = syscall.Errno(0x58) ++ ENOTSUP = syscall.Errno(0x5f) ++ ENOTUNIQ = syscall.Errno(0x4c) ++ EOPNOTSUPP = syscall.Errno(0x5f) ++ EOVERFLOW = syscall.Errno(0x4b) ++ EOWNERDEAD = syscall.Errno(0x82) ++ EPFNOSUPPORT = syscall.Errno(0x60) ++ EPROTO = syscall.Errno(0x47) ++ EPROTONOSUPPORT = syscall.Errno(0x5d) ++ EPROTOTYPE = syscall.Errno(0x5b) ++ EREMCHG = syscall.Errno(0x4e) ++ EREMOTE = syscall.Errno(0x42) ++ EREMOTEIO = syscall.Errno(0x79) ++ ERESTART = syscall.Errno(0x55) ++ ERFKILL = syscall.Errno(0x84) ++ ESHUTDOWN = syscall.Errno(0x6c) ++ ESOCKTNOSUPPORT = syscall.Errno(0x5e) ++ ESRMNT = 
syscall.Errno(0x45) ++ ESTALE = syscall.Errno(0x74) ++ ESTRPIPE = syscall.Errno(0x56) ++ ETIME = syscall.Errno(0x3e) ++ ETIMEDOUT = syscall.Errno(0x6e) ++ ETOOMANYREFS = syscall.Errno(0x6d) ++ EUCLEAN = syscall.Errno(0x75) ++ EUNATCH = syscall.Errno(0x31) ++ EUSERS = syscall.Errno(0x57) ++ EXFULL = syscall.Errno(0x36) ++) ++ ++// Signals ++const ( ++ SIGBUS = syscall.Signal(0x7) ++ SIGCHLD = syscall.Signal(0x11) ++ SIGCLD = syscall.Signal(0x11) ++ SIGCONT = syscall.Signal(0x12) ++ SIGIO = syscall.Signal(0x1d) ++ SIGPOLL = syscall.Signal(0x1d) ++ SIGPROF = syscall.Signal(0x1b) ++ SIGPWR = syscall.Signal(0x1e) ++ SIGSTKFLT = syscall.Signal(0x10) ++ SIGSTOP = syscall.Signal(0x13) ++ SIGSYS = syscall.Signal(0x1f) ++ SIGTSTP = syscall.Signal(0x14) ++ SIGTTIN = syscall.Signal(0x15) ++ SIGTTOU = syscall.Signal(0x16) ++ SIGURG = syscall.Signal(0x17) ++ SIGUSR1 = syscall.Signal(0xa) ++ SIGUSR2 = syscall.Signal(0xc) ++ SIGVTALRM = syscall.Signal(0x1a) ++ SIGWINCH = syscall.Signal(0x1c) ++ SIGXCPU = syscall.Signal(0x18) ++ SIGXFSZ = syscall.Signal(0x19) ++) ++ ++// Error table ++var errorList = [...]struct { ++ num syscall.Errno ++ name string ++ desc string ++}{ ++ {1, "EPERM", "operation not permitted"}, ++ {2, "ENOENT", "no such file or directory"}, ++ {3, "ESRCH", "no such process"}, ++ {4, "EINTR", "interrupted system call"}, ++ {5, "EIO", "input/output error"}, ++ {6, "ENXIO", "no such device or address"}, ++ {7, "E2BIG", "argument list too long"}, ++ {8, "ENOEXEC", "exec format error"}, ++ {9, "EBADF", "bad file descriptor"}, ++ {10, "ECHILD", "no child processes"}, ++ {11, "EAGAIN", "resource temporarily unavailable"}, ++ {12, "ENOMEM", "cannot allocate memory"}, ++ {13, "EACCES", "permission denied"}, ++ {14, "EFAULT", "bad address"}, ++ {15, "ENOTBLK", "block device required"}, ++ {16, "EBUSY", "device or resource busy"}, ++ {17, "EEXIST", "file exists"}, ++ {18, "EXDEV", "invalid cross-device link"}, ++ {19, "ENODEV", "no such device"}, ++ {20, "ENOTDIR", "not a 
directory"}, ++ {21, "EISDIR", "is a directory"}, ++ {22, "EINVAL", "invalid argument"}, ++ {23, "ENFILE", "too many open files in system"}, ++ {24, "EMFILE", "too many open files"}, ++ {25, "ENOTTY", "inappropriate ioctl for device"}, ++ {26, "ETXTBSY", "text file busy"}, ++ {27, "EFBIG", "file too large"}, ++ {28, "ENOSPC", "no space left on device"}, ++ {29, "ESPIPE", "illegal seek"}, ++ {30, "EROFS", "read-only file system"}, ++ {31, "EMLINK", "too many links"}, ++ {32, "EPIPE", "broken pipe"}, ++ {33, "EDOM", "numerical argument out of domain"}, ++ {34, "ERANGE", "numerical result out of range"}, ++ {35, "EDEADLK", "resource deadlock avoided"}, ++ {36, "ENAMETOOLONG", "file name too long"}, ++ {37, "ENOLCK", "no locks available"}, ++ {38, "ENOSYS", "function not implemented"}, ++ {39, "ENOTEMPTY", "directory not empty"}, ++ {40, "ELOOP", "too many levels of symbolic links"}, ++ {42, "ENOMSG", "no message of desired type"}, ++ {43, "EIDRM", "identifier removed"}, ++ {44, "ECHRNG", "channel number out of range"}, ++ {45, "EL2NSYNC", "level 2 not synchronized"}, ++ {46, "EL3HLT", "level 3 halted"}, ++ {47, "EL3RST", "level 3 reset"}, ++ {48, "ELNRNG", "link number out of range"}, ++ {49, "EUNATCH", "protocol driver not attached"}, ++ {50, "ENOCSI", "no CSI structure available"}, ++ {51, "EL2HLT", "level 2 halted"}, ++ {52, "EBADE", "invalid exchange"}, ++ {53, "EBADR", "invalid request descriptor"}, ++ {54, "EXFULL", "exchange full"}, ++ {55, "ENOANO", "no anode"}, ++ {56, "EBADRQC", "invalid request code"}, ++ {57, "EBADSLT", "invalid slot"}, ++ {59, "EBFONT", "bad font file format"}, ++ {60, "ENOSTR", "device not a stream"}, ++ {61, "ENODATA", "no data available"}, ++ {62, "ETIME", "timer expired"}, ++ {63, "ENOSR", "out of streams resources"}, ++ {64, "ENONET", "machine is not on the network"}, ++ {65, "ENOPKG", "package not installed"}, ++ {66, "EREMOTE", "object is remote"}, ++ {67, "ENOLINK", "link has been severed"}, ++ {68, "EADV", "advertise error"}, ++ 
{69, "ESRMNT", "srmount error"}, ++ {70, "ECOMM", "communication error on send"}, ++ {71, "EPROTO", "protocol error"}, ++ {72, "EMULTIHOP", "multihop attempted"}, ++ {73, "EDOTDOT", "RFS specific error"}, ++ {74, "EBADMSG", "bad message"}, ++ {75, "EOVERFLOW", "value too large for defined data type"}, ++ {76, "ENOTUNIQ", "name not unique on network"}, ++ {77, "EBADFD", "file descriptor in bad state"}, ++ {78, "EREMCHG", "remote address changed"}, ++ {79, "ELIBACC", "can not access a needed shared library"}, ++ {80, "ELIBBAD", "accessing a corrupted shared library"}, ++ {81, "ELIBSCN", ".lib section in a.out corrupted"}, ++ {82, "ELIBMAX", "attempting to link in too many shared libraries"}, ++ {83, "ELIBEXEC", "cannot exec a shared library directly"}, ++ {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, ++ {85, "ERESTART", "interrupted system call should be restarted"}, ++ {86, "ESTRPIPE", "streams pipe error"}, ++ {87, "EUSERS", "too many users"}, ++ {88, "ENOTSOCK", "socket operation on non-socket"}, ++ {89, "EDESTADDRREQ", "destination address required"}, ++ {90, "EMSGSIZE", "message too long"}, ++ {91, "EPROTOTYPE", "protocol wrong type for socket"}, ++ {92, "ENOPROTOOPT", "protocol not available"}, ++ {93, "EPROTONOSUPPORT", "protocol not supported"}, ++ {94, "ESOCKTNOSUPPORT", "socket type not supported"}, ++ {95, "ENOTSUP", "operation not supported"}, ++ {96, "EPFNOSUPPORT", "protocol family not supported"}, ++ {97, "EAFNOSUPPORT", "address family not supported by protocol"}, ++ {98, "EADDRINUSE", "address already in use"}, ++ {99, "EADDRNOTAVAIL", "cannot assign requested address"}, ++ {100, "ENETDOWN", "network is down"}, ++ {101, "ENETUNREACH", "network is unreachable"}, ++ {102, "ENETRESET", "network dropped connection on reset"}, ++ {103, "ECONNABORTED", "software caused connection abort"}, ++ {104, "ECONNRESET", "connection reset by peer"}, ++ {105, "ENOBUFS", "no buffer space available"}, ++ {106, "EISCONN", "transport endpoint is 
already connected"}, ++ {107, "ENOTCONN", "transport endpoint is not connected"}, ++ {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, ++ {109, "ETOOMANYREFS", "too many references: cannot splice"}, ++ {110, "ETIMEDOUT", "connection timed out"}, ++ {111, "ECONNREFUSED", "connection refused"}, ++ {112, "EHOSTDOWN", "host is down"}, ++ {113, "EHOSTUNREACH", "no route to host"}, ++ {114, "EALREADY", "operation already in progress"}, ++ {115, "EINPROGRESS", "operation now in progress"}, ++ {116, "ESTALE", "stale file handle"}, ++ {117, "EUCLEAN", "structure needs cleaning"}, ++ {118, "ENOTNAM", "not a XENIX named type file"}, ++ {119, "ENAVAIL", "no XENIX semaphores available"}, ++ {120, "EISNAM", "is a named type file"}, ++ {121, "EREMOTEIO", "remote I/O error"}, ++ {122, "EDQUOT", "disk quota exceeded"}, ++ {123, "ENOMEDIUM", "no medium found"}, ++ {124, "EMEDIUMTYPE", "wrong medium type"}, ++ {125, "ECANCELED", "operation canceled"}, ++ {126, "ENOKEY", "required key not available"}, ++ {127, "EKEYEXPIRED", "key has expired"}, ++ {128, "EKEYREVOKED", "key has been revoked"}, ++ {129, "EKEYREJECTED", "key was rejected by service"}, ++ {130, "EOWNERDEAD", "owner died"}, ++ {131, "ENOTRECOVERABLE", "state not recoverable"}, ++ {132, "ERFKILL", "operation not possible due to RF-kill"}, ++ {133, "EHWPOISON", "memory page has hardware error"}, ++} ++ ++// Signal table ++var signalList = [...]struct { ++ num syscall.Signal ++ name string ++ desc string ++}{ ++ {1, "SIGHUP", "hangup"}, ++ {2, "SIGINT", "interrupt"}, ++ {3, "SIGQUIT", "quit"}, ++ {4, "SIGILL", "illegal instruction"}, ++ {5, "SIGTRAP", "trace/breakpoint trap"}, ++ {6, "SIGABRT", "aborted"}, ++ {7, "SIGBUS", "bus error"}, ++ {8, "SIGFPE", "floating point exception"}, ++ {9, "SIGKILL", "killed"}, ++ {10, "SIGUSR1", "user defined signal 1"}, ++ {11, "SIGSEGV", "segmentation fault"}, ++ {12, "SIGUSR2", "user defined signal 2"}, ++ {13, "SIGPIPE", "broken pipe"}, ++ {14, "SIGALRM", "alarm 
clock"}, ++ {15, "SIGTERM", "terminated"}, ++ {16, "SIGSTKFLT", "stack fault"}, ++ {17, "SIGCHLD", "child exited"}, ++ {18, "SIGCONT", "continued"}, ++ {19, "SIGSTOP", "stopped (signal)"}, ++ {20, "SIGTSTP", "stopped"}, ++ {21, "SIGTTIN", "stopped (tty input)"}, ++ {22, "SIGTTOU", "stopped (tty output)"}, ++ {23, "SIGURG", "urgent I/O condition"}, ++ {24, "SIGXCPU", "CPU time limit exceeded"}, ++ {25, "SIGXFSZ", "file size limit exceeded"}, ++ {26, "SIGVTALRM", "virtual timer expired"}, ++ {27, "SIGPROF", "profiling timer expired"}, ++ {28, "SIGWINCH", "window changed"}, ++ {29, "SIGIO", "I/O possible"}, ++ {30, "SIGPWR", "power failure"}, ++ {31, "SIGSYS", "bad system call"}, ++} +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +new file mode 100644 +index 0000000000..e455a8b9b0 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +@@ -0,0 +1,563 @@ ++// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go ++// Code generated by the command above; see README.md. DO NOT EDIT. 
++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package unix ++ ++import ( ++ "syscall" ++ "unsafe" ++) ++ ++var _ syscall.Errno ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { ++ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { ++ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(events) > 0 { ++ _p0 = unsafe.Pointer(&events[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fadvise(fd int, offset int64, length int64, advice int) (err error) { ++ _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ 
++func Fchown(fd int, uid int, gid int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstat(fd int, stat *Stat_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatfs(fd int, buf *Statfs_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Ftruncate(fd int, length int64) (err error) { ++ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getegid() (egid int) { ++ r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) ++ egid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Geteuid() (euid int) { ++ r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) ++ euid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getgid() (gid int) { ++ r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) ++ gid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT ++ ++func Getuid() (uid int) { ++ r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) ++ uid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listen(s int, n int) (err error) { ++ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pread(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pwrite(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Seek(fd int, offset int64, whence int) (off int64, err error) { ++ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) ++ off = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { ++ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) ++ written = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func 
setfsgid(gid int) (prev int, err error) { ++ r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) ++ prev = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setfsuid(uid int) (prev int, err error) { ++ r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) ++ prev = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setregid(rgid int, egid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setresgid(rgid int, egid int, sgid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setresuid(ruid int, euid int, suid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setreuid(ruid int, euid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Shutdown(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), 
uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Statfs(path string, buf *Statfs_t) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { ++ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Truncate(path string, length int64) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { ++ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func bind(s 
int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getgroups(n int, list *_Gid_t) (nn int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ nn = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setgroups(n int, list *_Gid_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { ++ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socket(domain int, typ int, proto int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) ++ fd = int(r0) ++ if e1 
!= 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { ++ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), 
uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { ++ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) ++ xaddr = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(cmdline) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +new file mode 100644 +index 
0000000000..e443f9a322 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +@@ -0,0 +1,313 @@ ++// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h ++// Code generated by the command above; see README.md. DO NOT EDIT. ++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++const ( ++ SYS_IO_SETUP = 0 ++ SYS_IO_DESTROY = 1 ++ SYS_IO_SUBMIT = 2 ++ SYS_IO_CANCEL = 3 ++ SYS_IO_GETEVENTS = 4 ++ SYS_SETXATTR = 5 ++ SYS_LSETXATTR = 6 ++ SYS_FSETXATTR = 7 ++ SYS_GETXATTR = 8 ++ SYS_LGETXATTR = 9 ++ SYS_FGETXATTR = 10 ++ SYS_LISTXATTR = 11 ++ SYS_LLISTXATTR = 12 ++ SYS_FLISTXATTR = 13 ++ SYS_REMOVEXATTR = 14 ++ SYS_LREMOVEXATTR = 15 ++ SYS_FREMOVEXATTR = 16 ++ SYS_GETCWD = 17 ++ SYS_LOOKUP_DCOOKIE = 18 ++ SYS_EVENTFD2 = 19 ++ SYS_EPOLL_CREATE1 = 20 ++ SYS_EPOLL_CTL = 21 ++ SYS_EPOLL_PWAIT = 22 ++ SYS_DUP = 23 ++ SYS_DUP3 = 24 ++ SYS_FCNTL = 25 ++ SYS_INOTIFY_INIT1 = 26 ++ SYS_INOTIFY_ADD_WATCH = 27 ++ SYS_INOTIFY_RM_WATCH = 28 ++ SYS_IOCTL = 29 ++ SYS_IOPRIO_SET = 30 ++ SYS_IOPRIO_GET = 31 ++ SYS_FLOCK = 32 ++ SYS_MKNODAT = 33 ++ SYS_MKDIRAT = 34 ++ SYS_UNLINKAT = 35 ++ SYS_SYMLINKAT = 36 ++ SYS_LINKAT = 37 ++ SYS_UMOUNT2 = 39 ++ SYS_MOUNT = 40 ++ SYS_PIVOT_ROOT = 41 ++ SYS_NFSSERVCTL = 42 ++ SYS_STATFS = 43 ++ SYS_FSTATFS = 44 ++ SYS_TRUNCATE = 45 ++ SYS_FTRUNCATE = 46 ++ SYS_FALLOCATE = 47 ++ SYS_FACCESSAT = 48 ++ SYS_CHDIR = 49 ++ SYS_FCHDIR = 50 ++ SYS_CHROOT = 51 ++ SYS_FCHMOD = 52 ++ SYS_FCHMODAT = 53 ++ SYS_FCHOWNAT = 54 ++ SYS_FCHOWN = 55 ++ SYS_OPENAT = 56 ++ SYS_CLOSE = 57 ++ SYS_VHANGUP = 58 ++ SYS_PIPE2 = 59 ++ SYS_QUOTACTL = 60 ++ SYS_GETDENTS64 = 61 ++ SYS_LSEEK = 62 ++ SYS_READ = 63 ++ SYS_WRITE = 64 ++ SYS_READV = 65 ++ SYS_WRITEV = 66 ++ SYS_PREAD64 = 67 ++ SYS_PWRITE64 = 68 ++ SYS_PREADV = 69 ++ SYS_PWRITEV = 70 ++ SYS_SENDFILE = 71 ++ SYS_PSELECT6 = 72 ++ SYS_PPOLL = 73 ++ SYS_SIGNALFD4 = 74 ++ SYS_VMSPLICE = 75 ++ SYS_SPLICE = 76 ++ SYS_TEE = 77 ++ 
SYS_READLINKAT = 78 ++ SYS_FSTATAT = 79 ++ SYS_FSTAT = 80 ++ SYS_SYNC = 81 ++ SYS_FSYNC = 82 ++ SYS_FDATASYNC = 83 ++ SYS_SYNC_FILE_RANGE = 84 ++ SYS_TIMERFD_CREATE = 85 ++ SYS_TIMERFD_SETTIME = 86 ++ SYS_TIMERFD_GETTIME = 87 ++ SYS_UTIMENSAT = 88 ++ SYS_ACCT = 89 ++ SYS_CAPGET = 90 ++ SYS_CAPSET = 91 ++ SYS_PERSONALITY = 92 ++ SYS_EXIT = 93 ++ SYS_EXIT_GROUP = 94 ++ SYS_WAITID = 95 ++ SYS_SET_TID_ADDRESS = 96 ++ SYS_UNSHARE = 97 ++ SYS_FUTEX = 98 ++ SYS_SET_ROBUST_LIST = 99 ++ SYS_GET_ROBUST_LIST = 100 ++ SYS_NANOSLEEP = 101 ++ SYS_GETITIMER = 102 ++ SYS_SETITIMER = 103 ++ SYS_KEXEC_LOAD = 104 ++ SYS_INIT_MODULE = 105 ++ SYS_DELETE_MODULE = 106 ++ SYS_TIMER_CREATE = 107 ++ SYS_TIMER_GETTIME = 108 ++ SYS_TIMER_GETOVERRUN = 109 ++ SYS_TIMER_SETTIME = 110 ++ SYS_TIMER_DELETE = 111 ++ SYS_CLOCK_SETTIME = 112 ++ SYS_CLOCK_GETTIME = 113 ++ SYS_CLOCK_GETRES = 114 ++ SYS_CLOCK_NANOSLEEP = 115 ++ SYS_SYSLOG = 116 ++ SYS_PTRACE = 117 ++ SYS_SCHED_SETPARAM = 118 ++ SYS_SCHED_SETSCHEDULER = 119 ++ SYS_SCHED_GETSCHEDULER = 120 ++ SYS_SCHED_GETPARAM = 121 ++ SYS_SCHED_SETAFFINITY = 122 ++ SYS_SCHED_GETAFFINITY = 123 ++ SYS_SCHED_YIELD = 124 ++ SYS_SCHED_GET_PRIORITY_MAX = 125 ++ SYS_SCHED_GET_PRIORITY_MIN = 126 ++ SYS_SCHED_RR_GET_INTERVAL = 127 ++ SYS_RESTART_SYSCALL = 128 ++ SYS_KILL = 129 ++ SYS_TKILL = 130 ++ SYS_TGKILL = 131 ++ SYS_SIGALTSTACK = 132 ++ SYS_RT_SIGSUSPEND = 133 ++ SYS_RT_SIGACTION = 134 ++ SYS_RT_SIGPROCMASK = 135 ++ SYS_RT_SIGPENDING = 136 ++ SYS_RT_SIGTIMEDWAIT = 137 ++ SYS_RT_SIGQUEUEINFO = 138 ++ SYS_RT_SIGRETURN = 139 ++ SYS_SETPRIORITY = 140 ++ SYS_GETPRIORITY = 141 ++ SYS_REBOOT = 142 ++ SYS_SETREGID = 143 ++ SYS_SETGID = 144 ++ SYS_SETREUID = 145 ++ SYS_SETUID = 146 ++ SYS_SETRESUID = 147 ++ SYS_GETRESUID = 148 ++ SYS_SETRESGID = 149 ++ SYS_GETRESGID = 150 ++ SYS_SETFSUID = 151 ++ SYS_SETFSGID = 152 ++ SYS_TIMES = 153 ++ SYS_SETPGID = 154 ++ SYS_GETPGID = 155 ++ SYS_GETSID = 156 ++ SYS_SETSID = 157 ++ SYS_GETGROUPS = 158 ++ SYS_SETGROUPS = 159 ++ 
SYS_UNAME = 160 ++ SYS_SETHOSTNAME = 161 ++ SYS_SETDOMAINNAME = 162 ++ SYS_GETRUSAGE = 165 ++ SYS_UMASK = 166 ++ SYS_PRCTL = 167 ++ SYS_GETCPU = 168 ++ SYS_GETTIMEOFDAY = 169 ++ SYS_SETTIMEOFDAY = 170 ++ SYS_ADJTIMEX = 171 ++ SYS_GETPID = 172 ++ SYS_GETPPID = 173 ++ SYS_GETUID = 174 ++ SYS_GETEUID = 175 ++ SYS_GETGID = 176 ++ SYS_GETEGID = 177 ++ SYS_GETTID = 178 ++ SYS_SYSINFO = 179 ++ SYS_MQ_OPEN = 180 ++ SYS_MQ_UNLINK = 181 ++ SYS_MQ_TIMEDSEND = 182 ++ SYS_MQ_TIMEDRECEIVE = 183 ++ SYS_MQ_NOTIFY = 184 ++ SYS_MQ_GETSETATTR = 185 ++ SYS_MSGGET = 186 ++ SYS_MSGCTL = 187 ++ SYS_MSGRCV = 188 ++ SYS_MSGSND = 189 ++ SYS_SEMGET = 190 ++ SYS_SEMCTL = 191 ++ SYS_SEMTIMEDOP = 192 ++ SYS_SEMOP = 193 ++ SYS_SHMGET = 194 ++ SYS_SHMCTL = 195 ++ SYS_SHMAT = 196 ++ SYS_SHMDT = 197 ++ SYS_SOCKET = 198 ++ SYS_SOCKETPAIR = 199 ++ SYS_BIND = 200 ++ SYS_LISTEN = 201 ++ SYS_ACCEPT = 202 ++ SYS_CONNECT = 203 ++ SYS_GETSOCKNAME = 204 ++ SYS_GETPEERNAME = 205 ++ SYS_SENDTO = 206 ++ SYS_RECVFROM = 207 ++ SYS_SETSOCKOPT = 208 ++ SYS_GETSOCKOPT = 209 ++ SYS_SHUTDOWN = 210 ++ SYS_SENDMSG = 211 ++ SYS_RECVMSG = 212 ++ SYS_READAHEAD = 213 ++ SYS_BRK = 214 ++ SYS_MUNMAP = 215 ++ SYS_MREMAP = 216 ++ SYS_ADD_KEY = 217 ++ SYS_REQUEST_KEY = 218 ++ SYS_KEYCTL = 219 ++ SYS_CLONE = 220 ++ SYS_EXECVE = 221 ++ SYS_MMAP = 222 ++ SYS_FADVISE64 = 223 ++ SYS_SWAPON = 224 ++ SYS_SWAPOFF = 225 ++ SYS_MPROTECT = 226 ++ SYS_MSYNC = 227 ++ SYS_MLOCK = 228 ++ SYS_MUNLOCK = 229 ++ SYS_MLOCKALL = 230 ++ SYS_MUNLOCKALL = 231 ++ SYS_MINCORE = 232 ++ SYS_MADVISE = 233 ++ SYS_REMAP_FILE_PAGES = 234 ++ SYS_MBIND = 235 ++ SYS_GET_MEMPOLICY = 236 ++ SYS_SET_MEMPOLICY = 237 ++ SYS_MIGRATE_PAGES = 238 ++ SYS_MOVE_PAGES = 239 ++ SYS_RT_TGSIGQUEUEINFO = 240 ++ SYS_PERF_EVENT_OPEN = 241 ++ SYS_ACCEPT4 = 242 ++ SYS_RECVMMSG = 243 ++ SYS_ARCH_SPECIFIC_SYSCALL = 244 ++ SYS_WAIT4 = 260 ++ SYS_PRLIMIT64 = 261 ++ SYS_FANOTIFY_INIT = 262 ++ SYS_FANOTIFY_MARK = 263 ++ SYS_NAME_TO_HANDLE_AT = 264 ++ SYS_OPEN_BY_HANDLE_AT = 265 ++ 
SYS_CLOCK_ADJTIME = 266 ++ SYS_SYNCFS = 267 ++ SYS_SETNS = 268 ++ SYS_SENDMMSG = 269 ++ SYS_PROCESS_VM_READV = 270 ++ SYS_PROCESS_VM_WRITEV = 271 ++ SYS_KCMP = 272 ++ SYS_FINIT_MODULE = 273 ++ SYS_SCHED_SETATTR = 274 ++ SYS_SCHED_GETATTR = 275 ++ SYS_RENAMEAT2 = 276 ++ SYS_SECCOMP = 277 ++ SYS_GETRANDOM = 278 ++ SYS_MEMFD_CREATE = 279 ++ SYS_BPF = 280 ++ SYS_EXECVEAT = 281 ++ SYS_USERFAULTFD = 282 ++ SYS_MEMBARRIER = 283 ++ SYS_MLOCK2 = 284 ++ SYS_COPY_FILE_RANGE = 285 ++ SYS_PREADV2 = 286 ++ SYS_PWRITEV2 = 287 ++ SYS_PKEY_MPROTECT = 288 ++ SYS_PKEY_ALLOC = 289 ++ SYS_PKEY_FREE = 290 ++ SYS_STATX = 291 ++ SYS_IO_PGETEVENTS = 292 ++ SYS_RSEQ = 293 ++ SYS_KEXEC_FILE_LOAD = 294 ++ SYS_PIDFD_SEND_SIGNAL = 424 ++ SYS_IO_URING_SETUP = 425 ++ SYS_IO_URING_ENTER = 426 ++ SYS_IO_URING_REGISTER = 427 ++ SYS_OPEN_TREE = 428 ++ SYS_MOVE_MOUNT = 429 ++ SYS_FSOPEN = 430 ++ SYS_FSCONFIG = 431 ++ SYS_FSMOUNT = 432 ++ SYS_FSPICK = 433 ++ SYS_PIDFD_OPEN = 434 ++ SYS_CLONE3 = 435 ++ SYS_CLOSE_RANGE = 436 ++ SYS_OPENAT2 = 437 ++ SYS_PIDFD_GETFD = 438 ++ SYS_FACCESSAT2 = 439 ++ SYS_PROCESS_MADVISE = 440 ++ SYS_EPOLL_PWAIT2 = 441 ++ SYS_MOUNT_SETATTR = 442 ++ SYS_QUOTACTL_FD = 443 ++ SYS_LANDLOCK_CREATE_RULESET = 444 ++ SYS_LANDLOCK_ADD_RULE = 445 ++ SYS_LANDLOCK_RESTRICT_SELF = 446 ++ SYS_PROCESS_MRELEASE = 448 ++ SYS_FUTEX_WAITV = 449 ++ SYS_SET_MEMPOLICY_HOME_NODE = 450 ++) +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +new file mode 100644 +index 0000000000..c19f60ac83 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +@@ -0,0 +1,667 @@ ++// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go ++// Code generated by the command above; see README.md. DO NOT EDIT. 
++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++const ( ++ SizeofPtr = 0x8 ++ SizeofLong = 0x8 ++) ++ ++type ( ++ _C_long int64 ++) ++ ++type Timespec struct { ++ Sec int64 ++ Nsec int64 ++} ++ ++type Timeval struct { ++ Sec int64 ++ Usec int64 ++} ++ ++type Timex struct { ++ Modes uint32 ++ Offset int64 ++ Freq int64 ++ Maxerror int64 ++ Esterror int64 ++ Status int32 ++ Constant int64 ++ Precision int64 ++ Tolerance int64 ++ Time Timeval ++ Tick int64 ++ Ppsfreq int64 ++ Jitter int64 ++ Shift int32 ++ Stabil int64 ++ Jitcnt int64 ++ Calcnt int64 ++ Errcnt int64 ++ Stbcnt int64 ++ Tai int32 ++ _ [44]byte ++} ++ ++type Time_t int64 ++ ++type Tms struct { ++ Utime int64 ++ Stime int64 ++ Cutime int64 ++ Cstime int64 ++} ++ ++type Utimbuf struct { ++ Actime int64 ++ Modtime int64 ++} ++ ++type Rusage struct { ++ Utime Timeval ++ Stime Timeval ++ Maxrss int64 ++ Ixrss int64 ++ Idrss int64 ++ Isrss int64 ++ Minflt int64 ++ Majflt int64 ++ Nswap int64 ++ Inblock int64 ++ Oublock int64 ++ Msgsnd int64 ++ Msgrcv int64 ++ Nsignals int64 ++ Nvcsw int64 ++ Nivcsw int64 ++} ++ ++type Stat_t struct { ++ Dev uint64 ++ Ino uint64 ++ Mode uint32 ++ Nlink uint32 ++ Uid uint32 ++ Gid uint32 ++ Rdev uint64 ++ _ uint64 ++ Size int64 ++ Blksize int32 ++ _ int32 ++ Blocks int64 ++ Atim Timespec ++ Mtim Timespec ++ Ctim Timespec ++ _ [2]int32 ++} ++ ++type Dirent struct { ++ Ino uint64 ++ Off int64 ++ Reclen uint16 ++ Type uint8 ++ Name [256]int8 ++ _ [5]byte ++} ++ ++type Flock_t struct { ++ Type int16 ++ Whence int16 ++ Start int64 ++ Len int64 ++ Pid int32 ++ _ [4]byte ++} ++ ++type DmNameList struct { ++ Dev uint64 ++ Next uint32 ++ Name [0]byte ++ _ [4]byte ++} ++ ++const ( ++ FADV_DONTNEED = 0x4 ++ FADV_NOREUSE = 0x5 ++) ++ ++type RawSockaddrNFCLLCP struct { ++ Sa_family uint16 ++ Dev_idx uint32 ++ Target_idx uint32 ++ Nfc_protocol uint32 ++ Dsap uint8 ++ Ssap uint8 ++ Service_name [63]uint8 ++ Service_name_len uint64 ++} ++ ++type RawSockaddr 
struct { ++ Family uint16 ++ Data [14]int8 ++} ++ ++type RawSockaddrAny struct { ++ Addr RawSockaddr ++ Pad [96]int8 ++} ++ ++type Iovec struct { ++ Base *byte ++ Len uint64 ++} ++ ++type Msghdr struct { ++ Name *byte ++ Namelen uint32 ++ Iov *Iovec ++ Iovlen uint64 ++ Control *byte ++ Controllen uint64 ++ Flags int32 ++ _ [4]byte ++} ++ ++type Cmsghdr struct { ++ Len uint64 ++ Level int32 ++ Type int32 ++} ++ ++type ifreq struct { ++ Ifrn [16]byte ++ Ifru [24]byte ++} ++ ++const ( ++ SizeofSockaddrNFCLLCP = 0x60 ++ SizeofIovec = 0x10 ++ SizeofMsghdr = 0x38 ++ SizeofCmsghdr = 0x10 ++) ++ ++const ( ++ SizeofSockFprog = 0x10 ++) ++ ++type PtraceRegs struct { ++ Regs [32]uint64 ++ Orig_a0 uint64 ++ Era uint64 ++ Badv uint64 ++ Reserved [10]uint64 ++} ++ ++type FdSet struct { ++ Bits [16]int64 ++} ++ ++type Sysinfo_t struct { ++ Uptime int64 ++ Loads [3]uint64 ++ Totalram uint64 ++ Freeram uint64 ++ Sharedram uint64 ++ Bufferram uint64 ++ Totalswap uint64 ++ Freeswap uint64 ++ Procs uint16 ++ Pad uint16 ++ Totalhigh uint64 ++ Freehigh uint64 ++ Unit uint32 ++ _ [0]int8 ++ _ [4]byte ++} ++ ++type Ustat_t struct { ++ Tfree int32 ++ Tinode uint64 ++ Fname [6]int8 ++ Fpack [6]int8 ++ _ [4]byte ++} ++ ++type EpollEvent struct { ++ Events uint32 ++ _ int32 ++ Fd int32 ++ Pad int32 ++} ++ ++const ( ++ POLLRDHUP = 0x2000 ++) ++ ++type Sigset_t struct { ++ Val [16]uint64 ++} ++ ++const _C__NSIG = 0x41 ++ ++type Termios struct { ++ Iflag uint32 ++ Oflag uint32 ++ Cflag uint32 ++ Lflag uint32 ++ Line uint8 ++ Cc [19]uint8 ++ Ispeed uint32 ++ Ospeed uint32 ++} ++ ++type Taskstats struct { ++ Version uint16 ++ Ac_exitcode uint32 ++ Ac_flag uint8 ++ Ac_nice uint8 ++ Cpu_count uint64 ++ Cpu_delay_total uint64 ++ Blkio_count uint64 ++ Blkio_delay_total uint64 ++ Swapin_count uint64 ++ Swapin_delay_total uint64 ++ Cpu_run_real_total uint64 ++ Cpu_run_virtual_total uint64 ++ Ac_comm [32]int8 ++ Ac_sched uint8 ++ Ac_pad [3]uint8 ++ _ [4]byte ++ Ac_uid uint32 ++ Ac_gid uint32 ++ Ac_pid 
uint32 ++ Ac_ppid uint32 ++ Ac_btime uint32 ++ Ac_etime uint64 ++ Ac_utime uint64 ++ Ac_stime uint64 ++ Ac_minflt uint64 ++ Ac_majflt uint64 ++ Coremem uint64 ++ Virtmem uint64 ++ Hiwater_rss uint64 ++ Hiwater_vm uint64 ++ Read_char uint64 ++ Write_char uint64 ++ Read_syscalls uint64 ++ Write_syscalls uint64 ++ Read_bytes uint64 ++ Write_bytes uint64 ++ Cancelled_write_bytes uint64 ++ Nvcsw uint64 ++ Nivcsw uint64 ++ Ac_utimescaled uint64 ++ Ac_stimescaled uint64 ++ Cpu_scaled_run_real_total uint64 ++ Freepages_count uint64 ++ Freepages_delay_total uint64 ++ Thrashing_count uint64 ++ Thrashing_delay_total uint64 ++ Ac_btime64 uint64 ++ Compact_count uint64 ++ Compact_delay_total uint64 ++} ++ ++type cpuMask uint64 ++ ++const ( ++ _NCPUBITS = 0x40 ++) ++ ++const ( ++ CBitFieldMaskBit0 = 0x1 ++ CBitFieldMaskBit1 = 0x2 ++ CBitFieldMaskBit2 = 0x4 ++ CBitFieldMaskBit3 = 0x8 ++ CBitFieldMaskBit4 = 0x10 ++ CBitFieldMaskBit5 = 0x20 ++ CBitFieldMaskBit6 = 0x40 ++ CBitFieldMaskBit7 = 0x80 ++ CBitFieldMaskBit8 = 0x100 ++ CBitFieldMaskBit9 = 0x200 ++ CBitFieldMaskBit10 = 0x400 ++ CBitFieldMaskBit11 = 0x800 ++ CBitFieldMaskBit12 = 0x1000 ++ CBitFieldMaskBit13 = 0x2000 ++ CBitFieldMaskBit14 = 0x4000 ++ CBitFieldMaskBit15 = 0x8000 ++ CBitFieldMaskBit16 = 0x10000 ++ CBitFieldMaskBit17 = 0x20000 ++ CBitFieldMaskBit18 = 0x40000 ++ CBitFieldMaskBit19 = 0x80000 ++ CBitFieldMaskBit20 = 0x100000 ++ CBitFieldMaskBit21 = 0x200000 ++ CBitFieldMaskBit22 = 0x400000 ++ CBitFieldMaskBit23 = 0x800000 ++ CBitFieldMaskBit24 = 0x1000000 ++ CBitFieldMaskBit25 = 0x2000000 ++ CBitFieldMaskBit26 = 0x4000000 ++ CBitFieldMaskBit27 = 0x8000000 ++ CBitFieldMaskBit28 = 0x10000000 ++ CBitFieldMaskBit29 = 0x20000000 ++ CBitFieldMaskBit30 = 0x40000000 ++ CBitFieldMaskBit31 = 0x80000000 ++ CBitFieldMaskBit32 = 0x100000000 ++ CBitFieldMaskBit33 = 0x200000000 ++ CBitFieldMaskBit34 = 0x400000000 ++ CBitFieldMaskBit35 = 0x800000000 ++ CBitFieldMaskBit36 = 0x1000000000 ++ CBitFieldMaskBit37 = 0x2000000000 ++ 
CBitFieldMaskBit38 = 0x4000000000 ++ CBitFieldMaskBit39 = 0x8000000000 ++ CBitFieldMaskBit40 = 0x10000000000 ++ CBitFieldMaskBit41 = 0x20000000000 ++ CBitFieldMaskBit42 = 0x40000000000 ++ CBitFieldMaskBit43 = 0x80000000000 ++ CBitFieldMaskBit44 = 0x100000000000 ++ CBitFieldMaskBit45 = 0x200000000000 ++ CBitFieldMaskBit46 = 0x400000000000 ++ CBitFieldMaskBit47 = 0x800000000000 ++ CBitFieldMaskBit48 = 0x1000000000000 ++ CBitFieldMaskBit49 = 0x2000000000000 ++ CBitFieldMaskBit50 = 0x4000000000000 ++ CBitFieldMaskBit51 = 0x8000000000000 ++ CBitFieldMaskBit52 = 0x10000000000000 ++ CBitFieldMaskBit53 = 0x20000000000000 ++ CBitFieldMaskBit54 = 0x40000000000000 ++ CBitFieldMaskBit55 = 0x80000000000000 ++ CBitFieldMaskBit56 = 0x100000000000000 ++ CBitFieldMaskBit57 = 0x200000000000000 ++ CBitFieldMaskBit58 = 0x400000000000000 ++ CBitFieldMaskBit59 = 0x800000000000000 ++ CBitFieldMaskBit60 = 0x1000000000000000 ++ CBitFieldMaskBit61 = 0x2000000000000000 ++ CBitFieldMaskBit62 = 0x4000000000000000 ++ CBitFieldMaskBit63 = 0x8000000000000000 ++) ++ ++type SockaddrStorage struct { ++ Family uint16 ++ _ [118]int8 ++ _ uint64 ++} ++ ++type HDGeometry struct { ++ Heads uint8 ++ Sectors uint8 ++ Cylinders uint16 ++ Start uint64 ++} ++ ++type Statfs_t struct { ++ Type int64 ++ Bsize int64 ++ Blocks uint64 ++ Bfree uint64 ++ Bavail uint64 ++ Files uint64 ++ Ffree uint64 ++ Fsid Fsid ++ Namelen int64 ++ Frsize int64 ++ Flags int64 ++ Spare [4]int64 ++} ++ ++type TpacketHdr struct { ++ Status uint64 ++ Len uint32 ++ Snaplen uint32 ++ Mac uint16 ++ Net uint16 ++ Sec uint32 ++ Usec uint32 ++ _ [4]byte ++} ++ ++const ( ++ SizeofTpacketHdr = 0x20 ++) ++ ++type RTCPLLInfo struct { ++ Ctrl int32 ++ Value int32 ++ Max int32 ++ Min int32 ++ Posmult int32 ++ Negmult int32 ++ Clock int64 ++} ++ ++type BlkpgPartition struct { ++ Start int64 ++ Length int64 ++ Pno int32 ++ Devname [64]uint8 ++ Volname [64]uint8 ++ _ [4]byte ++} ++ ++const ( ++ BLKPG = 0x1269 ++) ++ ++type XDPUmemReg struct { ++ Addr 
uint64 ++ Len uint64 ++ Size uint32 ++ Headroom uint32 ++ Flags uint32 ++ _ [4]byte ++} ++ ++type CryptoUserAlg struct { ++ Name [64]int8 ++ Driver_name [64]int8 ++ Module_name [64]int8 ++ Type uint32 ++ Mask uint32 ++ Refcnt uint32 ++ Flags uint32 ++} ++ ++type CryptoStatAEAD struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatAKCipher struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Verify_cnt uint64 ++ Sign_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatCipher struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatCompress struct { ++ Type [64]int8 ++ Compress_cnt uint64 ++ Compress_tlen uint64 ++ Decompress_cnt uint64 ++ Decompress_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatHash struct { ++ Type [64]int8 ++ Hash_cnt uint64 ++ Hash_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatKPP struct { ++ Type [64]int8 ++ Setsecret_cnt uint64 ++ Generate_public_key_cnt uint64 ++ Compute_shared_secret_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatRNG struct { ++ Type [64]int8 ++ Generate_cnt uint64 ++ Generate_tlen uint64 ++ Seed_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatLarval struct { ++ Type [64]int8 ++} ++ ++type CryptoReportLarval struct { ++ Type [64]int8 ++} ++ ++type CryptoReportHash struct { ++ Type [64]int8 ++ Blocksize uint32 ++ Digestsize uint32 ++} ++ ++type CryptoReportCipher struct { ++ Type [64]int8 ++ Blocksize uint32 ++ Min_keysize uint32 ++ Max_keysize uint32 ++} ++ ++type CryptoReportBlkCipher struct { ++ Type [64]int8 ++ Geniv [64]int8 ++ Blocksize uint32 ++ Min_keysize uint32 ++ Max_keysize uint32 ++ Ivsize uint32 ++} ++ ++type CryptoReportAEAD struct { ++ Type [64]int8 ++ Geniv [64]int8 ++ Blocksize uint32 ++ Maxauthsize uint32 ++ Ivsize 
uint32 ++} ++ ++type CryptoReportComp struct { ++ Type [64]int8 ++} ++ ++type CryptoReportRNG struct { ++ Type [64]int8 ++ Seedsize uint32 ++} ++ ++type CryptoReportAKCipher struct { ++ Type [64]int8 ++} ++ ++type CryptoReportKPP struct { ++ Type [64]int8 ++} ++ ++type CryptoReportAcomp struct { ++ Type [64]int8 ++} ++ ++type LoopInfo struct { ++ Number int32 ++ Device uint32 ++ Inode uint64 ++ Rdevice uint32 ++ Offset int32 ++ Encrypt_type int32 ++ Encrypt_key_size int32 ++ Flags int32 ++ Name [64]int8 ++ Encrypt_key [32]uint8 ++ Init [2]uint64 ++ Reserved [4]int8 ++ _ [4]byte ++} ++ ++type TIPCSubscr struct { ++ Seq TIPCServiceRange ++ Timeout uint32 ++ Filter uint32 ++ Handle [8]int8 ++} ++ ++type TIPCSIOCLNReq struct { ++ Peer uint32 ++ Id uint32 ++ Linkname [68]int8 ++} ++ ++type TIPCSIOCNodeIDReq struct { ++ Peer uint32 ++ Id [16]int8 ++} ++ ++type PPSKInfo struct { ++ Assert_sequence uint32 ++ Clear_sequence uint32 ++ Assert_tu PPSKTime ++ Clear_tu PPSKTime ++ Current_mode int32 ++ _ [4]byte ++} ++ ++const ( ++ PPS_GETPARAMS = 0x800870a1 ++ PPS_SETPARAMS = 0x400870a2 ++ PPS_GETCAP = 0x800870a3 ++ PPS_FETCH = 0xc00870a4 ++) ++ ++const ( ++ PIDFD_NONBLOCK = 0x800 ++) ++ ++type SysvIpcPerm struct { ++ Key int32 ++ Uid uint32 ++ Gid uint32 ++ Cuid uint32 ++ Cgid uint32 ++ Mode uint32 ++ _ [0]uint8 ++ Seq uint16 ++ _ uint16 ++ _ uint64 ++ _ uint64 ++} ++type SysvShmDesc struct { ++ Perm SysvIpcPerm ++ Segsz uint64 ++ Atime int64 ++ Dtime int64 ++ Ctime int64 ++ Cpid int32 ++ Lpid int32 ++ Nattch uint64 ++ _ uint64 ++ _ uint64 ++} +-- +2.38.0 + diff --git a/loongarch64/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch b/loongarch64/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch new file mode 100644 index 0000000..e9e3f82 --- /dev/null +++ b/loongarch64/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch @@ -0,0 +1,52 @@ +From 184f2f28fdc7c0b3b4068ef36f73f69280a3577b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 
19 Oct 2021 15:37:32 +0800 +Subject: [PATCH 33/82] cmd/vendor: update vendored golang.org/x/tools to + support loong64 + +Change-Id: I3501138fb0b37f0b4872596dd317f8e51af63b28 +--- + .../x/tools/go/analysis/passes/asmdecl/asmdecl.go | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +index eb0016b18f..8f8da98157 100644 +--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go ++++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +@@ -81,6 +81,7 @@ var ( + asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true} + asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true} + asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false} ++ asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true} + asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true} + asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true} + asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true} +@@ -96,6 +97,7 @@ var ( + &asmArchArm, + &asmArchArm64, + &asmArchAmd64, ++ &asmArchLoong64, + &asmArchMips, + &asmArchMipsLE, + &asmArchMips64, +@@ -721,6 +723,17 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri + case "MOVV", "MOVD": + src = 8 + } ++ case "loong64": ++ switch op { ++ case "MOVB", "MOVBU": ++ src = 1 ++ case "MOVH", "MOVHU": ++ src = 2 ++ case "MOVW", "MOVWU", "MOVF": ++ src = 4 ++ case "MOVV", "MOVD": ++ src = 8 ++ } + case "s390x": + switch op { + case "MOVB", "MOVBZ": +-- +2.38.0 + diff --git a/loongarch64/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch b/loongarch64/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch new file mode 100644 index 0000000..adbe3d8 
--- /dev/null +++ b/loongarch64/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch @@ -0,0 +1,297 @@ +From eae19e1c6d1fdeb5113cb651fefc8de8723869e7 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:31:52 +0800 +Subject: [PATCH 34/82] internal/bytealg: support basic byte operation on + loong64 + +Change-Id: I4ac6d38dc632abfa0b698325ca0ae349c0d7ecd3 +--- + src/internal/bytealg/compare_generic.go | 4 +- + src/internal/bytealg/compare_loong64.s | 88 +++++++++++++++++++++++ + src/internal/bytealg/compare_native.go | 4 +- + src/internal/bytealg/equal_loong64.s | 54 ++++++++++++++ + src/internal/bytealg/indexbyte_generic.go | 4 +- + src/internal/bytealg/indexbyte_loong64.s | 54 ++++++++++++++ + src/internal/bytealg/indexbyte_native.go | 4 +- + 7 files changed, 204 insertions(+), 8 deletions(-) + create mode 100644 src/internal/bytealg/compare_loong64.s + create mode 100644 src/internal/bytealg/equal_loong64.s + create mode 100644 src/internal/bytealg/indexbyte_loong64.s + +diff --git a/src/internal/bytealg/compare_generic.go b/src/internal/bytealg/compare_generic.go +index 0690d0cf31..70428ba8e2 100644 +--- a/src/internal/bytealg/compare_generic.go ++++ b/src/internal/bytealg/compare_generic.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !wasm && !mips64 && !mips64le +-// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le ++//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !wasm && !mips64 && !mips64le && !loong64 ++// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le,!loong64 + + package bytealg + +diff --git a/src/internal/bytealg/compare_loong64.s b/src/internal/bytealg/compare_loong64.s +new file mode 100644 +index 0000000000..853bab3898 +--- /dev/null ++++ b/src/internal/bytealg/compare_loong64.s +@@ -0,0 +1,88 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·Compare(SB),NOSPLIT,$0-56 ++ MOVV a_base+0(FP), R6 ++ MOVV b_base+24(FP), R7 ++ MOVV a_len+8(FP), R4 ++ MOVV b_len+32(FP), R5 ++ MOVV $ret+48(FP), R13 ++ JMP cmpbody<>(SB) ++ ++TEXT runtime·cmpstring(SB),NOSPLIT,$0-40 ++ MOVV a_base+0(FP), R6 ++ MOVV b_base+16(FP), R7 ++ MOVV a_len+8(FP), R4 ++ MOVV b_len+24(FP), R5 ++ MOVV $ret+32(FP), R13 ++ JMP cmpbody<>(SB) ++ ++// On entry: ++// R4 length of a ++// R5 length of b ++// R6 points to the start of a ++// R7 points to the start of b ++// R13 points to the return value (-1/0/1) ++TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0 ++ BEQ R6, R7, samebytes // same start of a and b ++ ++ SGTU R4, R5, R9 ++ BNE R0, R9, r2_lt_r1 ++ MOVV R4, R14 ++ JMP entry ++r2_lt_r1: ++ MOVV R5, R14 // R14 is min(R4, R5) ++entry: ++ ADDV R6, R14, R12 // R6 start of a, R14 end of a ++ BEQ R6, R12, samebytes // length is 0 ++ ++ SRLV $4, R14 // R14 is number of chunks ++ BEQ R0, R14, byte_loop ++ ++ // make sure both a and b are aligned. 
++ OR R6, R7, R15 ++ AND $7, R15 ++ BNE R0, R15, byte_loop ++ ++chunk16_loop: ++ BEQ R0, R14, byte_loop ++ MOVV (R6), R8 ++ MOVV (R7), R9 ++ BNE R8, R9, byte_loop ++ MOVV 8(R6), R16 ++ MOVV 8(R7), R17 ++ ADDV $16, R6 ++ ADDV $16, R7 ++ SUBVU $1, R14 ++ BEQ R16, R17, chunk16_loop ++ SUBV $8, R6 ++ SUBV $8, R7 ++ ++byte_loop: ++ BEQ R6, R12, samebytes ++ MOVBU (R6), R8 ++ ADDVU $1, R6 ++ MOVBU (R7), R9 ++ ADDVU $1, R7 ++ BEQ R8, R9, byte_loop ++ ++byte_cmp: ++ SGTU R8, R9, R12 // R12 = 1 if (R8 > R9) ++ BNE R0, R12, ret ++ MOVV $-1, R12 ++ JMP ret ++ ++samebytes: ++ SGTU R4, R5, R8 ++ SGTU R5, R4, R9 ++ SUBV R9, R8, R12 ++ ++ret: ++ MOVV R12, (R13) ++ RET +diff --git a/src/internal/bytealg/compare_native.go b/src/internal/bytealg/compare_native.go +index baa188ff7a..c473cca823 100644 +--- a/src/internal/bytealg/compare_native.go ++++ b/src/internal/bytealg/compare_native.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le +-// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le ++//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le || loong64 ++// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le loong64 + + package bytealg + +diff --git a/src/internal/bytealg/equal_loong64.s b/src/internal/bytealg/equal_loong64.s +new file mode 100644 +index 0000000000..a954407e35 +--- /dev/null ++++ b/src/internal/bytealg/equal_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++#define REGCTXT R29 ++ ++// memequal(a, b unsafe.Pointer, size uintptr) bool ++TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 ++ MOVV a+0(FP), R4 ++ MOVV b+8(FP), R5 ++ BEQ R4, R5, eq ++ MOVV size+16(FP), R6 ++ ADDV R4, R6, R7 ++loop: ++ BNE R4, R7, test ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ RET ++test: ++ MOVBU (R4), R9 ++ ADDV $1, R4 ++ MOVBU (R5), R10 ++ ADDV $1, R5 ++ BEQ R9, R10, loop ++ ++ MOVB R0, ret+24(FP) ++ RET ++eq: ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ RET ++ ++// memequal_varlen(a, b unsafe.Pointer) bool ++TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 ++ MOVV a+0(FP), R4 ++ MOVV b+8(FP), R5 ++ BEQ R4, R5, eq ++ MOVV 8(REGCTXT), R6 // compiler stores size at offset 8 in the closure ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ JAL runtime·memequal(SB) ++ MOVBU 32(R3), R4 ++ MOVB R4, ret+16(FP) ++ RET ++eq: ++ MOVV $1, R4 ++ MOVB R4, ret+16(FP) ++ RET +diff --git a/src/internal/bytealg/indexbyte_generic.go b/src/internal/bytealg/indexbyte_generic.go +index 6ef639fafd..95123fda02 100644 +--- a/src/internal/bytealg/indexbyte_generic.go ++++ b/src/internal/bytealg/indexbyte_generic.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm +-// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!riscv64,!wasm ++//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm && !loong64 ++// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!riscv64,!wasm,!loong64 + + package bytealg + +diff --git a/src/internal/bytealg/indexbyte_loong64.s b/src/internal/bytealg/indexbyte_loong64.s +new file mode 100644 +index 0000000000..a585cdbd86 +--- /dev/null ++++ b/src/internal/bytealg/indexbyte_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·IndexByte(SB),NOSPLIT,$0-40 ++ MOVV b_base+0(FP), R4 ++ MOVV b_len+8(FP), R5 ++ MOVBU c+24(FP), R6 // byte to find ++ MOVV R4, R7 // store base for later ++ ADDV R4, R5 // end ++ ADDV $-1, R4 ++ ++loop: ++ ADDV $1, R4 ++ BEQ R4, R5, notfound ++ MOVBU (R4), R8 ++ BNE R6, R8, loop ++ ++ SUBV R7, R4 // remove base ++ MOVV R4, ret+32(FP) ++ RET ++ ++notfound: ++ MOVV $-1, R4 ++ MOVV R4, ret+32(FP) ++ RET ++ ++TEXT ·IndexByteString(SB),NOSPLIT,$0-32 ++ MOVV s_base+0(FP), R4 ++ MOVV s_len+8(FP), R5 ++ MOVBU c+16(FP), R6 // byte to find ++ MOVV R4, R7 // store base for later ++ ADDV R4, R5 // end ++ ADDV $-1, R4 ++ ++loop: ++ ADDV $1, R4 ++ BEQ R4, R5, notfound ++ MOVBU (R4), R8 ++ BNE R6, R8, loop ++ ++ SUBV R7, R4 // remove base ++ MOVV R4, ret+24(FP) ++ RET ++ ++notfound: ++ MOVV $-1, R4 ++ MOVV R4, ret+24(FP) ++ RET +diff --git a/src/internal/bytealg/indexbyte_native.go b/src/internal/bytealg/indexbyte_native.go +index 965f38fe52..6775b3e1d7 100644 +--- 
a/src/internal/bytealg/indexbyte_native.go ++++ b/src/internal/bytealg/indexbyte_native.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm +-// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le riscv64 wasm ++//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm || loong64 ++// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le riscv64 wasm loong64 + + package bytealg + +-- +2.38.0 + diff --git a/loongarch64/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch b/loongarch64/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch new file mode 100644 index 0000000..632cafa --- /dev/null +++ b/loongarch64/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch @@ -0,0 +1,390 @@ +From 670e716b86200985156237efca1e3714452f0d8e Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:11:14 +0800 +Subject: [PATCH 35/82] debug, go, math, os, reflect, vendor: support standard + library for loong64 + +Change-Id: I2ad9ed01fc913b90e75023ac0fa70de87a9f5de1 +--- + src/debug/elf/elf.go | 114 ++++++++++++++++++ + src/debug/elf/file.go | 50 ++++++++ + src/go/types/gccgosizes.go | 1 + + src/go/types/sizes.go | 3 +- + src/math/big/arith_loong64.s | 37 ++++++ + src/os/endian_little.go | 4 +- + src/reflect/asm_loong64.s | 42 +++++++ + .../golang.org/x/sys/cpu/cpu_loong64.go | 14 +++ + 8 files changed, 262 insertions(+), 3 deletions(-) + create mode 100644 src/math/big/arith_loong64.s + create mode 100644 src/reflect/asm_loong64.s + create mode 100644 src/vendor/golang.org/x/sys/cpu/cpu_loong64.go + +diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go +index 9f8399d4d3..972a75e1e6 100644 +--- a/src/debug/elf/elf.go 
++++ b/src/debug/elf/elf.go +@@ -384,6 +384,7 @@ const ( + EM_RISCV Machine = 243 /* RISC-V */ + EM_LANAI Machine = 244 /* Lanai 32-bit processor */ + EM_BPF Machine = 247 /* Linux BPF – in-kernel virtual machine */ ++ EM_LOONGARCH Machine = 258 /* LoongArch */ + + /* Non-standard or deprecated. */ + EM_486 Machine = 6 /* Intel i486. */ +@@ -575,6 +576,7 @@ var machineStrings = []intName{ + {243, "EM_RISCV"}, + {244, "EM_LANAI"}, + {247, "EM_BPF"}, ++ {258, "EM_LOONGARCH"}, + + /* Non-standard or deprecated. */ + {6, "EM_486"}, +@@ -2150,6 +2152,118 @@ var rmipsStrings = []intName{ + func (i R_MIPS) String() string { return stringName(uint32(i), rmipsStrings, false) } + func (i R_MIPS) GoString() string { return stringName(uint32(i), rmipsStrings, true) } + ++// Relocation types for LARCH. ++type R_LARCH int ++ ++const ( ++ R_LARCH_NONE R_LARCH = 0 ++ R_LARCH_32 R_LARCH = 1 ++ R_LARCH_64 R_LARCH = 2 ++ R_LARCH_RELATIVE R_LARCH = 3 ++ R_LARCH_COPY R_LARCH = 4 ++ R_LARCH_JUMP_SLOT R_LARCH = 5 ++ R_LARCH_TLS_DTPMOD32 R_LARCH = 6 ++ R_LARCH_TLS_DTPMOD64 R_LARCH = 7 ++ R_LARCH_TLS_DTPREL32 R_LARCH = 8 ++ R_LARCH_TLS_DTPREL64 R_LARCH = 9 ++ R_LARCH_TLS_TPREL32 R_LARCH = 10 ++ R_LARCH_TLS_TPREL64 R_LARCH = 11 ++ R_LARCH_IRELATIVE R_LARCH = 12 ++ R_LARCH_MARK_LA R_LARCH = 20 ++ R_LARCH_MARK_PCREL R_LARCH = 21 ++ R_LARCH_SOP_PUSH_PCREL R_LARCH = 22 ++ R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH = 23 ++ R_LARCH_SOP_PUSH_DUP R_LARCH = 24 ++ R_LARCH_SOP_PUSH_GPREL R_LARCH = 25 ++ R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH = 26 ++ R_LARCH_SOP_PUSH_TLS_GOT R_LARCH = 27 ++ R_LARCH_SOP_PUSH_TLS_GD R_LARCH = 28 ++ R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH = 29 ++ R_LARCH_SOP_ASSERT R_LARCH = 30 ++ R_LARCH_SOP_NOT R_LARCH = 31 ++ R_LARCH_SOP_SUB R_LARCH = 32 ++ R_LARCH_SOP_SL R_LARCH = 33 ++ R_LARCH_SOP_SR R_LARCH = 34 ++ R_LARCH_SOP_ADD R_LARCH = 35 ++ R_LARCH_SOP_AND R_LARCH = 36 ++ R_LARCH_SOP_IF_ELSE R_LARCH = 37 ++ R_LARCH_SOP_POP_32_S_10_5 R_LARCH = 38 ++ R_LARCH_SOP_POP_32_U_10_12 R_LARCH = 39 ++ 
R_LARCH_SOP_POP_32_S_10_12 R_LARCH = 40 ++ R_LARCH_SOP_POP_32_S_10_16 R_LARCH = 41 ++ R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH = 42 ++ R_LARCH_SOP_POP_32_S_5_20 R_LARCH = 43 ++ R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH = 44 ++ R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH = 45 ++ R_LARCH_SOP_POP_32_U R_LARCH = 46 ++ R_LARCH_ADD8 R_LARCH = 47 ++ R_LARCH_ADD16 R_LARCH = 48 ++ R_LARCH_ADD24 R_LARCH = 49 ++ R_LARCH_ADD32 R_LARCH = 50 ++ R_LARCH_ADD64 R_LARCH = 51 ++ R_LARCH_SUB8 R_LARCH = 52 ++ R_LARCH_SUB16 R_LARCH = 53 ++ R_LARCH_SUB24 R_LARCH = 54 ++ R_LARCH_SUB32 R_LARCH = 55 ++ R_LARCH_SUB64 R_LARCH = 56 ++) ++ ++var rlarchStrings = []intName{ ++ {0, "R_LARCH_NONE"}, ++ {1, "R_LARCH_32"}, ++ {2, "R_LARCH_64"}, ++ {3, "R_LARCH_RELATIVE"}, ++ {4, "R_LARCH_COPY"}, ++ {5, "R_LARCH_JUMP_SLOT"}, ++ {6, "R_LARCH_TLS_DTPMOD32"}, ++ {7, "R_LARCH_TLS_DTPMOD64"}, ++ {8, "R_LARCH_TLS_DTPREL32"}, ++ {9, "R_LARCH_TLS_DTPREL64"}, ++ {10, "R_LARCH_TLS_TPREL32"}, ++ {11, "R_LARCH_TLS_TPREL64"}, ++ {12, "R_LARCH_IRELATIVE"}, ++ {20, "R_LARCH_MARK_LA"}, ++ {21, "R_LARCH_MARK_PCREL"}, ++ {22, "R_LARCH_SOP_PUSH_PCREL"}, ++ {23, "R_LARCH_SOP_PUSH_ABSOLUTE"}, ++ {24, "R_LARCH_SOP_PUSH_DUP"}, ++ {25, "R_LARCH_SOP_PUSH_GPREL"}, ++ {26, "R_LARCH_SOP_PUSH_TLS_TPREL"}, ++ {27, "R_LARCH_SOP_PUSH_TLS_GOT"}, ++ {28, "R_LARCH_SOP_PUSH_TLS_GD"}, ++ {29, "R_LARCH_SOP_PUSH_PLT_PCREL"}, ++ {30, "R_LARCH_SOP_ASSERT"}, ++ {31, "R_LARCH_SOP_NOT"}, ++ {32, "R_LARCH_SOP_SUB"}, ++ {33, "R_LARCH_SOP_SL"}, ++ {34, "R_LARCH_SOP_SR"}, ++ {35, "R_LARCH_SOP_ADD"}, ++ {36, "R_LARCH_SOP_AND"}, ++ {37, "R_LARCH_SOP_IF_ELSE"}, ++ {38, "R_LARCH_SOP_POP_32_S_10_5"}, ++ {39, "R_LARCH_SOP_POP_32_U_10_12"}, ++ {40, "R_LARCH_SOP_POP_32_S_10_12"}, ++ {41, "R_LARCH_SOP_POP_32_S_10_16"}, ++ {42, "R_LARCH_SOP_POP_32_S_10_16_S2"}, ++ {43, "R_LARCH_SOP_POP_32_S_5_20"}, ++ {44, "R_LARCH_SOP_POP_32_S_0_5_10_16_S2"}, ++ {45, "R_LARCH_SOP_POP_32_S_0_10_10_16_S2"}, ++ {46, "R_LARCH_SOP_POP_32_U"}, ++ {47, "R_LARCH_ADD8"}, ++ {48, 
"R_LARCH_ADD16"}, ++ {49, "R_LARCH_ADD24"}, ++ {50, "R_LARCH_ADD32"}, ++ {51, "R_LARCH_ADD64"}, ++ {52, "R_LARCH_SUB8"}, ++ {53, "R_LARCH_SUB16"}, ++ {54, "R_LARCH_SUB24"}, ++ {55, "R_LARCH_SUB32"}, ++ {56, "R_LARCH_SUB64"}, ++} ++ ++func (i R_LARCH) String() string { return stringName(uint32(i), rlarchStrings, false) } ++func (i R_LARCH) GoString() string { return stringName(uint32(i), rlarchStrings, true) } ++ + // Relocation types for PowerPC. + // + // Values that are shared by both R_PPC and R_PPC64 are prefixed with +diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go +index b25d8209e3..37739fbcf2 100644 +--- a/src/debug/elf/file.go ++++ b/src/debug/elf/file.go +@@ -617,6 +617,8 @@ func (f *File) applyRelocations(dst []byte, rels []byte) error { + return f.applyRelocationsMIPS(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_MIPS: + return f.applyRelocationsMIPS64(dst, rels) ++ case f.Class == ELFCLASS64 && f.Machine == EM_LOONGARCH: ++ return f.applyRelocationsLOONG64(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_RISCV: + return f.applyRelocationsRISCV64(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_S390: +@@ -990,6 +992,54 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { + return nil + } + ++func (f *File) applyRelocationsLOONG64(dst []byte, rels []byte) error { ++ // 24 is the size of Rela64. 
++ if len(rels)%24 != 0 { ++ return errors.New("length of relocation section is not a multiple of 24") ++ } ++ ++ symbols, _, err := f.getSymbols(SHT_SYMTAB) ++ if err != nil { ++ return err ++ } ++ ++ b := bytes.NewReader(rels) ++ var rela Rela64 ++ ++ for b.Len() > 0 { ++ binary.Read(b, f.ByteOrder, &rela) ++ var symNo uint64 ++ var t R_LARCH ++ symNo = rela.Info >> 32 ++ t = R_LARCH(rela.Info & 0xffff) ++ ++ if symNo == 0 || symNo > uint64(len(symbols)) { ++ continue ++ } ++ sym := &symbols[symNo-1] ++ if !canApplyRelocation(sym) { ++ continue ++ } ++ ++ switch t { ++ case R_LARCH_64: ++ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { ++ continue ++ } ++ val64 := sym.Value + uint64(rela.Addend) ++ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) ++ case R_LARCH_32: ++ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { ++ continue ++ } ++ val32 := uint32(sym.Value) + uint32(rela.Addend) ++ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) ++ } ++ } ++ ++ return nil ++} ++ + func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { + // 24 is the size of Rela64. + if len(rels)%24 != 0 { +diff --git a/src/go/types/gccgosizes.go b/src/go/types/gccgosizes.go +index d5c92c6d1d..9d077cc5a6 100644 +--- a/src/go/types/gccgosizes.go ++++ b/src/go/types/gccgosizes.go +@@ -17,6 +17,7 @@ var gccgoArchSizes = map[string]*StdSizes{ + "arm64": {8, 8}, + "arm64be": {8, 8}, + "ia64": {8, 8}, ++ "loong64": {8, 8}, + "m68k": {4, 2}, + "mips": {4, 8}, + "mipsle": {4, 8}, +diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go +index 67052bb816..9d6cabf158 100644 +--- a/src/go/types/sizes.go ++++ b/src/go/types/sizes.go +@@ -163,6 +163,7 @@ var gcArchSizes = map[string]*StdSizes{ + "arm64": {8, 8}, + "amd64": {8, 8}, + "amd64p32": {4, 8}, ++ "loong64": {8, 8}, + "mips": {4, 4}, + "mipsle": {4, 4}, + "mips64": {8, 8}, +@@ -181,7 +182,7 @@ var gcArchSizes = map[string]*StdSizes{ + // The result is nil if a compiler/architecture pair is not known. 
+ // + // Supported architectures for compiler "gc": +-// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", ++// "386", "arm", "arm64", "amd64", "amd64p32", "loong64", "mips", "mipsle", + // "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm". + func SizesFor(compiler, arch string) Sizes { + var m map[string]*StdSizes +diff --git a/src/math/big/arith_loong64.s b/src/math/big/arith_loong64.s +new file mode 100644 +index 0000000000..fdc31bfdd0 +--- /dev/null ++++ b/src/math/big/arith_loong64.s +@@ -0,0 +1,37 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build !math_big_pure_go,loong64 ++ ++#include "textflag.h" ++ ++// This file provides fast assembly versions for the elementary ++// arithmetic operations on vectors implemented in arith.go. ++ ++TEXT ·mulWW(SB),NOSPLIT,$0 ++ JMP ·mulWW_g(SB) ++ ++TEXT ·addVV(SB),NOSPLIT,$0 ++ JMP ·addVV_g(SB) ++ ++TEXT ·subVV(SB),NOSPLIT,$0 ++ JMP ·subVV_g(SB) ++ ++TEXT ·addVW(SB),NOSPLIT,$0 ++ JMP ·addVW_g(SB) ++ ++TEXT ·subVW(SB),NOSPLIT,$0 ++ JMP ·subVW_g(SB) ++ ++TEXT ·shlVU(SB),NOSPLIT,$0 ++ JMP ·shlVU_g(SB) ++ ++TEXT ·shrVU(SB),NOSPLIT,$0 ++ JMP ·shrVU_g(SB) ++ ++TEXT ·mulAddVWW(SB),NOSPLIT,$0 ++ JMP ·mulAddVWW_g(SB) ++ ++TEXT ·addMulVVW(SB),NOSPLIT,$0 ++ JMP ·addMulVVW_g(SB) +diff --git a/src/os/endian_little.go b/src/os/endian_little.go +index 6be6020f53..e38a42e14c 100644 +--- a/src/os/endian_little.go ++++ b/src/os/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ // +-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm +-// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm ++//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm || loong64 ++// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm loong64 + + package os + +diff --git a/src/reflect/asm_loong64.s b/src/reflect/asm_loong64.s +new file mode 100644 +index 0000000000..e2bb861689 +--- /dev/null ++++ b/src/reflect/asm_loong64.s +@@ -0,0 +1,42 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++#include "funcdata.h" ++ ++#define REGCTXT R29 ++ ++// makeFuncStub is the code half of the function returned by MakeFunc. ++// See the comment on the declaration of makeFuncStub in makefunc.go ++// for more details. ++// No arg size here, runtime pulls arg map out of the func value. ++TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 ++ NO_LOCAL_POINTERS ++ MOVV REGCTXT, 8(R3) ++ MOVV $argframe+0(FP), R19 ++ MOVV R19, 16(R3) ++ MOVB R0, 40(R3) ++ ADDV $40, R3, R19 ++ MOVV R19, 24(R3) ++ MOVV R0, 32(R3) ++ JAL ·callReflect(SB) ++ RET ++ ++// methodValueCall is the code half of the function returned by makeMethodValue. ++// See the comment on the declaration of methodValueCall in makefunc.go ++// for more details. ++// No arg size here; runtime pulls arg map out of the func value. 
++TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 ++ NO_LOCAL_POINTERS ++ MOVV REGCTXT, 8(R3) ++ MOVV $argframe+0(FP), R19 ++ MOVV R19, 16(R3) ++ MOVB R0, 40(R3) ++ ADDV $40, R3, R19 ++ MOVV R19, 24(R3) ++ MOVV R0, 32(R3) ++ JAL ·callMethod(SB) ++ RET +diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go +new file mode 100644 +index 0000000000..e39ecae08c +--- /dev/null ++++ b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go +@@ -0,0 +1,14 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package cpu ++ ++const cacheLineSize = 64 ++ ++func initOptions() { ++ options = []option{} ++} +-- +2.38.0 + diff --git a/loongarch64/0036-syscall-add-syscall-support-for-linux-loong64.patch b/loongarch64/0036-syscall-add-syscall-support-for-linux-loong64.patch new file mode 100644 index 0000000..6b8031f --- /dev/null +++ b/loongarch64/0036-syscall-add-syscall-support-for-linux-loong64.patch @@ -0,0 +1,5006 @@ +From eebf6e7075efe7b05ab73b221b45dbb166ce9600 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:45:11 +0800 +Subject: [PATCH 36/82] syscall: add syscall support for linux/loong64 + +Change-Id: Ia676bd2875701639314cadbd39d97620afb3f0a2 +--- + .../internal/syscall/asm_linux_loong64.s | 29 + + src/syscall/asm_linux_loong64.s | 147 ++ + src/syscall/endian_little.go | 4 +- + src/syscall/mkall.sh | 10 + + src/syscall/syscall_dup3_linux.go | 4 +- + src/syscall/syscall_linux_loong64.go | 213 ++ + src/syscall/types_linux.go | 4 +- + src/syscall/zerrors_linux_loong64.go | 2001 +++++++++++++++++ + src/syscall/zsyscall_linux_loong64.go | 1564 +++++++++++++ + src/syscall/zsysnum_linux_loong64.go | 308 +++ + src/syscall/ztypes_linux_loong64.go | 599 +++++ + 11 files changed, 4877 insertions(+), 6 deletions(-) + create mode 100644 
src/runtime/internal/syscall/asm_linux_loong64.s + create mode 100644 src/syscall/asm_linux_loong64.s + create mode 100644 src/syscall/syscall_linux_loong64.go + create mode 100644 src/syscall/zerrors_linux_loong64.go + create mode 100644 src/syscall/zsyscall_linux_loong64.go + create mode 100644 src/syscall/zsysnum_linux_loong64.go + create mode 100644 src/syscall/ztypes_linux_loong64.go + +diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s +new file mode 100644 +index 0000000000..39bf5b1465 +--- /dev/null ++++ b/src/runtime/internal/syscall/asm_linux_loong64.s +@@ -0,0 +1,29 @@ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "textflag.h" ++ ++// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ MOVV num+0(FP), R11 // syscall entry ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ RET +diff --git a/src/syscall/asm_linux_loong64.s b/src/syscall/asm_linux_loong64.s +new file mode 100644 +index 0000000000..09f3f9773f +--- /dev/null ++++ b/src/syscall/asm_linux_loong64.s +@@ -0,0 +1,147 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build linux ++// +build loong64 ++ ++#include "textflag.h" ++ ++// ++// System calls for loong64, Linux ++// ++ ++// func Syscall(trap int64, a1, a2, a3 int64) (r1, r2, err int64); ++// Trap # in R11, args in R4-R9, return in R4 ++TEXT ·Syscall(SB),NOSPLIT,$0-56 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+32(FP) // r1 ++ MOVV R0, r2+40(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+48(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ok: ++ MOVV R4, r1+32(FP) // r1 ++ MOVV R5, r2+40(FP) // r2 ++ MOVV R0, err+48(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·RawSyscall(SB),NOSPLIT,$0-56 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+32(FP) // r1 ++ MOVV R0, r2+40(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+48(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+32(FP) // r1 ++ MOVV R5, r2+40(FP) // r2 ++ MOVV R0, err+48(FP) // errno ++ RET ++ ++TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ 
MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ RET ++ ++// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr) ++TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-32 ++ MOVV a1+8(FP), R4 ++ MOVV $0, R5 ++ MOVV $0, R6 ++ MOVV $0, R7 ++ MOVV $0, R8 ++ MOVV $0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+16(FP) // r1 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+24(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+16(FP) // r1 ++ MOVV R0, err+24(FP) // errno ++ RET ++ ++TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ RET +diff --git a/src/syscall/endian_little.go b/src/syscall/endian_little.go +index a894445f73..c62fe9e386 100644 +--- a/src/syscall/endian_little.go ++++ b/src/syscall/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ // +-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm +-// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm ++//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm || loong64 ++// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm loong64 + + package syscall + +diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh +index dffb52864b..f03c5e48b1 100755 +--- a/src/syscall/mkall.sh ++++ b/src/syscall/mkall.sh +@@ -258,6 +258,16 @@ linux_s390x) + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; ++linux_loong64) ++ GOOSARCH_in=syscall_linux_loong64.go ++ unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1) ++ if [ "$unistd_h" = "" ]; then ++ echo >&2 cannot find unistd.h ++ exit 1 ++ fi ++ mksysnum="./mksysnum_linux.pl $unistd_h" ++ mktypes="GOARCH=$GOARCH go tool cgo -godefs" ++ ;; + netbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -netbsd" +diff --git a/src/syscall/syscall_dup3_linux.go b/src/syscall/syscall_dup3_linux.go +index 66ec67b0ab..e32a90ba92 100644 +--- a/src/syscall/syscall_dup3_linux.go ++++ b/src/syscall/syscall_dup3_linux.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build android || arm64 || riscv64 +-// +build android arm64 riscv64 ++//go:build android || arm64 || riscv64 || loong64 ++// +build android arm64 riscv64 loong64 + + package syscall + +diff --git a/src/syscall/syscall_linux_loong64.go b/src/syscall/syscall_linux_loong64.go +new file mode 100644 +index 0000000000..2bad1fcdf5 +--- /dev/null ++++ b/src/syscall/syscall_linux_loong64.go +@@ -0,0 +1,213 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package syscall ++ ++import "unsafe" ++ ++// archHonorsR2 captures the fact that r2 is honored by the ++// runtime.GOARCH. Syscall conventions are generally r1, r2, err := ++// syscall(trap, ...). Not all architectures define r2 in their ++// ABI. See "man syscall". ++const archHonorsR2 = false ++ ++const _SYS_setgroups = SYS_SETGROUPS ++ ++func EpollCreate(size int) (fd int, err error) { ++ if size <= 0 { ++ return -1, EINVAL ++ } ++ return EpollCreate1(0) ++} ++ ++//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT ++//sys Fchown(fd int, uid int, gid int) (err error) ++//sys Fstat(fd int, stat *Stat_t) (err error) ++//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) ++//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) ++//sys Fstatfs(fd int, buf *Statfs_t) (err error) ++//sys Ftruncate(fd int, length int64) (err error) ++//sysnb Getegid() (egid int) ++//sysnb Geteuid() (euid int) ++//sysnb Getgid() (gid int) ++//sysnb Getuid() (uid int) ++//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) ++//sys Listen(s int, n int) (err error) ++//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 ++//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 ++//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS_RENAMEAT2 ++//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK ++//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) ++//sys Setfsgid(gid int) (err error) ++//sys Setfsuid(uid int) (err error) ++//sys Setrlimit(resource int, rlim *Rlimit) (err error) ++//sys Shutdown(fd int, how int) (err error) ++//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) ++ ++func Stat(path string, stat *Stat_t) (err error) { ++ return Fstatat(_AT_FDCWD, path, stat, 0) ++} ++ ++func Lchown(path string, uid 
int, gid int) (err error) { ++ return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW) ++} ++ ++func Lstat(path string, stat *Stat_t) (err error) { ++ return Fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW) ++} ++ ++//sys Statfs(path string, buf *Statfs_t) (err error) ++//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) ++//sys Truncate(path string, length int64) (err error) ++//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) ++//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) ++//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) ++//sysnb setgroups(n int, list *_Gid_t) (err error) ++//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) ++//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) ++//sysnb socket(domain int, typ int, proto int) (fd int, err error) ++//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) ++//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) ++//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) ++//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) ++ ++type sigset_t struct { ++ X__val [16]uint64 ++} ++ ++//sys pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_t) (n int, err error) = SYS_PSELECT6 ++ ++func Select(nfd 
int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { ++ var ts *Timespec ++ if timeout != nil { ++ ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} ++ } ++ return pselect(nfd, r, w, e, ts, nil) ++} ++ ++//sysnb Gettimeofday(tv *Timeval) (err error) ++ ++func setTimespec(sec, nsec int64) Timespec { ++ return Timespec{Sec: sec, Nsec: nsec} ++} ++ ++func setTimeval(sec, usec int64) Timeval { ++ return Timeval{Sec: sec, Usec: usec} ++} ++ ++func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(dirfd, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func Time(t *Time_t) (Time_t, error) { ++ var tv Timeval ++ err := Gettimeofday(&tv) ++ if err != nil { ++ return 0, err ++ } ++ if t != nil { ++ *t = Time_t(tv.Sec) ++ } ++ return Time_t(tv.Sec), nil ++} ++ ++func Utime(path string, buf *Utimbuf) error { ++ tv := []Timeval{ ++ {Sec: buf.Actime}, ++ {Sec: buf.Modtime}, ++ } ++ return Utimes(path, tv) ++} ++ ++func utimes(path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(_AT_FDCWD, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++// Getrlimit prefers the prlimit64 system call. ++func Getrlimit(resource int, rlim *Rlimit) error { ++ return prlimit(0, resource, nil, rlim) ++} ++ ++// Setrlimit prefers the prlimit64 system call. 
++func Setrlimit(resource int, rlim *Rlimit) error { ++ return prlimit(0, resource, rlim, nil) ++} ++ ++func (r *PtraceRegs) GetEra() uint64 { return r.Era } ++ ++func (r *PtraceRegs) SetEra(era uint64) { r.Era = era } ++ ++func (iov *Iovec) SetLen(length int) { ++ iov.Len = uint64(length) ++} ++ ++func (msghdr *Msghdr) SetControllen(length int) { ++ msghdr.Controllen = uint64(length) ++} ++ ++func (cmsg *Cmsghdr) SetLen(length int) { ++ cmsg.Len = uint64(length) ++} ++ ++func InotifyInit() (fd int, err error) { ++ return InotifyInit1(0) ++} ++ ++//sys ppoll(fds *pollFd, nfds int, timeout *Timespec, sigmask *sigset_t) (n int, err error) ++ ++func Pause() error { ++ _, err := ppoll(nil, 0, nil, nil) ++ return err ++} ++ ++func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) ++ ++func Pipe(p []int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, 0) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return ++} ++ ++ ++//sysnb pipe2(p *[2]_C_int, flags int) (err error) ++ ++func Pipe2(p []int, flags int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ ++ var pp [2]_C_int ++ err = pipe2(&pp, flags) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return ++} ++ +diff --git a/src/syscall/types_linux.go b/src/syscall/types_linux.go +index bf76be978b..1ca8912b20 100644 +--- a/src/syscall/types_linux.go ++++ b/src/syscall/types_linux.go +@@ -88,7 +88,7 @@ struct my_sockaddr_un { + + #ifdef __ARM_EABI__ + typedef struct user_regs PtraceRegs; +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__loongarch64) + typedef struct user_pt_regs PtraceRegs; + #elif defined(__powerpc64__) + typedef struct pt_regs PtraceRegs; +@@ -119,7 +119,7 @@ struct my_epoll_event { + int32_t padFd; + #endif + #if defined(__powerpc64__) || defined(__s390x__) || (defined(__riscv_xlen) && __riscv_xlen == 64) \ +- || (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) ++ || (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) || 
defined(__loongarch64) + int32_t _padFd; + #endif + int32_t fd; +diff --git a/src/syscall/zerrors_linux_loong64.go b/src/syscall/zerrors_linux_loong64.go +new file mode 100644 +index 0000000000..1281e7d960 +--- /dev/null ++++ b/src/syscall/zerrors_linux_loong64.go +@@ -0,0 +1,2001 @@ ++// mkerrors.sh ++// Code generated by the command above; DO NOT EDIT. ++ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. ++// cgo -godefs -- _const.go ++ ++package syscall ++ ++const ( ++ AF_ALG = 0x26 ++ AF_APPLETALK = 0x5 ++ AF_ASH = 0x12 ++ AF_ATMPVC = 0x8 ++ AF_ATMSVC = 0x14 ++ AF_AX25 = 0x3 ++ AF_BLUETOOTH = 0x1f ++ AF_BRIDGE = 0x7 ++ AF_CAIF = 0x25 ++ AF_CAN = 0x1d ++ AF_DECnet = 0xc ++ AF_ECONET = 0x13 ++ AF_FILE = 0x1 ++ AF_IB = 0x1b ++ AF_IEEE802154 = 0x24 ++ AF_INET = 0x2 ++ AF_INET6 = 0xa ++ AF_IPX = 0x4 ++ AF_IRDA = 0x17 ++ AF_ISDN = 0x22 ++ AF_IUCV = 0x20 ++ AF_KCM = 0x29 ++ AF_KEY = 0xf ++ AF_LLC = 0x1a ++ AF_LOCAL = 0x1 ++ AF_MAX = 0x2c ++ AF_MPLS = 0x1c ++ AF_NETBEUI = 0xd ++ AF_NETLINK = 0x10 ++ AF_NETROM = 0x6 ++ AF_NFC = 0x27 ++ AF_PACKET = 0x11 ++ AF_PHONET = 0x23 ++ AF_PPPOX = 0x18 ++ AF_QIPCRTR = 0x2a ++ AF_RDS = 0x15 ++ AF_ROSE = 0xb ++ AF_ROUTE = 0x10 ++ AF_RXRPC = 0x21 ++ AF_SECURITY = 0xe ++ AF_SMC = 0x2b ++ AF_SNA = 0x16 ++ AF_TIPC = 0x1e ++ AF_UNIX = 0x1 ++ AF_UNSPEC = 0x0 ++ AF_VSOCK = 0x28 ++ AF_WANPIPE = 0x19 ++ AF_X25 = 0x9 ++ ARPHRD_6LOWPAN = 0x339 ++ ARPHRD_ADAPT = 0x108 ++ ARPHRD_APPLETLK = 0x8 ++ ARPHRD_ARCNET = 0x7 ++ ARPHRD_ASH = 0x30d ++ ARPHRD_ATM = 0x13 ++ ARPHRD_AX25 = 0x3 ++ ARPHRD_BIF = 0x307 ++ ARPHRD_CAIF = 0x336 ++ ARPHRD_CAN = 0x118 ++ ARPHRD_CHAOS = 0x5 ++ ARPHRD_CISCO = 0x201 ++ ARPHRD_CSLIP = 0x101 ++ ARPHRD_CSLIP6 = 0x103 ++ ARPHRD_DDCMP = 0x205 ++ ARPHRD_DLCI = 0xf ++ ARPHRD_ECONET = 0x30e ++ ARPHRD_EETHER = 0x2 ++ ARPHRD_ETHER = 0x1 ++ ARPHRD_EUI64 = 0x1b ++ ARPHRD_FCAL = 0x311 ++ ARPHRD_FCFABRIC = 0x313 ++ ARPHRD_FCPL = 0x312 ++ ARPHRD_FCPP = 0x310 ++ ARPHRD_FDDI = 0x306 ++ ARPHRD_FRAD = 0x302 ++ ARPHRD_HDLC = 0x201 ++ 
ARPHRD_HIPPI = 0x30c ++ ARPHRD_HWX25 = 0x110 ++ ARPHRD_IEEE1394 = 0x18 ++ ARPHRD_IEEE802 = 0x6 ++ ARPHRD_IEEE80211 = 0x321 ++ ARPHRD_IEEE80211_PRISM = 0x322 ++ ARPHRD_IEEE80211_RADIOTAP = 0x323 ++ ARPHRD_IEEE802154 = 0x324 ++ ARPHRD_IEEE802154_MONITOR = 0x325 ++ ARPHRD_IEEE802_TR = 0x320 ++ ARPHRD_INFINIBAND = 0x20 ++ ARPHRD_IP6GRE = 0x337 ++ ARPHRD_IPDDP = 0x309 ++ ARPHRD_IPGRE = 0x30a ++ ARPHRD_IRDA = 0x30f ++ ARPHRD_LAPB = 0x204 ++ ARPHRD_LOCALTLK = 0x305 ++ ARPHRD_LOOPBACK = 0x304 ++ ARPHRD_METRICOM = 0x17 ++ ARPHRD_NETLINK = 0x338 ++ ARPHRD_NETROM = 0x0 ++ ARPHRD_NONE = 0xfffe ++ ARPHRD_PHONET = 0x334 ++ ARPHRD_PHONET_PIPE = 0x335 ++ ARPHRD_PIMREG = 0x30b ++ ARPHRD_PPP = 0x200 ++ ARPHRD_PRONET = 0x4 ++ ARPHRD_RAWHDLC = 0x206 ++ ARPHRD_RAWIP = 0x207 ++ ARPHRD_ROSE = 0x10e ++ ARPHRD_RSRVD = 0x104 ++ ARPHRD_SIT = 0x308 ++ ARPHRD_SKIP = 0x303 ++ ARPHRD_SLIP = 0x100 ++ ARPHRD_SLIP6 = 0x102 ++ ARPHRD_TUNNEL = 0x300 ++ ARPHRD_TUNNEL6 = 0x301 ++ ARPHRD_VOID = 0xffff ++ ARPHRD_VSOCKMON = 0x33a ++ ARPHRD_X25 = 0x10f ++ B0 = 0x0 ++ B1000000 = 0x1008 ++ B110 = 0x3 ++ B115200 = 0x1002 ++ B1152000 = 0x1009 ++ B1200 = 0x9 ++ B134 = 0x4 ++ B150 = 0x5 ++ B1500000 = 0x100a ++ B1800 = 0xa ++ B19200 = 0xe ++ B200 = 0x6 ++ B2000000 = 0x100b ++ B230400 = 0x1003 ++ B2400 = 0xb ++ B2500000 = 0x100c ++ B300 = 0x7 ++ B3000000 = 0x100d ++ B3500000 = 0x100e ++ B38400 = 0xf ++ B4000000 = 0x100f ++ B460800 = 0x1004 ++ B4800 = 0xc ++ B50 = 0x1 ++ B500000 = 0x1005 ++ B57600 = 0x1001 ++ B576000 = 0x1006 ++ B600 = 0x8 ++ B75 = 0x2 ++ B921600 = 0x1007 ++ B9600 = 0xd ++ BPF_A = 0x10 ++ BPF_ABS = 0x20 ++ BPF_ADD = 0x0 ++ BPF_ALU = 0x4 ++ BPF_AND = 0x50 ++ BPF_B = 0x10 ++ BPF_DIV = 0x30 ++ BPF_H = 0x8 ++ BPF_IMM = 0x0 ++ BPF_IND = 0x40 ++ BPF_JA = 0x0 ++ BPF_JEQ = 0x10 ++ BPF_JGE = 0x30 ++ BPF_JGT = 0x20 ++ BPF_JMP = 0x5 ++ BPF_JSET = 0x40 ++ BPF_K = 0x0 ++ BPF_LD = 0x0 ++ BPF_LDX = 0x1 ++ BPF_LEN = 0x80 ++ BPF_LL_OFF = -0x200000 ++ BPF_LSH = 0x60 ++ BPF_MAJOR_VERSION = 0x1 ++ BPF_MAXINSNS = 
0x1000 ++ BPF_MEM = 0x60 ++ BPF_MEMWORDS = 0x10 ++ BPF_MINOR_VERSION = 0x1 ++ BPF_MISC = 0x7 ++ BPF_MOD = 0x90 ++ BPF_MSH = 0xa0 ++ BPF_MUL = 0x20 ++ BPF_NEG = 0x80 ++ BPF_NET_OFF = -0x100000 ++ BPF_OR = 0x40 ++ BPF_RET = 0x6 ++ BPF_RSH = 0x70 ++ BPF_ST = 0x2 ++ BPF_STX = 0x3 ++ BPF_SUB = 0x10 ++ BPF_TAX = 0x0 ++ BPF_TXA = 0x80 ++ BPF_W = 0x0 ++ BPF_X = 0x8 ++ BPF_XOR = 0xa0 ++ BRKINT = 0x2 ++ CFLUSH = 0xf ++ CLOCAL = 0x800 ++ CLONE_CHILD_CLEARTID = 0x200000 ++ CLONE_CHILD_SETTID = 0x1000000 ++ CLONE_DETACHED = 0x400000 ++ CLONE_FILES = 0x400 ++ CLONE_FS = 0x200 ++ CLONE_IO = 0x80000000 ++ CLONE_NEWCGROUP = 0x2000000 ++ CLONE_NEWIPC = 0x8000000 ++ CLONE_NEWNET = 0x40000000 ++ CLONE_NEWNS = 0x20000 ++ CLONE_NEWPID = 0x20000000 ++ CLONE_NEWUSER = 0x10000000 ++ CLONE_NEWUTS = 0x4000000 ++ CLONE_PARENT = 0x8000 ++ CLONE_PARENT_SETTID = 0x100000 ++ CLONE_PTRACE = 0x2000 ++ CLONE_SETTLS = 0x80000 ++ CLONE_SIGHAND = 0x800 ++ CLONE_SYSVSEM = 0x40000 ++ CLONE_THREAD = 0x10000 ++ CLONE_UNTRACED = 0x800000 ++ CLONE_VFORK = 0x4000 ++ CLONE_VM = 0x100 ++ CREAD = 0x80 ++ CS5 = 0x0 ++ CS6 = 0x10 ++ CS7 = 0x20 ++ CS8 = 0x30 ++ CSIGNAL = 0xff ++ CSIZE = 0x30 ++ CSTART = 0x11 ++ CSTATUS = 0x0 ++ CSTOP = 0x13 ++ CSTOPB = 0x40 ++ CSUSP = 0x1a ++ DT_BLK = 0x6 ++ DT_CHR = 0x2 ++ DT_DIR = 0x4 ++ DT_FIFO = 0x1 ++ DT_LNK = 0xa ++ DT_REG = 0x8 ++ DT_SOCK = 0xc ++ DT_UNKNOWN = 0x0 ++ DT_WHT = 0xe ++ ECHO = 0x8 ++ ECHOCTL = 0x200 ++ ECHOE = 0x10 ++ ECHOK = 0x20 ++ ECHOKE = 0x800 ++ ECHONL = 0x40 ++ ECHOPRT = 0x400 ++ ENCODING_DEFAULT = 0x0 ++ ENCODING_FM_MARK = 0x3 ++ ENCODING_FM_SPACE = 0x4 ++ ENCODING_MANCHESTER = 0x5 ++ ENCODING_NRZ = 0x1 ++ ENCODING_NRZI = 0x2 ++ EPOLLERR = 0x8 ++ EPOLLET = 0x80000000 ++ EPOLLEXCLUSIVE = 0x10000000 ++ EPOLLHUP = 0x10 ++ EPOLLIN = 0x1 ++ EPOLLMSG = 0x400 ++ EPOLLONESHOT = 0x40000000 ++ EPOLLOUT = 0x4 ++ EPOLLPRI = 0x2 ++ EPOLLRDBAND = 0x80 ++ EPOLLRDHUP = 0x2000 ++ EPOLLRDNORM = 0x40 ++ EPOLLWAKEUP = 0x20000000 ++ EPOLLWRBAND = 0x200 ++ EPOLLWRNORM = 0x100 
++ EPOLL_CLOEXEC = 0x80000 ++ EPOLL_CTL_ADD = 0x1 ++ EPOLL_CTL_DEL = 0x2 ++ EPOLL_CTL_MOD = 0x3 ++ ETH_P_1588 = 0x88f7 ++ ETH_P_8021AD = 0x88a8 ++ ETH_P_8021AH = 0x88e7 ++ ETH_P_8021Q = 0x8100 ++ ETH_P_80221 = 0x8917 ++ ETH_P_802_2 = 0x4 ++ ETH_P_802_3 = 0x1 ++ ETH_P_802_3_MIN = 0x600 ++ ETH_P_802_EX1 = 0x88b5 ++ ETH_P_AARP = 0x80f3 ++ ETH_P_AF_IUCV = 0xfbfb ++ ETH_P_ALL = 0x3 ++ ETH_P_AOE = 0x88a2 ++ ETH_P_ARCNET = 0x1a ++ ETH_P_ARP = 0x806 ++ ETH_P_ATALK = 0x809b ++ ETH_P_ATMFATE = 0x8884 ++ ETH_P_ATMMPOA = 0x884c ++ ETH_P_AX25 = 0x2 ++ ETH_P_BATMAN = 0x4305 ++ ETH_P_BPQ = 0x8ff ++ ETH_P_CAIF = 0xf7 ++ ETH_P_CAN = 0xc ++ ETH_P_CANFD = 0xd ++ ETH_P_CONTROL = 0x16 ++ ETH_P_CUST = 0x6006 ++ ETH_P_DDCMP = 0x6 ++ ETH_P_DEC = 0x6000 ++ ETH_P_DIAG = 0x6005 ++ ETH_P_DNA_DL = 0x6001 ++ ETH_P_DNA_RC = 0x6002 ++ ETH_P_DNA_RT = 0x6003 ++ ETH_P_DSA = 0x1b ++ ETH_P_ECONET = 0x18 ++ ETH_P_EDSA = 0xdada ++ ETH_P_ERSPAN = 0x88be ++ ETH_P_ERSPAN2 = 0x22eb ++ ETH_P_FCOE = 0x8906 ++ ETH_P_FIP = 0x8914 ++ ETH_P_HDLC = 0x19 ++ ETH_P_HSR = 0x892f ++ ETH_P_IBOE = 0x8915 ++ ETH_P_IEEE802154 = 0xf6 ++ ETH_P_IEEEPUP = 0xa00 ++ ETH_P_IEEEPUPAT = 0xa01 ++ ETH_P_IFE = 0xed3e ++ ETH_P_IP = 0x800 ++ ETH_P_IPV6 = 0x86dd ++ ETH_P_IPX = 0x8137 ++ ETH_P_IRDA = 0x17 ++ ETH_P_LAT = 0x6004 ++ ETH_P_LINK_CTL = 0x886c ++ ETH_P_LOCALTALK = 0x9 ++ ETH_P_LOOP = 0x60 ++ ETH_P_LOOPBACK = 0x9000 ++ ETH_P_MACSEC = 0x88e5 ++ ETH_P_MAP = 0xf9 ++ ETH_P_MOBITEX = 0x15 ++ ETH_P_MPLS_MC = 0x8848 ++ ETH_P_MPLS_UC = 0x8847 ++ ETH_P_MVRP = 0x88f5 ++ ETH_P_NCSI = 0x88f8 ++ ETH_P_NSH = 0x894f ++ ETH_P_PAE = 0x888e ++ ETH_P_PAUSE = 0x8808 ++ ETH_P_PHONET = 0xf5 ++ ETH_P_PPPTALK = 0x10 ++ ETH_P_PPP_DISC = 0x8863 ++ ETH_P_PPP_MP = 0x8 ++ ETH_P_PPP_SES = 0x8864 ++ ETH_P_PREAUTH = 0x88c7 ++ ETH_P_PRP = 0x88fb ++ ETH_P_PUP = 0x200 ++ ETH_P_PUPAT = 0x201 ++ ETH_P_QINQ1 = 0x9100 ++ ETH_P_QINQ2 = 0x9200 ++ ETH_P_QINQ3 = 0x9300 ++ ETH_P_RARP = 0x8035 ++ ETH_P_SCA = 0x6007 ++ ETH_P_SLOW = 0x8809 ++ ETH_P_SNAP = 0x5 ++ ETH_P_TDLS = 
0x890d ++ ETH_P_TEB = 0x6558 ++ ETH_P_TIPC = 0x88ca ++ ETH_P_TRAILER = 0x1c ++ ETH_P_TR_802_2 = 0x11 ++ ETH_P_TSN = 0x22f0 ++ ETH_P_WAN_PPP = 0x7 ++ ETH_P_WCCP = 0x883e ++ ETH_P_X25 = 0x805 ++ ETH_P_XDSA = 0xf8 ++ EXTA = 0xe ++ EXTB = 0xf ++ EXTPROC = 0x10000 ++ FD_CLOEXEC = 0x1 ++ FD_SETSIZE = 0x400 ++ FLUSHO = 0x1000 ++ F_ADD_SEALS = 0x409 ++ F_DUPFD = 0x0 ++ F_DUPFD_CLOEXEC = 0x406 ++ F_EXLCK = 0x4 ++ F_GETFD = 0x1 ++ F_GETFL = 0x3 ++ F_GETLEASE = 0x401 ++ F_GETLK = 0x5 ++ F_GETLK64 = 0x5 ++ F_GETOWN = 0x9 ++ F_GETOWN_EX = 0x10 ++ F_GETPIPE_SZ = 0x408 ++ F_GETSIG = 0xb ++ F_GET_FILE_RW_HINT = 0x40d ++ F_GET_RW_HINT = 0x40b ++ F_GET_SEALS = 0x40a ++ F_LOCK = 0x1 ++ F_NOTIFY = 0x402 ++ F_OFD_GETLK = 0x24 ++ F_OFD_SETLK = 0x25 ++ F_OFD_SETLKW = 0x26 ++ F_OK = 0x0 ++ F_RDLCK = 0x0 ++ F_SEAL_GROW = 0x4 ++ F_SEAL_SEAL = 0x1 ++ F_SEAL_SHRINK = 0x2 ++ F_SEAL_WRITE = 0x8 ++ F_SETFD = 0x2 ++ F_SETFL = 0x4 ++ F_SETLEASE = 0x400 ++ F_SETLK = 0x6 ++ F_SETLK64 = 0x6 ++ F_SETLKW = 0x7 ++ F_SETLKW64 = 0x7 ++ F_SETOWN = 0x8 ++ F_SETOWN_EX = 0xf ++ F_SETPIPE_SZ = 0x407 ++ F_SETSIG = 0xa ++ F_SET_FILE_RW_HINT = 0x40e ++ F_SET_RW_HINT = 0x40c ++ F_SHLCK = 0x8 ++ F_TEST = 0x3 ++ F_TLOCK = 0x2 ++ F_ULOCK = 0x0 ++ F_UNLCK = 0x2 ++ F_WRLCK = 0x1 ++ HUPCL = 0x400 ++ ICANON = 0x2 ++ ICMPV6_FILTER = 0x1 ++ ICRNL = 0x100 ++ IEXTEN = 0x8000 ++ IFA_F_DADFAILED = 0x8 ++ IFA_F_DEPRECATED = 0x20 ++ IFA_F_HOMEADDRESS = 0x10 ++ IFA_F_MANAGETEMPADDR = 0x100 ++ IFA_F_MCAUTOJOIN = 0x400 ++ IFA_F_NODAD = 0x2 ++ IFA_F_NOPREFIXROUTE = 0x200 ++ IFA_F_OPTIMISTIC = 0x4 ++ IFA_F_PERMANENT = 0x80 ++ IFA_F_SECONDARY = 0x1 ++ IFA_F_STABLE_PRIVACY = 0x800 ++ IFA_F_TEMPORARY = 0x1 ++ IFA_F_TENTATIVE = 0x40 ++ IFA_MAX = 0x9 ++ IFF_ALLMULTI = 0x200 ++ IFF_ATTACH_QUEUE = 0x200 ++ IFF_AUTOMEDIA = 0x4000 ++ IFF_BROADCAST = 0x2 ++ IFF_DEBUG = 0x4 ++ IFF_DETACH_QUEUE = 0x400 ++ IFF_DORMANT = 0x20000 ++ IFF_DYNAMIC = 0x8000 ++ IFF_ECHO = 0x40000 ++ IFF_LOOPBACK = 0x8 ++ IFF_LOWER_UP = 0x10000 ++ IFF_MASTER = 0x400 ++ 
IFF_MULTICAST = 0x1000 ++ IFF_MULTI_QUEUE = 0x100 ++ IFF_NAPI = 0x10 ++ IFF_NAPI_FRAGS = 0x20 ++ IFF_NOARP = 0x80 ++ IFF_NOFILTER = 0x1000 ++ IFF_NOTRAILERS = 0x20 ++ IFF_NO_PI = 0x1000 ++ IFF_ONE_QUEUE = 0x2000 ++ IFF_PERSIST = 0x800 ++ IFF_POINTOPOINT = 0x10 ++ IFF_PORTSEL = 0x2000 ++ IFF_PROMISC = 0x100 ++ IFF_RUNNING = 0x40 ++ IFF_SLAVE = 0x800 ++ IFF_TAP = 0x2 ++ IFF_TUN = 0x1 ++ IFF_TUN_EXCL = 0x8000 ++ IFF_UP = 0x1 ++ IFF_VNET_HDR = 0x4000 ++ IFF_VOLATILE = 0x70c5a ++ IFNAMSIZ = 0x10 ++ IGNBRK = 0x1 ++ IGNCR = 0x80 ++ IGNPAR = 0x4 ++ IMAXBEL = 0x2000 ++ INLCR = 0x40 ++ INPCK = 0x10 ++ IN_ACCESS = 0x1 ++ IN_ALL_EVENTS = 0xfff ++ IN_ATTRIB = 0x4 ++ IN_CLASSA_HOST = 0xffffff ++ IN_CLASSA_MAX = 0x80 ++ IN_CLASSA_NET = 0xff000000 ++ IN_CLASSA_NSHIFT = 0x18 ++ IN_CLASSB_HOST = 0xffff ++ IN_CLASSB_MAX = 0x10000 ++ IN_CLASSB_NET = 0xffff0000 ++ IN_CLASSB_NSHIFT = 0x10 ++ IN_CLASSC_HOST = 0xff ++ IN_CLASSC_NET = 0xffffff00 ++ IN_CLASSC_NSHIFT = 0x8 ++ IN_CLOEXEC = 0x80000 ++ IN_CLOSE = 0x18 ++ IN_CLOSE_NOWRITE = 0x10 ++ IN_CLOSE_WRITE = 0x8 ++ IN_CREATE = 0x100 ++ IN_DELETE = 0x200 ++ IN_DELETE_SELF = 0x400 ++ IN_DONT_FOLLOW = 0x2000000 ++ IN_EXCL_UNLINK = 0x4000000 ++ IN_IGNORED = 0x8000 ++ IN_ISDIR = 0x40000000 ++ IN_LOOPBACKNET = 0x7f ++ IN_MASK_ADD = 0x20000000 ++ IN_MODIFY = 0x2 ++ IN_MOVE = 0xc0 ++ IN_MOVED_FROM = 0x40 ++ IN_MOVED_TO = 0x80 ++ IN_MOVE_SELF = 0x800 ++ IN_NONBLOCK = 0x800 ++ IN_ONESHOT = 0x80000000 ++ IN_ONLYDIR = 0x1000000 ++ IN_OPEN = 0x20 ++ IN_Q_OVERFLOW = 0x4000 ++ IN_UNMOUNT = 0x2000 ++ IPPROTO_AH = 0x33 ++ IPPROTO_BEETPH = 0x5e ++ IPPROTO_COMP = 0x6c ++ IPPROTO_DCCP = 0x21 ++ IPPROTO_DSTOPTS = 0x3c ++ IPPROTO_EGP = 0x8 ++ IPPROTO_ENCAP = 0x62 ++ IPPROTO_ESP = 0x32 ++ IPPROTO_FRAGMENT = 0x2c ++ IPPROTO_GRE = 0x2f ++ IPPROTO_HOPOPTS = 0x0 ++ IPPROTO_ICMP = 0x1 ++ IPPROTO_ICMPV6 = 0x3a ++ IPPROTO_IDP = 0x16 ++ IPPROTO_IGMP = 0x2 ++ IPPROTO_IP = 0x0 ++ IPPROTO_IPIP = 0x4 ++ IPPROTO_IPV6 = 0x29 ++ IPPROTO_MH = 0x87 ++ IPPROTO_MPLS = 0x89 ++ 
IPPROTO_MTP = 0x5c ++ IPPROTO_NONE = 0x3b ++ IPPROTO_PIM = 0x67 ++ IPPROTO_PUP = 0xc ++ IPPROTO_RAW = 0xff ++ IPPROTO_ROUTING = 0x2b ++ IPPROTO_RSVP = 0x2e ++ IPPROTO_SCTP = 0x84 ++ IPPROTO_TCP = 0x6 ++ IPPROTO_TP = 0x1d ++ IPPROTO_UDP = 0x11 ++ IPPROTO_UDPLITE = 0x88 ++ IPV6_2292DSTOPTS = 0x4 ++ IPV6_2292HOPLIMIT = 0x8 ++ IPV6_2292HOPOPTS = 0x3 ++ IPV6_2292PKTINFO = 0x2 ++ IPV6_2292PKTOPTIONS = 0x6 ++ IPV6_2292RTHDR = 0x5 ++ IPV6_ADDRFORM = 0x1 ++ IPV6_ADDR_PREFERENCES = 0x48 ++ IPV6_ADD_MEMBERSHIP = 0x14 ++ IPV6_AUTHHDR = 0xa ++ IPV6_AUTOFLOWLABEL = 0x46 ++ IPV6_CHECKSUM = 0x7 ++ IPV6_DONTFRAG = 0x3e ++ IPV6_DROP_MEMBERSHIP = 0x15 ++ IPV6_DSTOPTS = 0x3b ++ IPV6_FREEBIND = 0x4e ++ IPV6_HDRINCL = 0x24 ++ IPV6_HOPLIMIT = 0x34 ++ IPV6_HOPOPTS = 0x36 ++ IPV6_IPSEC_POLICY = 0x22 ++ IPV6_JOIN_ANYCAST = 0x1b ++ IPV6_JOIN_GROUP = 0x14 ++ IPV6_LEAVE_ANYCAST = 0x1c ++ IPV6_LEAVE_GROUP = 0x15 ++ IPV6_MINHOPCOUNT = 0x49 ++ IPV6_MTU = 0x18 ++ IPV6_MTU_DISCOVER = 0x17 ++ IPV6_MULTICAST_HOPS = 0x12 ++ IPV6_MULTICAST_IF = 0x11 ++ IPV6_MULTICAST_LOOP = 0x13 ++ IPV6_NEXTHOP = 0x9 ++ IPV6_ORIGDSTADDR = 0x4a ++ IPV6_PATHMTU = 0x3d ++ IPV6_PKTINFO = 0x32 ++ IPV6_PMTUDISC_DO = 0x2 ++ IPV6_PMTUDISC_DONT = 0x0 ++ IPV6_PMTUDISC_INTERFACE = 0x4 ++ IPV6_PMTUDISC_OMIT = 0x5 ++ IPV6_PMTUDISC_PROBE = 0x3 ++ IPV6_PMTUDISC_WANT = 0x1 ++ IPV6_RECVDSTOPTS = 0x3a ++ IPV6_RECVERR = 0x19 ++ IPV6_RECVFRAGSIZE = 0x4d ++ IPV6_RECVHOPLIMIT = 0x33 ++ IPV6_RECVHOPOPTS = 0x35 ++ IPV6_RECVORIGDSTADDR = 0x4a ++ IPV6_RECVPATHMTU = 0x3c ++ IPV6_RECVPKTINFO = 0x31 ++ IPV6_RECVRTHDR = 0x38 ++ IPV6_RECVTCLASS = 0x42 ++ IPV6_ROUTER_ALERT = 0x16 ++ IPV6_RTHDR = 0x39 ++ IPV6_RTHDRDSTOPTS = 0x37 ++ IPV6_RTHDR_LOOSE = 0x0 ++ IPV6_RTHDR_STRICT = 0x1 ++ IPV6_RTHDR_TYPE_0 = 0x0 ++ IPV6_RXDSTOPTS = 0x3b ++ IPV6_RXHOPOPTS = 0x36 ++ IPV6_TCLASS = 0x43 ++ IPV6_TRANSPARENT = 0x4b ++ IPV6_UNICAST_HOPS = 0x10 ++ IPV6_UNICAST_IF = 0x4c ++ IPV6_V6ONLY = 0x1a ++ IPV6_XFRM_POLICY = 0x23 ++ IP_ADD_MEMBERSHIP = 0x23 ++ 
IP_ADD_SOURCE_MEMBERSHIP = 0x27 ++ IP_BIND_ADDRESS_NO_PORT = 0x18 ++ IP_BLOCK_SOURCE = 0x26 ++ IP_CHECKSUM = 0x17 ++ IP_DEFAULT_MULTICAST_LOOP = 0x1 ++ IP_DEFAULT_MULTICAST_TTL = 0x1 ++ IP_DF = 0x4000 ++ IP_DROP_MEMBERSHIP = 0x24 ++ IP_DROP_SOURCE_MEMBERSHIP = 0x28 ++ IP_FREEBIND = 0xf ++ IP_HDRINCL = 0x3 ++ IP_IPSEC_POLICY = 0x10 ++ IP_MAXPACKET = 0xffff ++ IP_MAX_MEMBERSHIPS = 0x14 ++ IP_MF = 0x2000 ++ IP_MINTTL = 0x15 ++ IP_MSFILTER = 0x29 ++ IP_MSS = 0x240 ++ IP_MTU = 0xe ++ IP_MTU_DISCOVER = 0xa ++ IP_MULTICAST_ALL = 0x31 ++ IP_MULTICAST_IF = 0x20 ++ IP_MULTICAST_LOOP = 0x22 ++ IP_MULTICAST_TTL = 0x21 ++ IP_NODEFRAG = 0x16 ++ IP_OFFMASK = 0x1fff ++ IP_OPTIONS = 0x4 ++ IP_ORIGDSTADDR = 0x14 ++ IP_PASSSEC = 0x12 ++ IP_PKTINFO = 0x8 ++ IP_PKTOPTIONS = 0x9 ++ IP_PMTUDISC = 0xa ++ IP_PMTUDISC_DO = 0x2 ++ IP_PMTUDISC_DONT = 0x0 ++ IP_PMTUDISC_INTERFACE = 0x4 ++ IP_PMTUDISC_OMIT = 0x5 ++ IP_PMTUDISC_PROBE = 0x3 ++ IP_PMTUDISC_WANT = 0x1 ++ IP_RECVERR = 0xb ++ IP_RECVFRAGSIZE = 0x19 ++ IP_RECVOPTS = 0x6 ++ IP_RECVORIGDSTADDR = 0x14 ++ IP_RECVRETOPTS = 0x7 ++ IP_RECVTOS = 0xd ++ IP_RECVTTL = 0xc ++ IP_RETOPTS = 0x7 ++ IP_RF = 0x8000 ++ IP_ROUTER_ALERT = 0x5 ++ IP_TOS = 0x1 ++ IP_TRANSPARENT = 0x13 ++ IP_TTL = 0x2 ++ IP_UNBLOCK_SOURCE = 0x25 ++ IP_UNICAST_IF = 0x32 ++ IP_XFRM_POLICY = 0x11 ++ ISIG = 0x1 ++ ISTRIP = 0x20 ++ IUTF8 = 0x4000 ++ IXANY = 0x800 ++ IXOFF = 0x1000 ++ IXON = 0x400 ++ LINUX_REBOOT_CMD_CAD_OFF = 0x0 ++ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef ++ LINUX_REBOOT_CMD_HALT = 0xcdef0123 ++ LINUX_REBOOT_CMD_KEXEC = 0x45584543 ++ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc ++ LINUX_REBOOT_CMD_RESTART = 0x1234567 ++ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 ++ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 ++ LINUX_REBOOT_MAGIC1 = 0xfee1dead ++ LINUX_REBOOT_MAGIC2 = 0x28121969 ++ LOCK_EX = 0x2 ++ LOCK_NB = 0x4 ++ LOCK_SH = 0x1 ++ LOCK_UN = 0x8 ++ MADV_DODUMP = 0x11 ++ MADV_DOFORK = 0xb ++ MADV_DONTDUMP = 0x10 ++ MADV_DONTFORK = 0xa ++ MADV_DONTNEED = 0x4 ++ MADV_FREE = 0x8 
++ MADV_HUGEPAGE = 0xe ++ MADV_HWPOISON = 0x64 ++ MADV_KEEPONFORK = 0x13 ++ MADV_MERGEABLE = 0xc ++ MADV_NOHUGEPAGE = 0xf ++ MADV_NORMAL = 0x0 ++ MADV_RANDOM = 0x1 ++ MADV_REMOVE = 0x9 ++ MADV_SEQUENTIAL = 0x2 ++ MADV_UNMERGEABLE = 0xd ++ MADV_WILLNEED = 0x3 ++ MADV_WIPEONFORK = 0x12 ++ MAP_ANON = 0x20 ++ MAP_ANONYMOUS = 0x20 ++ MAP_DENYWRITE = 0x800 ++ MAP_EXECUTABLE = 0x1000 ++ MAP_FILE = 0x0 ++ MAP_FIXED = 0x10 ++ MAP_FIXED_NOREPLACE = 0x100000 ++ MAP_GROWSDOWN = 0x100 ++ MAP_HUGETLB = 0x40000 ++ MAP_HUGE_MASK = 0x3f ++ MAP_HUGE_SHIFT = 0x1a ++ MAP_LOCKED = 0x2000 ++ MAP_NONBLOCK = 0x10000 ++ MAP_NORESERVE = 0x4000 ++ MAP_POPULATE = 0x8000 ++ MAP_PRIVATE = 0x2 ++ MAP_SHARED = 0x1 ++ MAP_SHARED_VALIDATE = 0x3 ++ MAP_STACK = 0x20000 ++ MAP_SYNC = 0x80000 ++ MAP_TYPE = 0xf ++ MCL_CURRENT = 0x1 ++ MCL_FUTURE = 0x2 ++ MCL_ONFAULT = 0x4 ++ MNT_DETACH = 0x2 ++ MNT_EXPIRE = 0x4 ++ MNT_FORCE = 0x1 ++ MSG_BATCH = 0x40000 ++ MSG_CMSG_CLOEXEC = 0x40000000 ++ MSG_CONFIRM = 0x800 ++ MSG_CTRUNC = 0x8 ++ MSG_DONTROUTE = 0x4 ++ MSG_DONTWAIT = 0x40 ++ MSG_EOR = 0x80 ++ MSG_ERRQUEUE = 0x2000 ++ MSG_FASTOPEN = 0x20000000 ++ MSG_FIN = 0x200 ++ MSG_MORE = 0x8000 ++ MSG_NOSIGNAL = 0x4000 ++ MSG_OOB = 0x1 ++ MSG_PEEK = 0x2 ++ MSG_PROXY = 0x10 ++ MSG_RST = 0x1000 ++ MSG_SYN = 0x400 ++ MSG_TRUNC = 0x20 ++ MSG_TRYHARD = 0x4 ++ MSG_WAITALL = 0x100 ++ MSG_WAITFORONE = 0x10000 ++ MSG_ZEROCOPY = 0x4000000 ++ MS_ACTIVE = 0x40000000 ++ MS_ASYNC = 0x1 ++ MS_BIND = 0x1000 ++ MS_DIRSYNC = 0x80 ++ MS_INVALIDATE = 0x2 ++ MS_I_VERSION = 0x800000 ++ MS_KERNMOUNT = 0x400000 ++ MS_LAZYTIME = 0x2000000 ++ MS_MANDLOCK = 0x40 ++ MS_MGC_MSK = 0xffff0000 ++ MS_MGC_VAL = 0xc0ed0000 ++ MS_MOVE = 0x2000 ++ MS_NOATIME = 0x400 ++ MS_NODEV = 0x4 ++ MS_NODIRATIME = 0x800 ++ MS_NOEXEC = 0x8 ++ MS_NOSUID = 0x2 ++ MS_NOUSER = -0x80000000 ++ MS_POSIXACL = 0x10000 ++ MS_PRIVATE = 0x40000 ++ MS_RDONLY = 0x1 ++ MS_REC = 0x4000 ++ MS_RELATIME = 0x200000 ++ MS_REMOUNT = 0x20 ++ MS_RMT_MASK = 0x2800051 ++ MS_SHARED = 
0x100000 ++ MS_SILENT = 0x8000 ++ MS_SLAVE = 0x80000 ++ MS_STRICTATIME = 0x1000000 ++ MS_SYNC = 0x4 ++ MS_SYNCHRONOUS = 0x10 ++ MS_UNBINDABLE = 0x20000 ++ NAME_MAX = 0xff ++ NETLINK_ADD_MEMBERSHIP = 0x1 ++ NETLINK_AUDIT = 0x9 ++ NETLINK_BROADCAST_ERROR = 0x4 ++ NETLINK_CAP_ACK = 0xa ++ NETLINK_CONNECTOR = 0xb ++ NETLINK_CRYPTO = 0x15 ++ NETLINK_DNRTMSG = 0xe ++ NETLINK_DROP_MEMBERSHIP = 0x2 ++ NETLINK_ECRYPTFS = 0x13 ++ NETLINK_EXT_ACK = 0xb ++ NETLINK_FIB_LOOKUP = 0xa ++ NETLINK_FIREWALL = 0x3 ++ NETLINK_GENERIC = 0x10 ++ NETLINK_INET_DIAG = 0x4 ++ NETLINK_IP6_FW = 0xd ++ NETLINK_ISCSI = 0x8 ++ NETLINK_KOBJECT_UEVENT = 0xf ++ NETLINK_LISTEN_ALL_NSID = 0x8 ++ NETLINK_LIST_MEMBERSHIPS = 0x9 ++ NETLINK_NETFILTER = 0xc ++ NETLINK_NFLOG = 0x5 ++ NETLINK_NO_ENOBUFS = 0x5 ++ NETLINK_PKTINFO = 0x3 ++ NETLINK_RDMA = 0x14 ++ NETLINK_ROUTE = 0x0 ++ NETLINK_RX_RING = 0x6 ++ NETLINK_SCSITRANSPORT = 0x12 ++ NETLINK_SELINUX = 0x7 ++ NETLINK_SMC = 0x16 ++ NETLINK_SOCK_DIAG = 0x4 ++ NETLINK_TX_RING = 0x7 ++ NETLINK_UNUSED = 0x1 ++ NETLINK_USERSOCK = 0x2 ++ NETLINK_XFRM = 0x6 ++ NLA_ALIGNTO = 0x4 ++ NLA_F_NESTED = 0x8000 ++ NLA_F_NET_BYTEORDER = 0x4000 ++ NLA_HDRLEN = 0x4 ++ NLMSG_ALIGNTO = 0x4 ++ NLMSG_DONE = 0x3 ++ NLMSG_ERROR = 0x2 ++ NLMSG_HDRLEN = 0x10 ++ NLMSG_MIN_TYPE = 0x10 ++ NLMSG_NOOP = 0x1 ++ NLMSG_OVERRUN = 0x4 ++ NLM_F_ACK = 0x4 ++ NLM_F_ACK_TLVS = 0x200 ++ NLM_F_APPEND = 0x800 ++ NLM_F_ATOMIC = 0x400 ++ NLM_F_CAPPED = 0x100 ++ NLM_F_CREATE = 0x400 ++ NLM_F_DUMP = 0x300 ++ NLM_F_DUMP_FILTERED = 0x20 ++ NLM_F_DUMP_INTR = 0x10 ++ NLM_F_ECHO = 0x8 ++ NLM_F_EXCL = 0x200 ++ NLM_F_MATCH = 0x200 ++ NLM_F_MULTI = 0x2 ++ NLM_F_NONREC = 0x100 ++ NLM_F_REPLACE = 0x100 ++ NLM_F_REQUEST = 0x1 ++ NLM_F_ROOT = 0x100 ++ NOFLSH = 0x80 ++ OCRNL = 0x8 ++ OFDEL = 0x80 ++ OFILL = 0x40 ++ ONLCR = 0x4 ++ ONLRET = 0x20 ++ ONOCR = 0x10 ++ OPOST = 0x1 ++ O_ACCMODE = 0x3 ++ O_APPEND = 0x400 ++ O_ASYNC = 0x2000 ++ O_CLOEXEC = 0x80000 ++ O_CREAT = 0x40 ++ O_DIRECT = 0x4000 ++ O_DIRECTORY = 
0x10000 ++ O_DSYNC = 0x1000 ++ O_EXCL = 0x80 ++ O_FSYNC = 0x101000 ++ O_LARGEFILE = 0x0 ++ O_NDELAY = 0x800 ++ O_NOATIME = 0x40000 ++ O_NOCTTY = 0x100 ++ O_NOFOLLOW = 0x20000 ++ O_NONBLOCK = 0x800 ++ O_PATH = 0x200000 ++ O_RDONLY = 0x0 ++ O_RDWR = 0x2 ++ O_RSYNC = 0x101000 ++ O_SYNC = 0x101000 ++ O_TMPFILE = 0x410000 ++ O_TRUNC = 0x200 ++ O_WRONLY = 0x1 ++ PACKET_ADD_MEMBERSHIP = 0x1 ++ PACKET_AUXDATA = 0x8 ++ PACKET_BROADCAST = 0x1 ++ PACKET_COPY_THRESH = 0x7 ++ PACKET_DROP_MEMBERSHIP = 0x2 ++ PACKET_FANOUT = 0x12 ++ PACKET_FANOUT_CBPF = 0x6 ++ PACKET_FANOUT_CPU = 0x2 ++ PACKET_FANOUT_DATA = 0x16 ++ PACKET_FANOUT_EBPF = 0x7 ++ PACKET_FANOUT_FLAG_DEFRAG = 0x8000 ++ PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 ++ PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 ++ PACKET_FANOUT_HASH = 0x0 ++ PACKET_FANOUT_LB = 0x1 ++ PACKET_FANOUT_QM = 0x5 ++ PACKET_FANOUT_RND = 0x4 ++ PACKET_FANOUT_ROLLOVER = 0x3 ++ PACKET_FASTROUTE = 0x6 ++ PACKET_HDRLEN = 0xb ++ PACKET_HOST = 0x0 ++ PACKET_KERNEL = 0x7 ++ PACKET_LOOPBACK = 0x5 ++ PACKET_LOSS = 0xe ++ PACKET_MR_ALLMULTI = 0x2 ++ PACKET_MR_MULTICAST = 0x0 ++ PACKET_MR_PROMISC = 0x1 ++ PACKET_MR_UNICAST = 0x3 ++ PACKET_MULTICAST = 0x2 ++ PACKET_ORIGDEV = 0x9 ++ PACKET_OTHERHOST = 0x3 ++ PACKET_OUTGOING = 0x4 ++ PACKET_QDISC_BYPASS = 0x14 ++ PACKET_RECV_OUTPUT = 0x3 ++ PACKET_RESERVE = 0xc ++ PACKET_ROLLOVER_STATS = 0x15 ++ PACKET_RX_RING = 0x5 ++ PACKET_STATISTICS = 0x6 ++ PACKET_TIMESTAMP = 0x11 ++ PACKET_TX_HAS_OFF = 0x13 ++ PACKET_TX_RING = 0xd ++ PACKET_TX_TIMESTAMP = 0x10 ++ PACKET_USER = 0x6 ++ PACKET_VERSION = 0xa ++ PACKET_VNET_HDR = 0xf ++ PARENB = 0x100 ++ PARITY_CRC16_PR0 = 0x2 ++ PARITY_CRC16_PR0_CCITT = 0x4 ++ PARITY_CRC16_PR1 = 0x3 ++ PARITY_CRC16_PR1_CCITT = 0x5 ++ PARITY_CRC32_PR0_CCITT = 0x6 ++ PARITY_CRC32_PR1_CCITT = 0x7 ++ PARITY_DEFAULT = 0x0 ++ PARITY_NONE = 0x1 ++ PARMRK = 0x8 ++ PARODD = 0x200 ++ PENDIN = 0x4000 ++ PRIO_PGRP = 0x1 ++ PRIO_PROCESS = 0x0 ++ PRIO_USER = 0x2 ++ PROT_EXEC = 0x4 ++ PROT_GROWSDOWN = 0x1000000 ++ 
PROT_GROWSUP = 0x2000000 ++ PROT_NONE = 0x0 ++ PROT_READ = 0x1 ++ PROT_WRITE = 0x2 ++ PR_CAPBSET_DROP = 0x18 ++ PR_CAPBSET_READ = 0x17 ++ PR_CAP_AMBIENT = 0x2f ++ PR_CAP_AMBIENT_CLEAR_ALL = 0x4 ++ PR_CAP_AMBIENT_IS_SET = 0x1 ++ PR_CAP_AMBIENT_LOWER = 0x3 ++ PR_CAP_AMBIENT_RAISE = 0x2 ++ PR_ENDIAN_BIG = 0x0 ++ PR_ENDIAN_LITTLE = 0x1 ++ PR_ENDIAN_PPC_LITTLE = 0x2 ++ PR_FPEMU_NOPRINT = 0x1 ++ PR_FPEMU_SIGFPE = 0x2 ++ PR_FP_EXC_ASYNC = 0x2 ++ PR_FP_EXC_DISABLED = 0x0 ++ PR_FP_EXC_DIV = 0x10000 ++ PR_FP_EXC_INV = 0x100000 ++ PR_FP_EXC_NONRECOV = 0x1 ++ PR_FP_EXC_OVF = 0x20000 ++ PR_FP_EXC_PRECISE = 0x3 ++ PR_FP_EXC_RES = 0x80000 ++ PR_FP_EXC_SW_ENABLE = 0x80 ++ PR_FP_EXC_UND = 0x40000 ++ PR_FP_MODE_FR = 0x1 ++ PR_FP_MODE_FRE = 0x2 ++ PR_GET_CHILD_SUBREAPER = 0x25 ++ PR_GET_DUMPABLE = 0x3 ++ PR_GET_ENDIAN = 0x13 ++ PR_GET_FPEMU = 0x9 ++ PR_GET_FPEXC = 0xb ++ PR_GET_FP_MODE = 0x2e ++ PR_GET_KEEPCAPS = 0x7 ++ PR_GET_NAME = 0x10 ++ PR_GET_NO_NEW_PRIVS = 0x27 ++ PR_GET_PDEATHSIG = 0x2 ++ PR_GET_SECCOMP = 0x15 ++ PR_GET_SECUREBITS = 0x1b ++ PR_GET_SPECULATION_CTRL = 0x34 ++ PR_GET_THP_DISABLE = 0x2a ++ PR_GET_TID_ADDRESS = 0x28 ++ PR_GET_TIMERSLACK = 0x1e ++ PR_GET_TIMING = 0xd ++ PR_GET_TSC = 0x19 ++ PR_GET_UNALIGN = 0x5 ++ PR_MCE_KILL = 0x21 ++ PR_MCE_KILL_CLEAR = 0x0 ++ PR_MCE_KILL_DEFAULT = 0x2 ++ PR_MCE_KILL_EARLY = 0x1 ++ PR_MCE_KILL_GET = 0x22 ++ PR_MCE_KILL_LATE = 0x0 ++ PR_MCE_KILL_SET = 0x1 ++ PR_MPX_DISABLE_MANAGEMENT = 0x2c ++ PR_MPX_ENABLE_MANAGEMENT = 0x2b ++ PR_SET_CHILD_SUBREAPER = 0x24 ++ PR_SET_DUMPABLE = 0x4 ++ PR_SET_ENDIAN = 0x14 ++ PR_SET_FPEMU = 0xa ++ PR_SET_FPEXC = 0xc ++ PR_SET_FP_MODE = 0x2d ++ PR_SET_KEEPCAPS = 0x8 ++ PR_SET_MM = 0x23 ++ PR_SET_MM_ARG_END = 0x9 ++ PR_SET_MM_ARG_START = 0x8 ++ PR_SET_MM_AUXV = 0xc ++ PR_SET_MM_BRK = 0x7 ++ PR_SET_MM_END_CODE = 0x2 ++ PR_SET_MM_END_DATA = 0x4 ++ PR_SET_MM_ENV_END = 0xb ++ PR_SET_MM_ENV_START = 0xa ++ PR_SET_MM_EXE_FILE = 0xd ++ PR_SET_MM_MAP = 0xe ++ PR_SET_MM_MAP_SIZE = 0xf ++ PR_SET_MM_START_BRK = 
0x6 ++ PR_SET_MM_START_CODE = 0x1 ++ PR_SET_MM_START_DATA = 0x3 ++ PR_SET_MM_START_STACK = 0x5 ++ PR_SET_NAME = 0xf ++ PR_SET_NO_NEW_PRIVS = 0x26 ++ PR_SET_PDEATHSIG = 0x1 ++ PR_SET_PTRACER = 0x59616d61 ++ PR_SET_PTRACER_ANY = 0xffffffffffffffff ++ PR_SET_SECCOMP = 0x16 ++ PR_SET_SECUREBITS = 0x1c ++ PR_SET_SPECULATION_CTRL = 0x35 ++ PR_SET_THP_DISABLE = 0x29 ++ PR_SET_TIMERSLACK = 0x1d ++ PR_SET_TIMING = 0xe ++ PR_SET_TSC = 0x1a ++ PR_SET_UNALIGN = 0x6 ++ PR_SPEC_DISABLE = 0x4 ++ PR_SPEC_ENABLE = 0x2 ++ PR_SPEC_FORCE_DISABLE = 0x8 ++ PR_SPEC_INDIRECT_BRANCH = 0x1 ++ PR_SPEC_NOT_AFFECTED = 0x0 ++ PR_SPEC_PRCTL = 0x1 ++ PR_SPEC_STORE_BYPASS = 0x0 ++ PR_SVE_GET_VL = 0x33 ++ PR_SVE_SET_VL = 0x32 ++ PR_SVE_SET_VL_ONEXEC = 0x40000 ++ PR_SVE_VL_INHERIT = 0x20000 ++ PR_SVE_VL_LEN_MASK = 0xffff ++ PR_TASK_PERF_EVENTS_DISABLE = 0x1f ++ PR_TASK_PERF_EVENTS_ENABLE = 0x20 ++ PR_TIMING_STATISTICAL = 0x0 ++ PR_TIMING_TIMESTAMP = 0x1 ++ PR_TSC_ENABLE = 0x1 ++ PR_TSC_SIGSEGV = 0x2 ++ PR_UNALIGN_NOPRINT = 0x1 ++ PR_UNALIGN_SIGBUS = 0x2 ++ PTRACE_ATTACH = 0x10 ++ PTRACE_CONT = 0x7 ++ PTRACE_DETACH = 0x11 ++ PTRACE_EVENT_CLONE = 0x3 ++ PTRACE_EVENT_EXEC = 0x4 ++ PTRACE_EVENT_EXIT = 0x6 ++ PTRACE_EVENT_FORK = 0x1 ++ PTRACE_EVENT_SECCOMP = 0x7 ++ PTRACE_EVENT_STOP = 0x80 ++ PTRACE_EVENT_VFORK = 0x2 ++ PTRACE_EVENT_VFORK_DONE = 0x5 ++ PTRACE_GETEVENTMSG = 0x4201 ++ PTRACE_GETREGS = 0xc ++ PTRACE_GETREGSET = 0x4204 ++ PTRACE_GETSIGINFO = 0x4202 ++ PTRACE_GETSIGMASK = 0x420a ++ PTRACE_INTERRUPT = 0x4207 ++ PTRACE_KILL = 0x8 ++ PTRACE_LISTEN = 0x4208 ++ PTRACE_O_EXITKILL = 0x100000 ++ PTRACE_O_MASK = 0x3000ff ++ PTRACE_O_SUSPEND_SECCOMP = 0x200000 ++ PTRACE_O_TRACECLONE = 0x8 ++ PTRACE_O_TRACEEXEC = 0x10 ++ PTRACE_O_TRACEEXIT = 0x40 ++ PTRACE_O_TRACEFORK = 0x2 ++ PTRACE_O_TRACESECCOMP = 0x80 ++ PTRACE_O_TRACESYSGOOD = 0x1 ++ PTRACE_O_TRACEVFORK = 0x4 ++ PTRACE_O_TRACEVFORKDONE = 0x20 ++ PTRACE_PEEKDATA = 0x2 ++ PTRACE_PEEKSIGINFO = 0x4209 ++ PTRACE_PEEKSIGINFO_SHARED = 0x1 ++ 
PTRACE_PEEKTEXT = 0x1 ++ PTRACE_PEEKUSR = 0x3 ++ PTRACE_POKEDATA = 0x5 ++ PTRACE_POKETEXT = 0x4 ++ PTRACE_POKEUSR = 0x6 ++ PTRACE_SECCOMP_GET_FILTER = 0x420c ++ PTRACE_SECCOMP_GET_METADATA = 0x420d ++ PTRACE_SEIZE = 0x4206 ++ PTRACE_SETOPTIONS = 0x4200 ++ PTRACE_SETREGS = 0xd ++ PTRACE_SETREGSET = 0x4205 ++ PTRACE_SETSIGINFO = 0x4203 ++ PTRACE_SETSIGMASK = 0x420b ++ PTRACE_SINGLESTEP = 0x9 ++ PTRACE_SYSCALL = 0x18 ++ PTRACE_TRACEME = 0x0 ++ RLIMIT_AS = 0x9 ++ RLIMIT_CORE = 0x4 ++ RLIMIT_CPU = 0x0 ++ RLIMIT_DATA = 0x2 ++ RLIMIT_FSIZE = 0x1 ++ RLIMIT_NOFILE = 0x7 ++ RLIMIT_STACK = 0x3 ++ RLIM_INFINITY = -0x1 ++ RTAX_ADVMSS = 0x8 ++ RTAX_CC_ALGO = 0x10 ++ RTAX_CWND = 0x7 ++ RTAX_FASTOPEN_NO_COOKIE = 0x11 ++ RTAX_FEATURES = 0xc ++ RTAX_FEATURE_ALLFRAG = 0x8 ++ RTAX_FEATURE_ECN = 0x1 ++ RTAX_FEATURE_MASK = 0xf ++ RTAX_FEATURE_SACK = 0x2 ++ RTAX_FEATURE_TIMESTAMP = 0x4 ++ RTAX_HOPLIMIT = 0xa ++ RTAX_INITCWND = 0xb ++ RTAX_INITRWND = 0xe ++ RTAX_LOCK = 0x1 ++ RTAX_MAX = 0x11 ++ RTAX_MTU = 0x2 ++ RTAX_QUICKACK = 0xf ++ RTAX_REORDERING = 0x9 ++ RTAX_RTO_MIN = 0xd ++ RTAX_RTT = 0x4 ++ RTAX_RTTVAR = 0x5 ++ RTAX_SSTHRESH = 0x6 ++ RTAX_UNSPEC = 0x0 ++ RTAX_WINDOW = 0x3 ++ RTA_ALIGNTO = 0x4 ++ RTA_MAX = 0x1d ++ RTCF_DIRECTSRC = 0x4000000 ++ RTCF_DOREDIRECT = 0x1000000 ++ RTCF_LOG = 0x2000000 ++ RTCF_MASQ = 0x400000 ++ RTCF_NAT = 0x800000 ++ RTCF_VALVE = 0x200000 ++ RTF_ADDRCLASSMASK = 0xf8000000 ++ RTF_ADDRCONF = 0x40000 ++ RTF_ALLONLINK = 0x20000 ++ RTF_BROADCAST = 0x10000000 ++ RTF_CACHE = 0x1000000 ++ RTF_DEFAULT = 0x10000 ++ RTF_DYNAMIC = 0x10 ++ RTF_FLOW = 0x2000000 ++ RTF_GATEWAY = 0x2 ++ RTF_HOST = 0x4 ++ RTF_INTERFACE = 0x40000000 ++ RTF_IRTT = 0x100 ++ RTF_LINKRT = 0x100000 ++ RTF_LOCAL = 0x80000000 ++ RTF_MODIFIED = 0x20 ++ RTF_MSS = 0x40 ++ RTF_MTU = 0x40 ++ RTF_MULTICAST = 0x20000000 ++ RTF_NAT = 0x8000000 ++ RTF_NOFORWARD = 0x1000 ++ RTF_NONEXTHOP = 0x200000 ++ RTF_NOPMTUDISC = 0x4000 ++ RTF_POLICY = 0x4000000 ++ RTF_REINSTATE = 0x8 ++ RTF_REJECT = 0x200 ++ 
RTF_STATIC = 0x400 ++ RTF_THROW = 0x2000 ++ RTF_UP = 0x1 ++ RTF_WINDOW = 0x80 ++ RTF_XRESOLVE = 0x800 ++ RTM_BASE = 0x10 ++ RTM_DELACTION = 0x31 ++ RTM_DELADDR = 0x15 ++ RTM_DELADDRLABEL = 0x49 ++ RTM_DELCHAIN = 0x65 ++ RTM_DELLINK = 0x11 ++ RTM_DELMDB = 0x55 ++ RTM_DELNEIGH = 0x1d ++ RTM_DELNETCONF = 0x51 ++ RTM_DELNSID = 0x59 ++ RTM_DELQDISC = 0x25 ++ RTM_DELROUTE = 0x19 ++ RTM_DELRULE = 0x21 ++ RTM_DELTCLASS = 0x29 ++ RTM_DELTFILTER = 0x2d ++ RTM_F_CLONED = 0x200 ++ RTM_F_EQUALIZE = 0x400 ++ RTM_F_FIB_MATCH = 0x2000 ++ RTM_F_LOOKUP_TABLE = 0x1000 ++ RTM_F_NOTIFY = 0x100 ++ RTM_F_PREFIX = 0x800 ++ RTM_GETACTION = 0x32 ++ RTM_GETADDR = 0x16 ++ RTM_GETADDRLABEL = 0x4a ++ RTM_GETANYCAST = 0x3e ++ RTM_GETCHAIN = 0x66 ++ RTM_GETDCB = 0x4e ++ RTM_GETLINK = 0x12 ++ RTM_GETMDB = 0x56 ++ RTM_GETMULTICAST = 0x3a ++ RTM_GETNEIGH = 0x1e ++ RTM_GETNEIGHTBL = 0x42 ++ RTM_GETNETCONF = 0x52 ++ RTM_GETNSID = 0x5a ++ RTM_GETQDISC = 0x26 ++ RTM_GETROUTE = 0x1a ++ RTM_GETRULE = 0x22 ++ RTM_GETSTATS = 0x5e ++ RTM_GETTCLASS = 0x2a ++ RTM_GETTFILTER = 0x2e ++ RTM_MAX = 0x67 ++ RTM_NEWACTION = 0x30 ++ RTM_NEWADDR = 0x14 ++ RTM_NEWADDRLABEL = 0x48 ++ RTM_NEWCACHEREPORT = 0x60 ++ RTM_NEWCHAIN = 0x64 ++ RTM_NEWLINK = 0x10 ++ RTM_NEWMDB = 0x54 ++ RTM_NEWNDUSEROPT = 0x44 ++ RTM_NEWNEIGH = 0x1c ++ RTM_NEWNEIGHTBL = 0x40 ++ RTM_NEWNETCONF = 0x50 ++ RTM_NEWNSID = 0x58 ++ RTM_NEWPREFIX = 0x34 ++ RTM_NEWQDISC = 0x24 ++ RTM_NEWROUTE = 0x18 ++ RTM_NEWRULE = 0x20 ++ RTM_NEWSTATS = 0x5c ++ RTM_NEWTCLASS = 0x28 ++ RTM_NEWTFILTER = 0x2c ++ RTM_NR_FAMILIES = 0x16 ++ RTM_NR_MSGTYPES = 0x58 ++ RTM_SETDCB = 0x4f ++ RTM_SETLINK = 0x13 ++ RTM_SETNEIGHTBL = 0x43 ++ RTNH_ALIGNTO = 0x4 ++ RTNH_COMPARE_MASK = 0x19 ++ RTNH_F_DEAD = 0x1 ++ RTNH_F_LINKDOWN = 0x10 ++ RTNH_F_OFFLOAD = 0x8 ++ RTNH_F_ONLINK = 0x4 ++ RTNH_F_PERVASIVE = 0x2 ++ RTNH_F_UNRESOLVED = 0x20 ++ RTN_MAX = 0xb ++ RTPROT_BABEL = 0x2a ++ RTPROT_BGP = 0xba ++ RTPROT_BIRD = 0xc ++ RTPROT_BOOT = 0x3 ++ RTPROT_DHCP = 0x10 ++ RTPROT_DNROUTED = 0xd ++ 
RTPROT_EIGRP = 0xc0 ++ RTPROT_GATED = 0x8 ++ RTPROT_ISIS = 0xbb ++ RTPROT_KERNEL = 0x2 ++ RTPROT_MROUTED = 0x11 ++ RTPROT_MRT = 0xa ++ RTPROT_NTK = 0xf ++ RTPROT_OSPF = 0xbc ++ RTPROT_RA = 0x9 ++ RTPROT_REDIRECT = 0x1 ++ RTPROT_RIP = 0xbd ++ RTPROT_STATIC = 0x4 ++ RTPROT_UNSPEC = 0x0 ++ RTPROT_XORP = 0xe ++ RTPROT_ZEBRA = 0xb ++ RT_CLASS_DEFAULT = 0xfd ++ RT_CLASS_LOCAL = 0xff ++ RT_CLASS_MAIN = 0xfe ++ RT_CLASS_MAX = 0xff ++ RT_CLASS_UNSPEC = 0x0 ++ RUSAGE_CHILDREN = -0x1 ++ RUSAGE_SELF = 0x0 ++ RUSAGE_THREAD = 0x1 ++ SCM_CREDENTIALS = 0x2 ++ SCM_RIGHTS = 0x1 ++ SCM_TIMESTAMP = 0x1d ++ SCM_TIMESTAMPING = 0x25 ++ SCM_TIMESTAMPING_OPT_STATS = 0x36 ++ SCM_TIMESTAMPING_PKTINFO = 0x3a ++ SCM_TIMESTAMPNS = 0x23 ++ SCM_TXTIME = 0x3d ++ SCM_WIFI_STATUS = 0x29 ++ SHUT_RD = 0x0 ++ SHUT_RDWR = 0x2 ++ SHUT_WR = 0x1 ++ SIOCADDDLCI = 0x8980 ++ SIOCADDMULTI = 0x8931 ++ SIOCADDRT = 0x890b ++ SIOCATMARK = 0x8905 ++ SIOCDARP = 0x8953 ++ SIOCDELDLCI = 0x8981 ++ SIOCDELMULTI = 0x8932 ++ SIOCDELRT = 0x890c ++ SIOCDEVPRIVATE = 0x89f0 ++ SIOCDIFADDR = 0x8936 ++ SIOCDRARP = 0x8960 ++ SIOCGARP = 0x8954 ++ SIOCGIFADDR = 0x8915 ++ SIOCGIFBR = 0x8940 ++ SIOCGIFBRDADDR = 0x8919 ++ SIOCGIFCONF = 0x8912 ++ SIOCGIFCOUNT = 0x8938 ++ SIOCGIFDSTADDR = 0x8917 ++ SIOCGIFENCAP = 0x8925 ++ SIOCGIFFLAGS = 0x8913 ++ SIOCGIFHWADDR = 0x8927 ++ SIOCGIFINDEX = 0x8933 ++ SIOCGIFMAP = 0x8970 ++ SIOCGIFMEM = 0x891f ++ SIOCGIFMETRIC = 0x891d ++ SIOCGIFMTU = 0x8921 ++ SIOCGIFNAME = 0x8910 ++ SIOCGIFNETMASK = 0x891b ++ SIOCGIFPFLAGS = 0x8935 ++ SIOCGIFSLAVE = 0x8929 ++ SIOCGIFTXQLEN = 0x8942 ++ SIOCGPGRP = 0x8904 ++ SIOCGRARP = 0x8961 ++ SIOCGSTAMP = 0x8906 ++ SIOCGSTAMPNS = 0x8907 ++ SIOCPROTOPRIVATE = 0x89e0 ++ SIOCRTMSG = 0x890d ++ SIOCSARP = 0x8955 ++ SIOCSIFADDR = 0x8916 ++ SIOCSIFBR = 0x8941 ++ SIOCSIFBRDADDR = 0x891a ++ SIOCSIFDSTADDR = 0x8918 ++ SIOCSIFENCAP = 0x8926 ++ SIOCSIFFLAGS = 0x8914 ++ SIOCSIFHWADDR = 0x8924 ++ SIOCSIFHWBROADCAST = 0x8937 ++ SIOCSIFLINK = 0x8911 ++ SIOCSIFMAP = 0x8971 ++ 
SIOCSIFMEM = 0x8920 ++ SIOCSIFMETRIC = 0x891e ++ SIOCSIFMTU = 0x8922 ++ SIOCSIFNAME = 0x8923 ++ SIOCSIFNETMASK = 0x891c ++ SIOCSIFPFLAGS = 0x8934 ++ SIOCSIFSLAVE = 0x8930 ++ SIOCSIFTXQLEN = 0x8943 ++ SIOCSPGRP = 0x8902 ++ SIOCSRARP = 0x8962 ++ SOCK_CLOEXEC = 0x80000 ++ SOCK_DCCP = 0x6 ++ SOCK_DGRAM = 0x2 ++ SOCK_NONBLOCK = 0x800 ++ SOCK_PACKET = 0xa ++ SOCK_RAW = 0x3 ++ SOCK_RDM = 0x4 ++ SOCK_SEQPACKET = 0x5 ++ SOCK_STREAM = 0x1 ++ SOL_AAL = 0x109 ++ SOL_ALG = 0x117 ++ SOL_ATM = 0x108 ++ SOL_BLUETOOTH = 0x112 ++ SOL_CAIF = 0x116 ++ SOL_DCCP = 0x10d ++ SOL_DECNET = 0x105 ++ SOL_ICMPV6 = 0x3a ++ SOL_IP = 0x0 ++ SOL_IPV6 = 0x29 ++ SOL_IRDA = 0x10a ++ SOL_IUCV = 0x115 ++ SOL_KCM = 0x119 ++ SOL_LLC = 0x10c ++ SOL_NETBEUI = 0x10b ++ SOL_NETLINK = 0x10e ++ SOL_NFC = 0x118 ++ SOL_PACKET = 0x107 ++ SOL_PNPIPE = 0x113 ++ SOL_PPPOL2TP = 0x111 ++ SOL_RAW = 0xff ++ SOL_RDS = 0x114 ++ SOL_RXRPC = 0x110 ++ SOL_SOCKET = 0x1 ++ SOL_TCP = 0x6 ++ SOL_TIPC = 0x10f ++ SOL_TLS = 0x11a ++ SOL_X25 = 0x106 ++ SOMAXCONN = 0x80 ++ SO_ACCEPTCONN = 0x1e ++ SO_ATTACH_BPF = 0x32 ++ SO_ATTACH_FILTER = 0x1a ++ SO_ATTACH_REUSEPORT_CBPF = 0x33 ++ SO_ATTACH_REUSEPORT_EBPF = 0x34 ++ SO_BINDTODEVICE = 0x19 ++ SO_BPF_EXTENSIONS = 0x30 ++ SO_BROADCAST = 0x6 ++ SO_BSDCOMPAT = 0xe ++ SO_BUSY_POLL = 0x2e ++ SO_CNX_ADVICE = 0x35 ++ SO_COOKIE = 0x39 ++ SO_DEBUG = 0x1 ++ SO_DETACH_BPF = 0x1b ++ SO_DETACH_FILTER = 0x1b ++ SO_DOMAIN = 0x27 ++ SO_DONTROUTE = 0x5 ++ SO_ERROR = 0x4 ++ SO_GET_FILTER = 0x1a ++ SO_INCOMING_CPU = 0x31 ++ SO_INCOMING_NAPI_ID = 0x38 ++ SO_KEEPALIVE = 0x9 ++ SO_LINGER = 0xd ++ SO_LOCK_FILTER = 0x2c ++ SO_MARK = 0x24 ++ SO_MAX_PACING_RATE = 0x2f ++ SO_MEMINFO = 0x37 ++ SO_NOFCS = 0x2b ++ SO_NO_CHECK = 0xb ++ SO_OOBINLINE = 0xa ++ SO_PASSCRED = 0x10 ++ SO_PASSSEC = 0x22 ++ SO_PEEK_OFF = 0x2a ++ SO_PEERCRED = 0x11 ++ SO_PEERGROUPS = 0x3b ++ SO_PEERNAME = 0x1c ++ SO_PEERSEC = 0x1f ++ SO_PRIORITY = 0xc ++ SO_PROTOCOL = 0x26 ++ SO_RCVBUF = 0x8 ++ SO_RCVBUFFORCE = 0x21 ++ SO_RCVLOWAT = 0x12 ++ 
SO_RCVTIMEO = 0x14 ++ SO_REUSEADDR = 0x2 ++ SO_REUSEPORT = 0xf ++ SO_RXQ_OVFL = 0x28 ++ SO_SECURITY_AUTHENTICATION = 0x16 ++ SO_SECURITY_ENCRYPTION_NETWORK = 0x18 ++ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 ++ SO_SELECT_ERR_QUEUE = 0x2d ++ SO_SNDBUF = 0x7 ++ SO_SNDBUFFORCE = 0x20 ++ SO_SNDLOWAT = 0x13 ++ SO_SNDTIMEO = 0x15 ++ SO_TIMESTAMP = 0x1d ++ SO_TIMESTAMPING = 0x25 ++ SO_TIMESTAMPNS = 0x23 ++ SO_TXTIME = 0x3d ++ SO_TYPE = 0x3 ++ SO_WIFI_STATUS = 0x29 ++ SO_ZEROCOPY = 0x3c ++ S_BLKSIZE = 0x200 ++ S_IEXEC = 0x40 ++ S_IFBLK = 0x6000 ++ S_IFCHR = 0x2000 ++ S_IFDIR = 0x4000 ++ S_IFIFO = 0x1000 ++ S_IFLNK = 0xa000 ++ S_IFMT = 0xf000 ++ S_IFREG = 0x8000 ++ S_IFSOCK = 0xc000 ++ S_IREAD = 0x100 ++ S_IRGRP = 0x20 ++ S_IROTH = 0x4 ++ S_IRUSR = 0x100 ++ S_IRWXG = 0x38 ++ S_IRWXO = 0x7 ++ S_IRWXU = 0x1c0 ++ S_ISGID = 0x400 ++ S_ISUID = 0x800 ++ S_ISVTX = 0x200 ++ S_IWGRP = 0x10 ++ S_IWOTH = 0x2 ++ S_IWRITE = 0x80 ++ S_IWUSR = 0x80 ++ S_IXGRP = 0x8 ++ S_IXOTH = 0x1 ++ S_IXUSR = 0x40 ++ TCFLSH = 0x540b ++ TCIFLUSH = 0x0 ++ TCIOFLUSH = 0x2 ++ TCOFLUSH = 0x1 ++ TCP_CC_INFO = 0x1a ++ TCP_CONGESTION = 0xd ++ TCP_COOKIE_IN_ALWAYS = 0x1 ++ TCP_COOKIE_MAX = 0x10 ++ TCP_COOKIE_MIN = 0x8 ++ TCP_COOKIE_OUT_NEVER = 0x2 ++ TCP_COOKIE_PAIR_SIZE = 0x20 ++ TCP_COOKIE_TRANSACTIONS = 0xf ++ TCP_CORK = 0x3 ++ TCP_DEFER_ACCEPT = 0x9 ++ TCP_FASTOPEN = 0x17 ++ TCP_FASTOPEN_CONNECT = 0x1e ++ TCP_FASTOPEN_KEY = 0x21 ++ TCP_FASTOPEN_NO_COOKIE = 0x22 ++ TCP_INFO = 0xb ++ TCP_KEEPCNT = 0x6 ++ TCP_KEEPIDLE = 0x4 ++ TCP_KEEPINTVL = 0x5 ++ TCP_LINGER2 = 0x8 ++ TCP_MAXSEG = 0x2 ++ TCP_MAXWIN = 0xffff ++ TCP_MAX_WINSHIFT = 0xe ++ TCP_MD5SIG = 0xe ++ TCP_MD5SIG_EXT = 0x20 ++ TCP_MD5SIG_FLAG_PREFIX = 0x1 ++ TCP_MD5SIG_MAXKEYLEN = 0x50 ++ TCP_MSS = 0x200 ++ TCP_MSS_DEFAULT = 0x218 ++ TCP_MSS_DESIRED = 0x4c4 ++ TCP_NODELAY = 0x1 ++ TCP_NOTSENT_LOWAT = 0x19 ++ TCP_QUEUE_SEQ = 0x15 ++ TCP_QUICKACK = 0xc ++ TCP_REPAIR = 0x13 ++ TCP_REPAIR_OPTIONS = 0x16 ++ TCP_REPAIR_QUEUE = 0x14 ++ TCP_REPAIR_WINDOW = 0x1d ++ 
TCP_SAVED_SYN = 0x1c ++ TCP_SAVE_SYN = 0x1b ++ TCP_SYNCNT = 0x7 ++ TCP_S_DATA_IN = 0x4 ++ TCP_S_DATA_OUT = 0x8 ++ TCP_THIN_DUPACK = 0x11 ++ TCP_THIN_LINEAR_TIMEOUTS = 0x10 ++ TCP_TIMESTAMP = 0x18 ++ TCP_ULP = 0x1f ++ TCP_USER_TIMEOUT = 0x12 ++ TCP_WINDOW_CLAMP = 0xa ++ TCSAFLUSH = 0x2 ++ TIOCCBRK = 0x5428 ++ TIOCCONS = 0x541d ++ TIOCEXCL = 0x540c ++ TIOCGDEV = 0x80045432 ++ TIOCGETD = 0x5424 ++ TIOCGEXCL = 0x80045440 ++ TIOCGICOUNT = 0x545d ++ TIOCGLCKTRMIOS = 0x5456 ++ TIOCGPGRP = 0x540f ++ TIOCGPKT = 0x80045438 ++ TIOCGPTLCK = 0x80045439 ++ TIOCGPTN = 0x80045430 ++ TIOCGPTPEER = 0x5441 ++ TIOCGRS485 = 0x542e ++ TIOCGSERIAL = 0x541e ++ TIOCGSID = 0x5429 ++ TIOCGSOFTCAR = 0x5419 ++ TIOCGWINSZ = 0x5413 ++ TIOCINQ = 0x541b ++ TIOCLINUX = 0x541c ++ TIOCMBIC = 0x5417 ++ TIOCMBIS = 0x5416 ++ TIOCMGET = 0x5415 ++ TIOCMIWAIT = 0x545c ++ TIOCMSET = 0x5418 ++ TIOCM_CAR = 0x40 ++ TIOCM_CD = 0x40 ++ TIOCM_CTS = 0x20 ++ TIOCM_DSR = 0x100 ++ TIOCM_DTR = 0x2 ++ TIOCM_LE = 0x1 ++ TIOCM_RI = 0x80 ++ TIOCM_RNG = 0x80 ++ TIOCM_RTS = 0x4 ++ TIOCM_SR = 0x10 ++ TIOCM_ST = 0x8 ++ TIOCNOTTY = 0x5422 ++ TIOCNXCL = 0x540d ++ TIOCOUTQ = 0x5411 ++ TIOCPKT = 0x5420 ++ TIOCPKT_DATA = 0x0 ++ TIOCPKT_DOSTOP = 0x20 ++ TIOCPKT_FLUSHREAD = 0x1 ++ TIOCPKT_FLUSHWRITE = 0x2 ++ TIOCPKT_IOCTL = 0x40 ++ TIOCPKT_NOSTOP = 0x10 ++ TIOCPKT_START = 0x8 ++ TIOCPKT_STOP = 0x4 ++ TIOCSBRK = 0x5427 ++ TIOCSCTTY = 0x540e ++ TIOCSERCONFIG = 0x5453 ++ TIOCSERGETLSR = 0x5459 ++ TIOCSERGETMULTI = 0x545a ++ TIOCSERGSTRUCT = 0x5458 ++ TIOCSERGWILD = 0x5454 ++ TIOCSERSETMULTI = 0x545b ++ TIOCSERSWILD = 0x5455 ++ TIOCSER_TEMT = 0x1 ++ TIOCSETD = 0x5423 ++ TIOCSIG = 0x40045436 ++ TIOCSLCKTRMIOS = 0x5457 ++ TIOCSPGRP = 0x5410 ++ TIOCSPTLCK = 0x40045431 ++ TIOCSRS485 = 0x542f ++ TIOCSSERIAL = 0x541f ++ TIOCSSOFTCAR = 0x541a ++ TIOCSTI = 0x5412 ++ TIOCSWINSZ = 0x5414 ++ TIOCVHANGUP = 0x5437 ++ TOSTOP = 0x100 ++ TUNATTACHFILTER = 0x401054d5 ++ TUNDETACHFILTER = 0x401054d6 ++ TUNGETFEATURES = 0x800454cf ++ TUNGETFILTER = 
0x801054db ++ TUNGETIFF = 0x800454d2 ++ TUNGETSNDBUF = 0x800454d3 ++ TUNGETVNETBE = 0x800454df ++ TUNGETVNETHDRSZ = 0x800454d7 ++ TUNGETVNETLE = 0x800454dd ++ TUNSETDEBUG = 0x400454c9 ++ TUNSETFILTEREBPF = 0x800454e1 ++ TUNSETGROUP = 0x400454ce ++ TUNSETIFF = 0x400454ca ++ TUNSETIFINDEX = 0x400454da ++ TUNSETLINK = 0x400454cd ++ TUNSETNOCSUM = 0x400454c8 ++ TUNSETOFFLOAD = 0x400454d0 ++ TUNSETOWNER = 0x400454cc ++ TUNSETPERSIST = 0x400454cb ++ TUNSETQUEUE = 0x400454d9 ++ TUNSETSNDBUF = 0x400454d4 ++ TUNSETSTEERINGEBPF = 0x800454e0 ++ TUNSETTXFILTER = 0x400454d1 ++ TUNSETVNETBE = 0x400454de ++ TUNSETVNETHDRSZ = 0x400454d8 ++ TUNSETVNETLE = 0x400454dc ++ VDISCARD = 0xd ++ VEOF = 0x4 ++ VEOL = 0xb ++ VEOL2 = 0x10 ++ VERASE = 0x2 ++ VINTR = 0x0 ++ VKILL = 0x3 ++ VLNEXT = 0xf ++ VMIN = 0x6 ++ VQUIT = 0x1 ++ VREPRINT = 0xc ++ VSTART = 0x8 ++ VSTOP = 0x9 ++ VSUSP = 0xa ++ VSWTC = 0x7 ++ VT0 = 0x0 ++ VT1 = 0x4000 ++ VTDLY = 0x4000 ++ VTIME = 0x5 ++ VWERASE = 0xe ++ WALL = 0x40000000 ++ WCLONE = 0x80000000 ++ WCONTINUED = 0x8 ++ WEXITED = 0x4 ++ WNOHANG = 0x1 ++ WNOTHREAD = 0x20000000 ++ WNOWAIT = 0x1000000 ++ WORDSIZE = 0x40 ++ WSTOPPED = 0x2 ++ WUNTRACED = 0x2 ++) ++ ++// Errors ++const ( ++ E2BIG = Errno(0x7) ++ EACCES = Errno(0xd) ++ EADDRINUSE = Errno(0x62) ++ EADDRNOTAVAIL = Errno(0x63) ++ EADV = Errno(0x44) ++ EAFNOSUPPORT = Errno(0x61) ++ EAGAIN = Errno(0xb) ++ EALREADY = Errno(0x72) ++ EBADE = Errno(0x34) ++ EBADF = Errno(0x9) ++ EBADFD = Errno(0x4d) ++ EBADMSG = Errno(0x4a) ++ EBADR = Errno(0x35) ++ EBADRQC = Errno(0x38) ++ EBADSLT = Errno(0x39) ++ EBFONT = Errno(0x3b) ++ EBUSY = Errno(0x10) ++ ECANCELED = Errno(0x7d) ++ ECHILD = Errno(0xa) ++ ECHRNG = Errno(0x2c) ++ ECOMM = Errno(0x46) ++ ECONNABORTED = Errno(0x67) ++ ECONNREFUSED = Errno(0x6f) ++ ECONNRESET = Errno(0x68) ++ EDEADLK = Errno(0x23) ++ EDEADLOCK = Errno(0x23) ++ EDESTADDRREQ = Errno(0x59) ++ EDOM = Errno(0x21) ++ EDOTDOT = Errno(0x49) ++ EDQUOT = Errno(0x7a) ++ EEXIST = Errno(0x11) ++ EFAULT = 
Errno(0xe) ++ EFBIG = Errno(0x1b) ++ EHOSTDOWN = Errno(0x70) ++ EHOSTUNREACH = Errno(0x71) ++ EHWPOISON = Errno(0x85) ++ EIDRM = Errno(0x2b) ++ EILSEQ = Errno(0x54) ++ EINPROGRESS = Errno(0x73) ++ EINTR = Errno(0x4) ++ EINVAL = Errno(0x16) ++ EIO = Errno(0x5) ++ EISCONN = Errno(0x6a) ++ EISDIR = Errno(0x15) ++ EISNAM = Errno(0x78) ++ EKEYEXPIRED = Errno(0x7f) ++ EKEYREJECTED = Errno(0x81) ++ EKEYREVOKED = Errno(0x80) ++ EL2HLT = Errno(0x33) ++ EL2NSYNC = Errno(0x2d) ++ EL3HLT = Errno(0x2e) ++ EL3RST = Errno(0x2f) ++ ELIBACC = Errno(0x4f) ++ ELIBBAD = Errno(0x50) ++ ELIBEXEC = Errno(0x53) ++ ELIBMAX = Errno(0x52) ++ ELIBSCN = Errno(0x51) ++ ELNRNG = Errno(0x30) ++ ELOOP = Errno(0x28) ++ EMEDIUMTYPE = Errno(0x7c) ++ EMFILE = Errno(0x18) ++ EMLINK = Errno(0x1f) ++ EMSGSIZE = Errno(0x5a) ++ EMULTIHOP = Errno(0x48) ++ ENAMETOOLONG = Errno(0x24) ++ ENAVAIL = Errno(0x77) ++ ENETDOWN = Errno(0x64) ++ ENETRESET = Errno(0x66) ++ ENETUNREACH = Errno(0x65) ++ ENFILE = Errno(0x17) ++ ENOANO = Errno(0x37) ++ ENOBUFS = Errno(0x69) ++ ENOCSI = Errno(0x32) ++ ENODATA = Errno(0x3d) ++ ENODEV = Errno(0x13) ++ ENOENT = Errno(0x2) ++ ENOEXEC = Errno(0x8) ++ ENOKEY = Errno(0x7e) ++ ENOLCK = Errno(0x25) ++ ENOLINK = Errno(0x43) ++ ENOMEDIUM = Errno(0x7b) ++ ENOMEM = Errno(0xc) ++ ENOMSG = Errno(0x2a) ++ ENONET = Errno(0x40) ++ ENOPKG = Errno(0x41) ++ ENOPROTOOPT = Errno(0x5c) ++ ENOSPC = Errno(0x1c) ++ ENOSR = Errno(0x3f) ++ ENOSTR = Errno(0x3c) ++ ENOSYS = Errno(0x26) ++ ENOTBLK = Errno(0xf) ++ ENOTCONN = Errno(0x6b) ++ ENOTDIR = Errno(0x14) ++ ENOTEMPTY = Errno(0x27) ++ ENOTNAM = Errno(0x76) ++ ENOTRECOVERABLE = Errno(0x83) ++ ENOTSOCK = Errno(0x58) ++ ENOTSUP = Errno(0x5f) ++ ENOTTY = Errno(0x19) ++ ENOTUNIQ = Errno(0x4c) ++ ENXIO = Errno(0x6) ++ EOPNOTSUPP = Errno(0x5f) ++ EOVERFLOW = Errno(0x4b) ++ EOWNERDEAD = Errno(0x82) ++ EPERM = Errno(0x1) ++ EPFNOSUPPORT = Errno(0x60) ++ EPIPE = Errno(0x20) ++ EPROTO = Errno(0x47) ++ EPROTONOSUPPORT = Errno(0x5d) ++ EPROTOTYPE = Errno(0x5b) ++ 
ERANGE = Errno(0x22) ++ EREMCHG = Errno(0x4e) ++ EREMOTE = Errno(0x42) ++ EREMOTEIO = Errno(0x79) ++ ERESTART = Errno(0x55) ++ ERFKILL = Errno(0x84) ++ EROFS = Errno(0x1e) ++ ESHUTDOWN = Errno(0x6c) ++ ESOCKTNOSUPPORT = Errno(0x5e) ++ ESPIPE = Errno(0x1d) ++ ESRCH = Errno(0x3) ++ ESRMNT = Errno(0x45) ++ ESTALE = Errno(0x74) ++ ESTRPIPE = Errno(0x56) ++ ETIME = Errno(0x3e) ++ ETIMEDOUT = Errno(0x6e) ++ ETOOMANYREFS = Errno(0x6d) ++ ETXTBSY = Errno(0x1a) ++ EUCLEAN = Errno(0x75) ++ EUNATCH = Errno(0x31) ++ EUSERS = Errno(0x57) ++ EWOULDBLOCK = Errno(0xb) ++ EXDEV = Errno(0x12) ++ EXFULL = Errno(0x36) ++) ++ ++// Signals ++const ( ++ SIGABRT = Signal(0x6) ++ SIGALRM = Signal(0xe) ++ SIGBUS = Signal(0x7) ++ SIGCHLD = Signal(0x11) ++ SIGCLD = Signal(0x11) ++ SIGCONT = Signal(0x12) ++ SIGFPE = Signal(0x8) ++ SIGHUP = Signal(0x1) ++ SIGILL = Signal(0x4) ++ SIGINT = Signal(0x2) ++ SIGIO = Signal(0x1d) ++ SIGIOT = Signal(0x6) ++ SIGKILL = Signal(0x9) ++ SIGPIPE = Signal(0xd) ++ SIGPOLL = Signal(0x1d) ++ SIGPROF = Signal(0x1b) ++ SIGPWR = Signal(0x1e) ++ SIGQUIT = Signal(0x3) ++ SIGSEGV = Signal(0xb) ++ SIGSTKFLT = Signal(0x10) ++ SIGSTOP = Signal(0x13) ++ SIGSYS = Signal(0x1f) ++ SIGTERM = Signal(0xf) ++ SIGTRAP = Signal(0x5) ++ SIGTSTP = Signal(0x14) ++ SIGTTIN = Signal(0x15) ++ SIGTTOU = Signal(0x16) ++ SIGURG = Signal(0x17) ++ SIGUSR1 = Signal(0xa) ++ SIGUSR2 = Signal(0xc) ++ SIGVTALRM = Signal(0x1a) ++ SIGWINCH = Signal(0x1c) ++ SIGXCPU = Signal(0x18) ++ SIGXFSZ = Signal(0x19) ++) ++ ++// Error table ++var errors = [...]string{ ++ 1: "operation not permitted", ++ 2: "no such file or directory", ++ 3: "no such process", ++ 4: "interrupted system call", ++ 5: "input/output error", ++ 6: "no such device or address", ++ 7: "argument list too long", ++ 8: "exec format error", ++ 9: "bad file descriptor", ++ 10: "no child processes", ++ 11: "resource temporarily unavailable", ++ 12: "cannot allocate memory", ++ 13: "permission denied", ++ 14: "bad address", ++ 15: "block 
device required", ++ 16: "device or resource busy", ++ 17: "file exists", ++ 18: "invalid cross-device link", ++ 19: "no such device", ++ 20: "not a directory", ++ 21: "is a directory", ++ 22: "invalid argument", ++ 23: "too many open files in system", ++ 24: "too many open files", ++ 25: "inappropriate ioctl for device", ++ 26: "text file busy", ++ 27: "file too large", ++ 28: "no space left on device", ++ 29: "illegal seek", ++ 30: "read-only file system", ++ 31: "too many links", ++ 32: "broken pipe", ++ 33: "numerical argument out of domain", ++ 34: "numerical result out of range", ++ 35: "resource deadlock avoided", ++ 36: "file name too long", ++ 37: "no locks available", ++ 38: "function not implemented", ++ 39: "directory not empty", ++ 40: "too many levels of symbolic links", ++ 42: "no message of desired type", ++ 43: "identifier removed", ++ 44: "channel number out of range", ++ 45: "level 2 not synchronized", ++ 46: "level 3 halted", ++ 47: "level 3 reset", ++ 48: "link number out of range", ++ 49: "protocol driver not attached", ++ 50: "no CSI structure available", ++ 51: "level 2 halted", ++ 52: "invalid exchange", ++ 53: "invalid request descriptor", ++ 54: "exchange full", ++ 55: "no anode", ++ 56: "invalid request code", ++ 57: "invalid slot", ++ 59: "bad font file format", ++ 60: "device not a stream", ++ 61: "no data available", ++ 62: "timer expired", ++ 63: "out of streams resources", ++ 64: "machine is not on the network", ++ 65: "package not installed", ++ 66: "object is remote", ++ 67: "link has been severed", ++ 68: "advertise error", ++ 69: "srmount error", ++ 70: "communication error on send", ++ 71: "protocol error", ++ 72: "multihop attempted", ++ 73: "RFS specific error", ++ 74: "bad message", ++ 75: "value too large for defined data type", ++ 76: "name not unique on network", ++ 77: "file descriptor in bad state", ++ 78: "remote address changed", ++ 79: "can not access a needed shared library", ++ 80: "accessing a corrupted shared 
library", ++ 81: ".lib section in a.out corrupted", ++ 82: "attempting to link in too many shared libraries", ++ 83: "cannot exec a shared library directly", ++ 84: "invalid or incomplete multibyte or wide character", ++ 85: "interrupted system call should be restarted", ++ 86: "streams pipe error", ++ 87: "too many users", ++ 88: "socket operation on non-socket", ++ 89: "destination address required", ++ 90: "message too long", ++ 91: "protocol wrong type for socket", ++ 92: "protocol not available", ++ 93: "protocol not supported", ++ 94: "socket type not supported", ++ 95: "operation not supported", ++ 96: "protocol family not supported", ++ 97: "address family not supported by protocol", ++ 98: "address already in use", ++ 99: "cannot assign requested address", ++ 100: "network is down", ++ 101: "network is unreachable", ++ 102: "network dropped connection on reset", ++ 103: "software caused connection abort", ++ 104: "connection reset by peer", ++ 105: "no buffer space available", ++ 106: "transport endpoint is already connected", ++ 107: "transport endpoint is not connected", ++ 108: "cannot send after transport endpoint shutdown", ++ 109: "too many references: cannot splice", ++ 110: "connection timed out", ++ 111: "connection refused", ++ 112: "host is down", ++ 113: "no route to host", ++ 114: "operation already in progress", ++ 115: "operation now in progress", ++ 116: "stale file handle", ++ 117: "structure needs cleaning", ++ 118: "not a XENIX named type file", ++ 119: "no XENIX semaphores available", ++ 120: "is a named type file", ++ 121: "remote I/O error", ++ 122: "disk quota exceeded", ++ 123: "no medium found", ++ 124: "wrong medium type", ++ 125: "operation canceled", ++ 126: "required key not available", ++ 127: "key has expired", ++ 128: "key has been revoked", ++ 129: "key was rejected by service", ++ 130: "owner died", ++ 131: "state not recoverable", ++ 132: "operation not possible due to RF-kill", ++ 133: "memory page has hardware error", 
++} ++ ++// Signal table ++var signals = [...]string{ ++ 1: "hangup", ++ 2: "interrupt", ++ 3: "quit", ++ 4: "illegal instruction", ++ 5: "trace/breakpoint trap", ++ 6: "aborted", ++ 7: "bus error", ++ 8: "floating point exception", ++ 9: "killed", ++ 10: "user defined signal 1", ++ 11: "segmentation fault", ++ 12: "user defined signal 2", ++ 13: "broken pipe", ++ 14: "alarm clock", ++ 15: "terminated", ++ 16: "stack fault", ++ 17: "child exited", ++ 18: "continued", ++ 19: "stopped (signal)", ++ 20: "stopped", ++ 21: "stopped (tty input)", ++ 22: "stopped (tty output)", ++ 23: "urgent I/O condition", ++ 24: "CPU time limit exceeded", ++ 25: "file size limit exceeded", ++ 26: "virtual timer expired", ++ 27: "profiling timer expired", ++ 28: "window changed", ++ 29: "I/O possible", ++ 30: "power failure", ++ 31: "bad system call", ++} +diff --git a/src/syscall/zsyscall_linux_loong64.go b/src/syscall/zsyscall_linux_loong64.go +new file mode 100644 +index 0000000000..48c58f3772 +--- /dev/null ++++ b/src/syscall/zsyscall_linux_loong64.go +@@ -0,0 +1,1564 @@ ++// mksyscall.pl -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go ++// Code generated by the command above; DO NOT EDIT. 
++ ++//go:build linux && loong64 ++ ++package syscall ++ ++import "unsafe" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func faccessat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fchmodat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pipe2(p *[2]_C_int, flags int) (err error) { ++ _, _, e1 := 
RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 unsafe.Pointer ++ if len(buf) > 0 { ++ _p1 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p1 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unlinkat(dirfd int, path string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) ++ if e1 != 0 { ++ err = 
errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getcwd(buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { ++ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) ++ wpid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(arg) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(source) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(target) ++ if err != nil { ++ return ++ } ++ var _p2 *byte ++ _p2, err = BytePtrFromString(fstype) ++ if err != nil { ++ return ++ } ++ _, _, e1 := 
Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Acct(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Adjtimex(buf *Timex) (state int, err error) { ++ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) ++ state = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Chdir(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Chroot(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Close(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Dup(oldfd int) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT ++ ++func Dup3(oldfd int, newfd int, flags int) (err error) { ++ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollCreate1(flag int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { ++ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { ++ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchdir(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchmod(fd int, mode uint32) (err error) { ++ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ 
return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntl(fd int, cmd int, arg int) (val int, err error) { ++ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ val = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fdatasync(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Flock(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fsync(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getdents(fd int, buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpgid(pid int) (pgid int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) ++ pgid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpid() (pid int) { ++ r0, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0) ++ pid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getppid() (ppid int) { ++ r0, _ := rawSyscallNoError(SYS_GETPPID, 0, 0, 0) ++ ppid = 
int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpriority(which int, who int) (prio int, err error) { ++ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) ++ prio = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getrusage(who int, rusage *Rusage) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettid() (tid int) { ++ r0, _ := rawSyscallNoError(SYS_GETTID, 0, 0, 0) ++ tid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getxattr(path string, attr string, dest []byte) (sz int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ var _p2 unsafe.Pointer ++ if len(dest) > 0 { ++ _p2 = unsafe.Pointer(&dest[0]) ++ } else { ++ _p2 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) ++ sz = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(pathname) ++ if err != nil { ++ return ++ } ++ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) ++ watchdesc = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func InotifyInit1(flags int) (fd int, err 
error) { ++ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { ++ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) ++ success = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Kill(pid int, sig Signal) (err error) { ++ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Klogctl(typ int, buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listxattr(path string, dest []byte) (sz int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 unsafe.Pointer ++ if len(dest) > 0 { ++ _p1 = unsafe.Pointer(&dest[0]) ++ } else { ++ _p1 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) ++ sz = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mkdirat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ 
err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Nanosleep(time *Timespec, leftover *Timespec) (err error) { ++ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func PivotRoot(newroot string, putold string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(newroot) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(putold) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { ++ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func read(fd int, p []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Removexattr(path string, attr string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setdomainname(p []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sethostname(p []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setpgid(pid int, pgid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setsid() (pid int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) ++ pid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Settimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT ++ ++func Setpriority(which int, who int, prio int) (err error) { ++ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setxattr(path string, attr string, data []byte, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ var _p2 unsafe.Pointer ++ if len(data) > 0 { ++ _p2 = unsafe.Pointer(&data[0]) ++ } else { ++ _p2 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sync() { ++ Syscall(SYS_SYNC, 0, 0, 0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sysinfo(info *Sysinfo_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tgkill(tgid int, tid int, sig Signal) (err error) { ++ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Times(tms *Tms) (ticks uintptr, err error) { ++ 
r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) ++ ticks = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Umask(mask int) (oldmask int) { ++ r0, _ := rawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) ++ oldmask = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Uname(buf *Utsname) (err error) { ++ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Unmount(target string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(target) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Unshare(flags int) (err error) { ++ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func write(fd int, p []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func exitThread(code int) (err error) { ++ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func readlen(fd int, p *byte, np int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_READ, 
uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func writelen(fd int, p *byte, np int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func munmap(addr uintptr, length uintptr) (err error) { ++ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Madvise(b []byte, advice int) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mprotect(b []byte, prot int) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mlock(b []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Munlock(b []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = 
unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mlockall(flags int) (err error) { ++ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Munlockall() (err error) { ++ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(events) > 0 { ++ _p0 = unsafe.Pointer(&events[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchown(fd int, uid int, gid int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstat(fd int, stat *Stat_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 
0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatfs(fd int, buf *Statfs_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Ftruncate(fd int, length int64) (err error) { ++ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getegid() (egid int) { ++ r0, _ := rawSyscallNoError(SYS_GETEGID, 0, 0, 0) ++ egid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Geteuid() (euid int) { ++ r0, _ := rawSyscallNoError(SYS_GETEUID, 0, 0, 0) ++ euid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getgid() (gid int) { ++ r0, _ := rawSyscallNoError(SYS_GETGID, 0, 0, 0) ++ gid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getuid() (uid int) { ++ r0, _ := rawSyscallNoError(SYS_GETUID, 0, 0, 0) ++ uid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listen(s int, n int) (err error) { ++ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pread(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pwrite(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Seek(fd int, offset int64, whence int) (off int64, err error) { ++ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) ++ off = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { ++ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), 
uintptr(count), 0, 0) ++ written = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setfsgid(gid int) (err error) { ++ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setfsuid(uid int) (err error) { ++ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Shutdown(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Statfs(path string, buf *Statfs_t) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { ++ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Truncate(path string, length int64) 
(err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { ++ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getgroups(n int, list *_Gid_t) (nn int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ nn = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setgroups(n int, list *_Gid_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETGROUPS, 
uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { ++ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socket(domain int, typ int, proto int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { ++ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), 
uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { ++ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), 
uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) ++ xaddr = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_t) (n int, err error) { ++ r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func ppoll(fds *pollFd, nfds int, timeout *Timespec, sigmask *sigset_t) (n int, err error) { ++ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} +diff --git a/src/syscall/zsysnum_linux_loong64.go b/src/syscall/zsysnum_linux_loong64.go +new file mode 100644 +index 0000000000..0821777056 +--- /dev/null ++++ b/src/syscall/zsysnum_linux_loong64.go +@@ -0,0 +1,308 @@ ++// mksysnum_linux.pl /usr/include/asm-generic/unistd.h ++// Code generated by the command above; DO NOT EDIT. 
++ ++package syscall ++ ++const ( ++ SYS_IO_SETUP = 0 ++ SYS_IO_DESTROY = 1 ++ SYS_IO_SUBMIT = 2 ++ SYS_IO_CANCEL = 3 ++ SYS_IO_GETEVENTS = 4 ++ SYS_SETXATTR = 5 ++ SYS_LSETXATTR = 6 ++ SYS_FSETXATTR = 7 ++ SYS_GETXATTR = 8 ++ SYS_LGETXATTR = 9 ++ SYS_FGETXATTR = 10 ++ SYS_LISTXATTR = 11 ++ SYS_LLISTXATTR = 12 ++ SYS_FLISTXATTR = 13 ++ SYS_REMOVEXATTR = 14 ++ SYS_LREMOVEXATTR = 15 ++ SYS_FREMOVEXATTR = 16 ++ SYS_GETCWD = 17 ++ SYS_LOOKUP_DCOOKIE = 18 ++ SYS_EVENTFD2 = 19 ++ SYS_EPOLL_CREATE1 = 20 ++ SYS_EPOLL_CTL = 21 ++ SYS_EPOLL_PWAIT = 22 ++ SYS_DUP = 23 ++ SYS_DUP3 = 24 ++ SYS_FCNTL = 25 ++ SYS_INOTIFY_INIT1 = 26 ++ SYS_INOTIFY_ADD_WATCH = 27 ++ SYS_INOTIFY_RM_WATCH = 28 ++ SYS_IOCTL = 29 ++ SYS_IOPRIO_SET = 30 ++ SYS_IOPRIO_GET = 31 ++ SYS_FLOCK = 32 ++ SYS_MKNODAT = 33 ++ SYS_MKDIRAT = 34 ++ SYS_UNLINKAT = 35 ++ SYS_SYMLINKAT = 36 ++ SYS_LINKAT = 37 ++ SYS_UMOUNT2 = 39 ++ SYS_MOUNT = 40 ++ SYS_PIVOT_ROOT = 41 ++ SYS_NFSSERVCTL = 42 ++ SYS_STATFS = 43 ++ SYS_FSTATFS = 44 ++ SYS_TRUNCATE = 45 ++ SYS_FTRUNCATE = 46 ++ SYS_FALLOCATE = 47 ++ SYS_FACCESSAT = 48 ++ SYS_CHDIR = 49 ++ SYS_FCHDIR = 50 ++ SYS_CHROOT = 51 ++ SYS_FCHMOD = 52 ++ SYS_FCHMODAT = 53 ++ SYS_FCHOWNAT = 54 ++ SYS_FCHOWN = 55 ++ SYS_OPENAT = 56 ++ SYS_CLOSE = 57 ++ SYS_VHANGUP = 58 ++ SYS_PIPE2 = 59 ++ SYS_QUOTACTL = 60 ++ SYS_GETDENTS64 = 61 ++ SYS_LSEEK = 62 ++ SYS_READ = 63 ++ SYS_WRITE = 64 ++ SYS_READV = 65 ++ SYS_WRITEV = 66 ++ SYS_PREAD64 = 67 ++ SYS_PWRITE64 = 68 ++ SYS_PREADV = 69 ++ SYS_PWRITEV = 70 ++ SYS_SENDFILE = 71 ++ SYS_PSELECT6 = 72 ++ SYS_PPOLL = 73 ++ SYS_SIGNALFD4 = 74 ++ SYS_VMSPLICE = 75 ++ SYS_SPLICE = 76 ++ SYS_TEE = 77 ++ SYS_READLINKAT = 78 ++ SYS_FSTATAT = 79 ++ SYS_FSTAT = 80 ++ SYS_SYNC = 81 ++ SYS_FSYNC = 82 ++ SYS_FDATASYNC = 83 ++ SYS_SYNC_FILE_RANGE = 84 ++ SYS_TIMERFD_CREATE = 85 ++ SYS_TIMERFD_SETTIME = 86 ++ SYS_TIMERFD_GETTIME = 87 ++ SYS_UTIMENSAT = 88 ++ SYS_ACCT = 89 ++ SYS_CAPGET = 90 ++ SYS_CAPSET = 91 ++ SYS_PERSONALITY = 92 ++ SYS_EXIT = 93 ++ 
SYS_EXIT_GROUP = 94 ++ SYS_WAITID = 95 ++ SYS_SET_TID_ADDRESS = 96 ++ SYS_UNSHARE = 97 ++ SYS_FUTEX = 98 ++ SYS_SET_ROBUST_LIST = 99 ++ SYS_GET_ROBUST_LIST = 100 ++ SYS_NANOSLEEP = 101 ++ SYS_GETITIMER = 102 ++ SYS_SETITIMER = 103 ++ SYS_KEXEC_LOAD = 104 ++ SYS_INIT_MODULE = 105 ++ SYS_DELETE_MODULE = 106 ++ SYS_TIMER_CREATE = 107 ++ SYS_TIMER_GETTIME = 108 ++ SYS_TIMER_GETOVERRUN = 109 ++ SYS_TIMER_SETTIME = 110 ++ SYS_TIMER_DELETE = 111 ++ SYS_CLOCK_SETTIME = 112 ++ SYS_CLOCK_GETTIME = 113 ++ SYS_CLOCK_GETRES = 114 ++ SYS_CLOCK_NANOSLEEP = 115 ++ SYS_SYSLOG = 116 ++ SYS_PTRACE = 117 ++ SYS_SCHED_SETPARAM = 118 ++ SYS_SCHED_SETSCHEDULER = 119 ++ SYS_SCHED_GETSCHEDULER = 120 ++ SYS_SCHED_GETPARAM = 121 ++ SYS_SCHED_SETAFFINITY = 122 ++ SYS_SCHED_GETAFFINITY = 123 ++ SYS_SCHED_YIELD = 124 ++ SYS_SCHED_GET_PRIORITY_MAX = 125 ++ SYS_SCHED_GET_PRIORITY_MIN = 126 ++ SYS_SCHED_RR_GET_INTERVAL = 127 ++ SYS_RESTART_SYSCALL = 128 ++ SYS_KILL = 129 ++ SYS_TKILL = 130 ++ SYS_TGKILL = 131 ++ SYS_SIGALTSTACK = 132 ++ SYS_RT_SIGSUSPEND = 133 ++ SYS_RT_SIGACTION = 134 ++ SYS_RT_SIGPROCMASK = 135 ++ SYS_RT_SIGPENDING = 136 ++ SYS_RT_SIGTIMEDWAIT = 137 ++ SYS_RT_SIGQUEUEINFO = 138 ++ SYS_RT_SIGRETURN = 139 ++ SYS_SETPRIORITY = 140 ++ SYS_GETPRIORITY = 141 ++ SYS_REBOOT = 142 ++ SYS_SETREGID = 143 ++ SYS_SETGID = 144 ++ SYS_SETREUID = 145 ++ SYS_SETUID = 146 ++ SYS_SETRESUID = 147 ++ SYS_GETRESUID = 148 ++ SYS_SETRESGID = 149 ++ SYS_GETRESGID = 150 ++ SYS_SETFSUID = 151 ++ SYS_SETFSGID = 152 ++ SYS_TIMES = 153 ++ SYS_SETPGID = 154 ++ SYS_GETPGID = 155 ++ SYS_GETSID = 156 ++ SYS_SETSID = 157 ++ SYS_GETGROUPS = 158 ++ SYS_SETGROUPS = 159 ++ SYS_UNAME = 160 ++ SYS_SETHOSTNAME = 161 ++ SYS_SETDOMAINNAME = 162 ++ SYS_GETRUSAGE = 165 ++ SYS_UMASK = 166 ++ SYS_PRCTL = 167 ++ SYS_GETCPU = 168 ++ SYS_GETTIMEOFDAY = 169 ++ SYS_SETTIMEOFDAY = 170 ++ SYS_ADJTIMEX = 171 ++ SYS_GETPID = 172 ++ SYS_GETPPID = 173 ++ SYS_GETUID = 174 ++ SYS_GETEUID = 175 ++ SYS_GETGID = 176 ++ SYS_GETEGID = 177 ++ 
SYS_GETTID = 178 ++ SYS_SYSINFO = 179 ++ SYS_MQ_OPEN = 180 ++ SYS_MQ_UNLINK = 181 ++ SYS_MQ_TIMEDSEND = 182 ++ SYS_MQ_TIMEDRECEIVE = 183 ++ SYS_MQ_NOTIFY = 184 ++ SYS_MQ_GETSETATTR = 185 ++ SYS_MSGGET = 186 ++ SYS_MSGCTL = 187 ++ SYS_MSGRCV = 188 ++ SYS_MSGSND = 189 ++ SYS_SEMGET = 190 ++ SYS_SEMCTL = 191 ++ SYS_SEMTIMEDOP = 192 ++ SYS_SEMOP = 193 ++ SYS_SHMGET = 194 ++ SYS_SHMCTL = 195 ++ SYS_SHMAT = 196 ++ SYS_SHMDT = 197 ++ SYS_SOCKET = 198 ++ SYS_SOCKETPAIR = 199 ++ SYS_BIND = 200 ++ SYS_LISTEN = 201 ++ SYS_ACCEPT = 202 ++ SYS_CONNECT = 203 ++ SYS_GETSOCKNAME = 204 ++ SYS_GETPEERNAME = 205 ++ SYS_SENDTO = 206 ++ SYS_RECVFROM = 207 ++ SYS_SETSOCKOPT = 208 ++ SYS_GETSOCKOPT = 209 ++ SYS_SHUTDOWN = 210 ++ SYS_SENDMSG = 211 ++ SYS_RECVMSG = 212 ++ SYS_READAHEAD = 213 ++ SYS_BRK = 214 ++ SYS_MUNMAP = 215 ++ SYS_MREMAP = 216 ++ SYS_ADD_KEY = 217 ++ SYS_REQUEST_KEY = 218 ++ SYS_KEYCTL = 219 ++ SYS_CLONE = 220 ++ SYS_EXECVE = 221 ++ SYS_MMAP = 222 ++ SYS_FADVISE64 = 223 ++ SYS_SWAPON = 224 ++ SYS_SWAPOFF = 225 ++ SYS_MPROTECT = 226 ++ SYS_MSYNC = 227 ++ SYS_MLOCK = 228 ++ SYS_MUNLOCK = 229 ++ SYS_MLOCKALL = 230 ++ SYS_MUNLOCKALL = 231 ++ SYS_MINCORE = 232 ++ SYS_MADVISE = 233 ++ SYS_REMAP_FILE_PAGES = 234 ++ SYS_MBIND = 235 ++ SYS_GET_MEMPOLICY = 236 ++ SYS_SET_MEMPOLICY = 237 ++ SYS_MIGRATE_PAGES = 238 ++ SYS_MOVE_PAGES = 239 ++ SYS_RT_TGSIGQUEUEINFO = 240 ++ SYS_PERF_EVENT_OPEN = 241 ++ SYS_ACCEPT4 = 242 ++ SYS_RECVMMSG = 243 ++ SYS_ARCH_SPECIFIC_SYSCALL = 244 ++ SYS_WAIT4 = 260 ++ SYS_PRLIMIT64 = 261 ++ SYS_FANOTIFY_INIT = 262 ++ SYS_FANOTIFY_MARK = 263 ++ SYS_NAME_TO_HANDLE_AT = 264 ++ SYS_OPEN_BY_HANDLE_AT = 265 ++ SYS_CLOCK_ADJTIME = 266 ++ SYS_SYNCFS = 267 ++ SYS_SETNS = 268 ++ SYS_SENDMMSG = 269 ++ SYS_PROCESS_VM_READV = 270 ++ SYS_PROCESS_VM_WRITEV = 271 ++ SYS_KCMP = 272 ++ SYS_FINIT_MODULE = 273 ++ SYS_SCHED_SETATTR = 274 ++ SYS_SCHED_GETATTR = 275 ++ SYS_RENAMEAT2 = 276 ++ SYS_SECCOMP = 277 ++ SYS_GETRANDOM = 278 ++ SYS_MEMFD_CREATE = 279 ++ SYS_BPF = 280 
++ SYS_EXECVEAT = 281 ++ SYS_USERFAULTFD = 282 ++ SYS_MEMBARRIER = 283 ++ SYS_MLOCK2 = 284 ++ SYS_COPY_FILE_RANGE = 285 ++ SYS_PREADV2 = 286 ++ SYS_PWRITEV2 = 287 ++ SYS_PKEY_MPROTECT = 288 ++ SYS_PKEY_ALLOC = 289 ++ SYS_PKEY_FREE = 290 ++ SYS_STATX = 291 ++ SYS_IO_PGETEVENTS = 292 ++ SYS_RSEQ = 293 ++ SYS_KEXEC_FILE_LOAD = 294 ++ SYS_PIDFD_SEND_SIGNAL = 424 ++ SYS_IO_URING_SETUP = 425 ++ SYS_IO_URING_ENTER = 426 ++ SYS_IO_URING_REGISTER = 427 ++ SYS_OPEN_TREE = 428 ++ SYS_MOVE_MOUNT = 429 ++ SYS_FSOPEN = 430 ++ SYS_FSCONFIG = 431 ++ SYS_FSMOUNT = 432 ++ SYS_FSPICK = 433 ++ SYS_PIDFD_OPEN = 434 ++ SYS_CLOSE_RANGE = 436 ++ SYS_OPENAT2 = 437 ++ SYS_PIDFD_GETFD = 438 ++ SYS_FACCESSAT2 = 439 ++ SYS_PROCESS_MADVISE = 440 ++ SYS_EPOLL_PWAIT2 = 441 ++ SYS_MOUNT_SETATTR = 442 ++ SYS_QUOTACTL_FD = 443 ++ SYS_LANDLOCK_CREATE_RULESET = 444 ++ SYS_LANDLOCK_ADD_RULE = 445 ++ SYS_LANDLOCK_RESTRICT_SELF = 446 ++ SYS_PROCESS_MRELEASE = 448 ++ SYS_FUTEX_WAITV = 449 ++) +diff --git a/src/syscall/ztypes_linux_loong64.go b/src/syscall/ztypes_linux_loong64.go +new file mode 100644 +index 0000000000..ab2d7211fe +--- /dev/null ++++ b/src/syscall/ztypes_linux_loong64.go +@@ -0,0 +1,599 @@ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
++// cgo -godefs -- types_linux.go ++ ++package syscall ++ ++const ( ++ sizeofPtr = 0x8 ++ sizeofShort = 0x2 ++ sizeofInt = 0x4 ++ sizeofLong = 0x8 ++ sizeofLongLong = 0x8 ++ PathMax = 0x1000 ++) ++ ++type ( ++ _C_short int16 ++ _C_int int32 ++ _C_long int64 ++ _C_long_long int64 ++) ++ ++type Timespec struct { ++ Sec int64 ++ Nsec int64 ++} ++ ++type Timeval struct { ++ Sec int64 ++ Usec int64 ++} ++ ++type Timex struct { ++ Modes uint32 ++ Offset int64 ++ Freq int64 ++ Maxerror int64 ++ Esterror int64 ++ Status int32 ++ Constant int64 ++ Precision int64 ++ Tolerance int64 ++ Time Timeval ++ Tick int64 ++ Ppsfreq int64 ++ Jitter int64 ++ Shift int32 ++ Stabil int64 ++ Jitcnt int64 ++ Calcnt int64 ++ Errcnt int64 ++ Stbcnt int64 ++ Tai int32 ++ Pad_cgo_0 [44]byte ++} ++ ++type Time_t int64 ++ ++type Tms struct { ++ Utime int64 ++ Stime int64 ++ Cutime int64 ++ Cstime int64 ++} ++ ++type Utimbuf struct { ++ Actime int64 ++ Modtime int64 ++} ++ ++type Rusage struct { ++ Utime Timeval ++ Stime Timeval ++ Maxrss int64 ++ Ixrss int64 ++ Idrss int64 ++ Isrss int64 ++ Minflt int64 ++ Majflt int64 ++ Nswap int64 ++ Inblock int64 ++ Oublock int64 ++ Msgsnd int64 ++ Msgrcv int64 ++ Nsignals int64 ++ Nvcsw int64 ++ Nivcsw int64 ++} ++ ++type Rlimit struct { ++ Cur uint64 ++ Max uint64 ++} ++ ++type _Gid_t uint32 ++ ++type Stat_t struct { ++ Dev uint64 ++ Ino uint64 ++ Mode uint32 ++ Nlink uint32 ++ Uid uint32 ++ Gid uint32 ++ Rdev uint64 ++ X__pad1 uint64 ++ Size int64 ++ Blksize int32 ++ X__pad2 int32 ++ Blocks int64 ++ Atim Timespec ++ Mtim Timespec ++ Ctim Timespec ++ X__glibc_reserved [2]int32 ++} ++ ++type Statfs_t struct { ++ Type int64 ++ Bsize int64 ++ Blocks uint64 ++ Bfree uint64 ++ Bavail uint64 ++ Files uint64 ++ Ffree uint64 ++ Fsid Fsid ++ Namelen int64 ++ Frsize int64 ++ Flags int64 ++ Spare [4]int64 ++} ++ ++type Dirent struct { ++ Ino uint64 ++ Off int64 ++ Reclen uint16 ++ Type uint8 ++ Name [256]int8 ++ Pad_cgo_0 [5]byte ++} ++ ++type Fsid struct { ++ 
X__val [2]int32 ++} ++ ++type Flock_t struct { ++ Type int16 ++ Whence int16 ++ Start int64 ++ Len int64 ++ Pid int32 ++ Pad_cgo_0 [4]byte ++} ++ ++type RawSockaddrInet4 struct { ++ Family uint16 ++ Port uint16 ++ Addr [4]byte /* in_addr */ ++ Zero [8]uint8 ++} ++ ++type RawSockaddrInet6 struct { ++ Family uint16 ++ Port uint16 ++ Flowinfo uint32 ++ Addr [16]byte /* in6_addr */ ++ Scope_id uint32 ++} ++ ++type RawSockaddrUnix struct { ++ Family uint16 ++ Path [108]int8 ++} ++ ++type RawSockaddrLinklayer struct { ++ Family uint16 ++ Protocol uint16 ++ Ifindex int32 ++ Hatype uint16 ++ Pkttype uint8 ++ Halen uint8 ++ Addr [8]uint8 ++} ++ ++type RawSockaddrNetlink struct { ++ Family uint16 ++ Pad uint16 ++ Pid uint32 ++ Groups uint32 ++} ++ ++type RawSockaddr struct { ++ Family uint16 ++ Data [14]int8 ++} ++ ++type RawSockaddrAny struct { ++ Addr RawSockaddr ++ Pad [96]int8 ++} ++ ++type _Socklen uint32 ++ ++type Linger struct { ++ Onoff int32 ++ Linger int32 ++} ++ ++type Iovec struct { ++ Base *byte ++ Len uint64 ++} ++ ++type IPMreq struct { ++ Multiaddr [4]byte /* in_addr */ ++ Interface [4]byte /* in_addr */ ++} ++ ++type IPMreqn struct { ++ Multiaddr [4]byte /* in_addr */ ++ Address [4]byte /* in_addr */ ++ Ifindex int32 ++} ++ ++type IPv6Mreq struct { ++ Multiaddr [16]byte /* in6_addr */ ++ Interface uint32 ++} ++ ++type Msghdr struct { ++ Name *byte ++ Namelen uint32 ++ Iov *Iovec ++ Iovlen uint64 ++ Control *byte ++ Controllen uint64 ++ Flags int32 ++ Pad_cgo_0 [4]byte ++} ++ ++type Cmsghdr struct { ++ Len uint64 ++ Level int32 ++ Type int32 ++} ++ ++type Inet4Pktinfo struct { ++ Ifindex int32 ++ Spec_dst [4]byte /* in_addr */ ++ Addr [4]byte /* in_addr */ ++} ++ ++type Inet6Pktinfo struct { ++ Addr [16]byte /* in6_addr */ ++ Ifindex uint32 ++} ++ ++type IPv6MTUInfo struct { ++ Addr RawSockaddrInet6 ++ Mtu uint32 ++} ++ ++type ICMPv6Filter struct { ++ Data [8]uint32 ++} ++ ++type Ucred struct { ++ Pid int32 ++ Uid uint32 ++ Gid uint32 ++} ++ ++type TCPInfo 
struct { ++ State uint8 ++ Ca_state uint8 ++ Retransmits uint8 ++ Probes uint8 ++ Backoff uint8 ++ Options uint8 ++ Rto uint32 ++ Ato uint32 ++ Snd_mss uint32 ++ Rcv_mss uint32 ++ Unacked uint32 ++ Sacked uint32 ++ Lost uint32 ++ Retrans uint32 ++ Fackets uint32 ++ Last_data_sent uint32 ++ Last_ack_sent uint32 ++ Last_data_recv uint32 ++ Last_ack_recv uint32 ++ Pmtu uint32 ++ Rcv_ssthresh uint32 ++ Rtt uint32 ++ Rttvar uint32 ++ Snd_ssthresh uint32 ++ Snd_cwnd uint32 ++ Advmss uint32 ++ Reordering uint32 ++ Rcv_rtt uint32 ++ Rcv_space uint32 ++ Total_retrans uint32 ++} ++ ++const ( ++ SizeofSockaddrInet4 = 0x10 ++ SizeofSockaddrInet6 = 0x1c ++ SizeofSockaddrAny = 0x70 ++ SizeofSockaddrUnix = 0x6e ++ SizeofSockaddrLinklayer = 0x14 ++ SizeofSockaddrNetlink = 0xc ++ SizeofLinger = 0x8 ++ SizeofIPMreq = 0x8 ++ SizeofIPMreqn = 0xc ++ SizeofIPv6Mreq = 0x14 ++ SizeofMsghdr = 0x38 ++ SizeofCmsghdr = 0x10 ++ SizeofInet4Pktinfo = 0xc ++ SizeofInet6Pktinfo = 0x14 ++ SizeofIPv6MTUInfo = 0x20 ++ SizeofICMPv6Filter = 0x20 ++ SizeofUcred = 0xc ++ SizeofTCPInfo = 0x68 ++) ++ ++const ( ++ IFA_UNSPEC = 0x0 ++ IFA_ADDRESS = 0x1 ++ IFA_LOCAL = 0x2 ++ IFA_LABEL = 0x3 ++ IFA_BROADCAST = 0x4 ++ IFA_ANYCAST = 0x5 ++ IFA_CACHEINFO = 0x6 ++ IFA_MULTICAST = 0x7 ++ IFLA_UNSPEC = 0x0 ++ IFLA_ADDRESS = 0x1 ++ IFLA_BROADCAST = 0x2 ++ IFLA_IFNAME = 0x3 ++ IFLA_MTU = 0x4 ++ IFLA_LINK = 0x5 ++ IFLA_QDISC = 0x6 ++ IFLA_STATS = 0x7 ++ IFLA_COST = 0x8 ++ IFLA_PRIORITY = 0x9 ++ IFLA_MASTER = 0xa ++ IFLA_WIRELESS = 0xb ++ IFLA_PROTINFO = 0xc ++ IFLA_TXQLEN = 0xd ++ IFLA_MAP = 0xe ++ IFLA_WEIGHT = 0xf ++ IFLA_OPERSTATE = 0x10 ++ IFLA_LINKMODE = 0x11 ++ IFLA_LINKINFO = 0x12 ++ IFLA_NET_NS_PID = 0x13 ++ IFLA_IFALIAS = 0x14 ++ IFLA_MAX = 0x39 ++ RT_SCOPE_UNIVERSE = 0x0 ++ RT_SCOPE_SITE = 0xc8 ++ RT_SCOPE_LINK = 0xfd ++ RT_SCOPE_HOST = 0xfe ++ RT_SCOPE_NOWHERE = 0xff ++ RT_TABLE_UNSPEC = 0x0 ++ RT_TABLE_COMPAT = 0xfc ++ RT_TABLE_DEFAULT = 0xfd ++ RT_TABLE_MAIN = 0xfe ++ RT_TABLE_LOCAL = 0xff ++ RT_TABLE_MAX 
= 0xffffffff ++ RTA_UNSPEC = 0x0 ++ RTA_DST = 0x1 ++ RTA_SRC = 0x2 ++ RTA_IIF = 0x3 ++ RTA_OIF = 0x4 ++ RTA_GATEWAY = 0x5 ++ RTA_PRIORITY = 0x6 ++ RTA_PREFSRC = 0x7 ++ RTA_METRICS = 0x8 ++ RTA_MULTIPATH = 0x9 ++ RTA_FLOW = 0xb ++ RTA_CACHEINFO = 0xc ++ RTA_TABLE = 0xf ++ RTN_UNSPEC = 0x0 ++ RTN_UNICAST = 0x1 ++ RTN_LOCAL = 0x2 ++ RTN_BROADCAST = 0x3 ++ RTN_ANYCAST = 0x4 ++ RTN_MULTICAST = 0x5 ++ RTN_BLACKHOLE = 0x6 ++ RTN_UNREACHABLE = 0x7 ++ RTN_PROHIBIT = 0x8 ++ RTN_THROW = 0x9 ++ RTN_NAT = 0xa ++ RTN_XRESOLVE = 0xb ++ RTNLGRP_NONE = 0x0 ++ RTNLGRP_LINK = 0x1 ++ RTNLGRP_NOTIFY = 0x2 ++ RTNLGRP_NEIGH = 0x3 ++ RTNLGRP_TC = 0x4 ++ RTNLGRP_IPV4_IFADDR = 0x5 ++ RTNLGRP_IPV4_MROUTE = 0x6 ++ RTNLGRP_IPV4_ROUTE = 0x7 ++ RTNLGRP_IPV4_RULE = 0x8 ++ RTNLGRP_IPV6_IFADDR = 0x9 ++ RTNLGRP_IPV6_MROUTE = 0xa ++ RTNLGRP_IPV6_ROUTE = 0xb ++ RTNLGRP_IPV6_IFINFO = 0xc ++ RTNLGRP_IPV6_PREFIX = 0x12 ++ RTNLGRP_IPV6_RULE = 0x13 ++ RTNLGRP_ND_USEROPT = 0x14 ++ SizeofNlMsghdr = 0x10 ++ SizeofNlMsgerr = 0x14 ++ SizeofRtGenmsg = 0x1 ++ SizeofNlAttr = 0x4 ++ SizeofRtAttr = 0x4 ++ SizeofIfInfomsg = 0x10 ++ SizeofIfAddrmsg = 0x8 ++ SizeofRtMsg = 0xc ++ SizeofRtNexthop = 0x8 ++) ++ ++type NlMsghdr struct { ++ Len uint32 ++ Type uint16 ++ Flags uint16 ++ Seq uint32 ++ Pid uint32 ++} ++ ++type NlMsgerr struct { ++ Error int32 ++ Msg NlMsghdr ++} ++ ++type RtGenmsg struct { ++ Family uint8 ++} ++ ++type NlAttr struct { ++ Len uint16 ++ Type uint16 ++} ++ ++type RtAttr struct { ++ Len uint16 ++ Type uint16 ++} ++ ++type IfInfomsg struct { ++ Family uint8 ++ X__ifi_pad uint8 ++ Type uint16 ++ Index int32 ++ Flags uint32 ++ Change uint32 ++} ++ ++type IfAddrmsg struct { ++ Family uint8 ++ Prefixlen uint8 ++ Flags uint8 ++ Scope uint8 ++ Index uint32 ++} ++ ++type RtMsg struct { ++ Family uint8 ++ Dst_len uint8 ++ Src_len uint8 ++ Tos uint8 ++ Table uint8 ++ Protocol uint8 ++ Scope uint8 ++ Type uint8 ++ Flags uint32 ++} ++ ++type RtNexthop struct { ++ Len uint16 ++ Flags uint8 ++ Hops uint8 ++ 
Ifindex int32 ++} ++ ++const ( ++ SizeofSockFilter = 0x8 ++ SizeofSockFprog = 0x10 ++) ++ ++type SockFilter struct { ++ Code uint16 ++ Jt uint8 ++ Jf uint8 ++ K uint32 ++} ++ ++type SockFprog struct { ++ Len uint16 ++ Filter *SockFilter ++} ++ ++type InotifyEvent struct { ++ Wd int32 ++ Mask uint32 ++ Cookie uint32 ++ Len uint32 ++} ++ ++const SizeofInotifyEvent = 0x10 ++ ++type PtraceRegs struct { ++ Regs [32]uint64 ++ Era uint64 ++ Badv uint64 ++ Reserved [11]uint64 ++} ++ ++type ptracePsw struct { ++} ++ ++type ptraceFpregs struct { ++} ++ ++type ptracePer struct { ++} ++ ++type FdSet struct { ++ Bits [16]int64 ++} ++ ++type Sysinfo_t struct { ++ Uptime int64 ++ Loads [3]uint64 ++ Totalram uint64 ++ Freeram uint64 ++ Sharedram uint64 ++ Bufferram uint64 ++ Totalswap uint64 ++ Freeswap uint64 ++ Procs uint16 ++ Pad uint16 ++ Totalhigh uint64 ++ Freehigh uint64 ++ Unit uint32 ++ X_f [0]int8 ++ Pad_cgo_0 [4]byte ++} ++ ++type Utsname struct { ++ Sysname [65]int8 ++ Nodename [65]int8 ++ Release [65]int8 ++ Version [65]int8 ++ Machine [65]int8 ++ Domainname [65]int8 ++} ++ ++type Ustat_t struct { ++ Tfree int32 ++ Tinode uint64 ++ Fname [6]int8 ++ Fpack [6]int8 ++ Pad_cgo_0 [4]byte ++} ++ ++type EpollEvent struct { ++ Events uint32 ++ _ int32 ++ Fd int32 ++ Pad int32 ++} ++ ++const ( ++ _AT_FDCWD = -0x64 ++ _AT_REMOVEDIR = 0x200 ++ _AT_SYMLINK_NOFOLLOW = 0x100 ++ _AT_EACCESS = 0x200 ++) ++ ++type pollFd struct { ++ Fd int32 ++ Events int16 ++ Revents int16 ++} ++ ++type Termios struct { ++ Iflag uint32 ++ Oflag uint32 ++ Cflag uint32 ++ Lflag uint32 ++ Line uint8 ++ Cc [32]uint8 ++ Ispeed uint32 ++ Ospeed uint32 ++} ++ ++const ( ++ IUCLC = 0x200 ++ OLCUC = 0x2 ++ TCGETS = 0x5401 ++ TCSETS = 0x5402 ++ XCASE = 0x4 ++) +-- +2.38.0 + diff --git a/loongarch64/0037-internal-syscall-unix-loong64-use-generic-syscall.patch b/loongarch64/0037-internal-syscall-unix-loong64-use-generic-syscall.patch new file mode 100644 index 0000000..4af583b --- /dev/null +++ 
b/loongarch64/0037-internal-syscall-unix-loong64-use-generic-syscall.patch @@ -0,0 +1,45 @@ +From 00776af6da1ca76845125eaa7c83a6197746c7ee Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 15 Nov 2021 20:59:24 +0800 +Subject: [PATCH 37/82] internal/syscall/unix: loong64 use generic syscall + +Change-Id: I5988bf3efed37b03b9193f1089dfece060ccba99 +--- + src/internal/syscall/unix/at_sysnum_fstatat_linux.go | 4 ++-- + src/internal/syscall/unix/sysnum_linux_generic.go | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go +index e53a2d1b75..5f7ea12e01 100644 +--- a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go ++++ b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build arm64 || riscv64 +-// +build arm64 riscv64 ++//go:build arm64 || riscv64 || loong64 ++// +build arm64 riscv64 loong64 + + package unix + +diff --git a/src/internal/syscall/unix/sysnum_linux_generic.go b/src/internal/syscall/unix/sysnum_linux_generic.go +index a76025454c..3ec0712573 100644 +--- a/src/internal/syscall/unix/sysnum_linux_generic.go ++++ b/src/internal/syscall/unix/sysnum_linux_generic.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build linux && (arm64 || riscv64) ++//go:build linux && (arm64 || riscv64 || loong64) + // +build linux +-// +build arm64 riscv64 ++// +build arm64 riscv64 loong64 + + package unix + +-- +2.38.0 + diff --git a/loongarch64/0038-misc-test-fix-test-error-for-loong64.patch b/loongarch64/0038-misc-test-fix-test-error-for-loong64.patch new file mode 100644 index 0000000..7cf0a03 --- /dev/null +++ b/loongarch64/0038-misc-test-fix-test-error-for-loong64.patch @@ -0,0 +1,93 @@ +From f8b046d1bced9feed15058779030b023bef8d0fa Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:13:29 +0800 +Subject: [PATCH 38/82] misc, test: fix test error for loong64 + +Change-Id: I6760b4a7e51646773cd0f48baa1baba01b213b7d +--- + .../cgo/test/testdata/issue9400/asm_loong64.s | 33 +++++++++++++++++++ + test/intrinsic_atomic.go | 2 +- + test/nosplit.go | 3 ++ + test/run.go | 1 + + 4 files changed, 38 insertions(+), 1 deletion(-) + create mode 100644 misc/cgo/test/testdata/issue9400/asm_loong64.s + +diff --git a/misc/cgo/test/testdata/issue9400/asm_loong64.s b/misc/cgo/test/testdata/issue9400/asm_loong64.s +new file mode 100644 +index 0000000000..25132fa601 +--- /dev/null ++++ b/misc/cgo/test/testdata/issue9400/asm_loong64.s +@@ -0,0 +1,33 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build loong64 ++// +build !gccgo ++ ++#include "textflag.h" ++ ++#define DBAR WORD $0x38720000 ++ ++TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 ++ // Rewind stack pointer so anything that happens on the stack ++ // will clobber the test pattern created by the caller ++ ADDV $(1024*8), R3 ++ ++ // Ask signaller to setgid ++ MOVW $1, R12 ++ DBAR ++ MOVW R12, ·Baton(SB) ++ DBAR ++ ++ // Wait for setgid completion ++loop: ++ DBAR ++ MOVW ·Baton(SB), R12 ++ OR R13, R13, R13 // hint that we're in a spin loop ++ BNE R12, loop ++ DBAR ++ ++ // Restore stack ++ ADDV $(-1024*8), R3 ++ RET +diff --git a/test/intrinsic_atomic.go b/test/intrinsic_atomic.go +index 61911b7a46..a1004c89d9 100644 +--- a/test/intrinsic_atomic.go ++++ b/test/intrinsic_atomic.go +@@ -1,5 +1,5 @@ + // errorcheck -0 -d=ssa/intrinsics/debug +-// +build amd64 arm64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x ++// +build amd64 arm64 loong64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x + + // Copyright 2016 The Go Authors. All rights reserved. 
+ // Use of this source code is governed by a BSD-style +diff --git a/test/nosplit.go b/test/nosplit.go +index 7c7e1bfd99..8e1ebfffb4 100644 +--- a/test/nosplit.go ++++ b/test/nosplit.go +@@ -263,6 +263,9 @@ TestCases: + case "mips64", "mips64le": + ptrSize = 8 + fmt.Fprintf(&buf, "#define REGISTER (R0)\n") ++ case "loong64": ++ ptrSize = 8 ++ fmt.Fprintf(&buf, "#define REGISTER (R0)\n") + case "ppc64", "ppc64le": + ptrSize = 8 + fmt.Fprintf(&buf, "#define REGISTER (CTR)\n") +diff --git a/test/run.go b/test/run.go +index d7f5d02391..4b94538f6a 100644 +--- a/test/run.go ++++ b/test/run.go +@@ -1588,6 +1588,7 @@ var ( + "amd64": {}, + "arm": {"GOARM", "5", "6", "7"}, + "arm64": {}, ++ "loong64": {}, + "mips": {"GOMIPS", "hardfloat", "softfloat"}, + "mips64": {"GOMIPS64", "hardfloat", "softfloat"}, + "ppc64": {"GOPPC64", "power8", "power9"}, +-- +2.38.0 + diff --git a/loongarch64/0039-copyright-add-Loongson-into-AUTHORS.patch b/loongarch64/0039-copyright-add-Loongson-into-AUTHORS.patch new file mode 100644 index 0000000..6b4b0d3 --- /dev/null +++ b/loongarch64/0039-copyright-add-Loongson-into-AUTHORS.patch @@ -0,0 +1,25 @@ +From 400cb127f9017e9336b10c4a0149aea88ef2b750 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 14:57:00 +0800 +Subject: [PATCH 39/82] copyright: add Loongson into AUTHORS + +Change-Id: I23fb430f1f6e8a587f13e2f020721cbd3a45d4ed +--- + AUTHORS | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/AUTHORS b/AUTHORS +index 95d3158d20..c0bad0c247 100644 +--- a/AUTHORS ++++ b/AUTHORS +@@ -824,6 +824,7 @@ Liberty Fund Inc + Linaro Limited + Lion Yang + Lloyd Dewolf ++Loongson Inc. 
+ Lorenzo Masini + Lorenzo Stoakes + Luan Santos +-- +2.38.0 + diff --git a/loongarch64/0040-api-fix-check-errors-for-loong64.patch b/loongarch64/0040-api-fix-check-errors-for-loong64.patch new file mode 100644 index 0000000..5bab29f --- /dev/null +++ b/loongarch64/0040-api-fix-check-errors-for-loong64.patch @@ -0,0 +1,126 @@ +From 49ac9125a8e3ff13ddebd5fcfc0600e2af77f462 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 23 Jun 2022 15:07:21 +0800 +Subject: [PATCH 40/82] api: fix check errors for loong64. + +Change-Id: I36775fadfb0a538136b119fe0350dcbb536d5ec4 +--- + api/go1.17.txt | 105 +++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 105 insertions(+) + +diff --git a/api/go1.17.txt b/api/go1.17.txt +index 48505381f1..61b38c3343 100644 +--- a/api/go1.17.txt ++++ b/api/go1.17.txt +@@ -193,3 +193,108 @@ pkg time, method (Time) GoString() string + pkg time, method (Time) IsDST() bool + pkg time, method (Time) UnixMicro() int64 + pkg time, method (Time) UnixMilli() int64 ++pkg debug/elf, const EM_LOONGARCH = 258 ++pkg debug/elf, const EM_LOONGARCH Machine ++pkg debug/elf, const R_LARCH_32 = 1 ++pkg debug/elf, const R_LARCH_32 R_LARCH ++pkg debug/elf, const R_LARCH_64 = 2 ++pkg debug/elf, const R_LARCH_64 R_LARCH ++pkg debug/elf, const R_LARCH_ADD16 = 48 ++pkg debug/elf, const R_LARCH_ADD16 R_LARCH ++pkg debug/elf, const R_LARCH_ADD24 = 49 ++pkg debug/elf, const R_LARCH_ADD24 R_LARCH ++pkg debug/elf, const R_LARCH_ADD32 = 50 ++pkg debug/elf, const R_LARCH_ADD32 R_LARCH ++pkg debug/elf, const R_LARCH_ADD64 = 51 ++pkg debug/elf, const R_LARCH_ADD64 R_LARCH ++pkg debug/elf, const R_LARCH_ADD8 = 47 ++pkg debug/elf, const R_LARCH_ADD8 R_LARCH ++pkg debug/elf, const R_LARCH_COPY = 4 ++pkg debug/elf, const R_LARCH_COPY R_LARCH ++pkg debug/elf, const R_LARCH_IRELATIVE = 12 ++pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH ++pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 ++pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH ++pkg debug/elf, const R_LARCH_MARK_LA = 
20 ++pkg debug/elf, const R_LARCH_MARK_LA R_LARCH ++pkg debug/elf, const R_LARCH_MARK_PCREL = 21 ++pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_NONE = 0 ++pkg debug/elf, const R_LARCH_NONE R_LARCH ++pkg debug/elf, const R_LARCH_RELATIVE = 3 ++pkg debug/elf, const R_LARCH_RELATIVE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_ADD = 35 ++pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH ++pkg debug/elf, const R_LARCH_SOP_AND = 36 ++pkg debug/elf, const R_LARCH_SOP_AND R_LARCH ++pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 ++pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 ++pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_NOT = 31 ++pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 ++pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 ++pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 ++pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 ++pkg 
debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL = 25 ++pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 ++pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 ++pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SL = 33 ++pkg debug/elf, const R_LARCH_SOP_SL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SR = 34 ++pkg debug/elf, const R_LARCH_SOP_SR R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SUB = 32 ++pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH ++pkg debug/elf, const R_LARCH_SUB16 = 53 ++pkg debug/elf, const R_LARCH_SUB16 R_LARCH ++pkg debug/elf, const R_LARCH_SUB24 = 54 ++pkg debug/elf, const R_LARCH_SUB24 R_LARCH ++pkg debug/elf, const R_LARCH_SUB32 = 55 ++pkg debug/elf, const R_LARCH_SUB32 R_LARCH ++pkg debug/elf, const R_LARCH_SUB64 = 56 ++pkg debug/elf, const R_LARCH_SUB64 R_LARCH ++pkg debug/elf, const R_LARCH_SUB8 = 52 ++pkg debug/elf, const R_LARCH_SUB8 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 ++pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 ++pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 ++pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 ++pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 ++pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 ++pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH 
++pkg debug/elf, method (R_LARCH) GoString() string ++pkg debug/elf, method (R_LARCH) String() string ++pkg debug/elf, type R_LARCH int +-- +2.38.0 + diff --git a/loongarch64/0041-runtime-fixed-func-breakpoint-implementation-on-loon.patch b/loongarch64/0041-runtime-fixed-func-breakpoint-implementation-on-loon.patch new file mode 100644 index 0000000..17d326d --- /dev/null +++ b/loongarch64/0041-runtime-fixed-func-breakpoint-implementation-on-loon.patch @@ -0,0 +1,30 @@ +From a97e788c90c32fc3fdb3d6a74db016390ffbac09 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 24 Mar 2022 21:11:16 +0800 +Subject: [PATCH 41/82] runtime: fixed func breakpoint implementation on + loong64. + +use the break instruction to implement the function of the func breakpoint. + +Signed-off-by: Guoqi Chen +Change-Id: Iad0274d110f39f0ca2b330df103fd73f8ebbb358 +--- + src/runtime/asm_loong64.s | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index cfc270f28b..9541f6e045 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -79,7 +79,7 @@ DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) + GLOBL runtime·mainPC(SB),RODATA,$8 + + TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 +- MOVV R0, 2(R0) // TODO: TD ++ BREAK + RET + + TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 +-- +2.38.0 + diff --git a/loongarch64/0042-update-vendor-golang.org-x-sys-for-byteorder-fix.patch b/loongarch64/0042-update-vendor-golang.org-x-sys-for-byteorder-fix.patch new file mode 100644 index 0000000..1de35e8 --- /dev/null +++ b/loongarch64/0042-update-vendor-golang.org-x-sys-for-byteorder-fix.patch @@ -0,0 +1,26 @@ +From bd158c025686e5c022ef75d68ecea3f8e1fcf619 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 26 Apr 2022 17:43:15 +0800 +Subject: [PATCH 42/82] update vendor golang.org/x/sys for byteorder fix. 
+ +Signed-off-by: Guoqi Chen +Change-Id: I3a192b1af2e254d7c0e4edce64ccf5460e1f1168 +--- + src/vendor/golang.org/x/sys/cpu/byteorder.go | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/vendor/golang.org/x/sys/cpu/byteorder.go b/src/vendor/golang.org/x/sys/cpu/byteorder.go +index dcbb14ef35..271055be0b 100644 +--- a/src/vendor/golang.org/x/sys/cpu/byteorder.go ++++ b/src/vendor/golang.org/x/sys/cpu/byteorder.go +@@ -46,6 +46,7 @@ func hostByteOrder() byteOrder { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", ++ "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", +-- +2.38.0 + diff --git a/loongarch64/0043-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch b/loongarch64/0043-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch new file mode 100644 index 0000000..793f9b1 --- /dev/null +++ b/loongarch64/0043-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch @@ -0,0 +1,68 @@ +From f336152c77dbebdfa546ab2353d14908ae427f99 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 11 May 2022 07:49:35 +0800 +Subject: [PATCH 43/82] cmd/compile: remove atomic Cas Xchg and Xadd intrinsics + on loong64 + +Change-Id: Id182b0e39845d55668a92b252a36cae6b83bb018 +--- + src/cmd/compile/internal/ssagen/ssa.go | 6 +++--- + test/inline_sync.go | 2 +- + test/intrinsic_atomic.go | 2 +- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index f61f1c2e2d..7ddec846ff 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -3945,7 +3945,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", 
"Xchg64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) +@@ -4010,7 +4010,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xadd64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) +@@ -4032,7 +4032,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Cas64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) +diff --git a/test/inline_sync.go b/test/inline_sync.go +index 30b436af41..de3934359d 100644 +--- a/test/inline_sync.go ++++ b/test/inline_sync.go +@@ -1,4 +1,4 @@ +-// +build !nacl,!386,!wasm,!arm,!gcflags_noopt ++// +build !nacl,!386,!wasm,!arm,!gcflags_noopt,!loong64 + // errorcheck -0 -m + + // Copyright 2019 The Go Authors. All rights reserved. 
+diff --git a/test/intrinsic_atomic.go b/test/intrinsic_atomic.go +index a1004c89d9..61911b7a46 100644 +--- a/test/intrinsic_atomic.go ++++ b/test/intrinsic_atomic.go +@@ -1,5 +1,5 @@ + // errorcheck -0 -d=ssa/intrinsics/debug +-// +build amd64 arm64 loong64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x ++// +build amd64 arm64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x + + // Copyright 2016 The Go Authors. All rights reserved. + // Use of this source code is governed by a BSD-style +-- +2.38.0 + diff --git a/loongarch64/0044-runtime-fix-asyncPreempt-implementation-for-errors-o.patch b/loongarch64/0044-runtime-fix-asyncPreempt-implementation-for-errors-o.patch new file mode 100644 index 0000000..ba813ce --- /dev/null +++ b/loongarch64/0044-runtime-fix-asyncPreempt-implementation-for-errors-o.patch @@ -0,0 +1,101 @@ +From b3686dfc9bdef9f12694f8c3156d55de9f47a2cb Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 19 May 2022 10:26:36 +0800 +Subject: [PATCH 44/82] runtime: fix asyncPreempt implementation for errors on + loong64 + +Change-Id: If93ff18167adec2783503cf428e07da121b1c918 +--- + src/runtime/mkpreempt.go | 28 ++++++++++------------------ + src/runtime/preempt.go | 2 +- + 2 files changed, 11 insertions(+), 19 deletions(-) + +diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go +index 1bad20d60b..7f74d54289 100644 +--- a/src/runtime/mkpreempt.go ++++ b/src/runtime/mkpreempt.go +@@ -81,7 +81,7 @@ var arches = map[string]func(){ + "amd64": genAMD64, + "arm": genARM, + "arm64": genARM64, +- "loong64": genLOONG64, ++ "loong64": genLoong64, + "mips64x": func() { genMIPS(true) }, + "mipsx": func() { genMIPS(false) }, + "ppc64x": genPPC64, +@@ -450,7 +450,7 @@ func genMIPS(_64bit bool) { + p("JMP (R23)") + } + +-func genLOONG64() { ++func genLoong64() { + mov := "MOVV" + movf := "MOVD" + add := "ADDV" +@@ -459,42 +459,34 @@ func genLOONG64() { + regsize := 8 + + // Add integer registers r4-r21 r23-r29 r31 +- // R0 (zero), R30 
(REGTMP), R2(tp),R3 (SP), R22 (g), R1 (LR) are special, ++ // R0 (zero), R30 (REGTMP), R2 (tp), R3 (SP), R22 (g), R1 (LR) are special, + var l = layout{sp: "R3", stack: regsize} // add slot to save PC of interrupted instruction (in LR) + for i := 4; i <= 29; i++ { + if i == 22 { +- continue //R3 is REGSP R22 is g ++ continue // R3 is REGSP R22 is g + } + reg := fmt.Sprintf("R%d", i) + l.add(mov, reg, regsize) + } + l.add(mov, r31, regsize) + +- // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) +- var lfp = layout{sp: "R3", stack: l.stack} +- // lfp.addSpecial( +- // mov+" FCR31, R1\n"+mov+" R1, %d(R29)", +- // mov+" %d(R29), R1\n"+mov+" R1, FCR31", +- // regsize) + // Add floating point registers F0-F31. + for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) +- lfp.add(movf, reg, regsize) ++ l.add(movf, reg, regsize) + } + + // allocate frame, save PC of interrupted instruction (in LR) +- p(mov+" R1, -%d(R3)", lfp.stack) +- p(sub+" $%d, R3", lfp.stack) ++ p(mov+" R1, -%d(R3)", l.stack) ++ p(sub+" $%d, R3", l.stack) + + l.save() +- lfp.save() + p("CALL ·asyncPreempt2(SB)") +- lfp.restore() + l.restore() + +- p(mov+" %d(R3), R1", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it +- p(mov + " (R3), R30") // load PC to REGTMP +- p(add+" $%d, R3", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) ++ p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it ++ p(mov + " (R3), R30") // load PC to REGTMP ++ p(add+" $%d, R3", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (R30)") + } + +diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go +index 92a05d227d..1d5aae1363 100644 +--- a/src/runtime/preempt.go ++++ b/src/runtime/preempt.go +@@ -386,7 +386,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { + // Not Go code. 
+ return false, 0 + } +- if (GOARCH == "loong64" || GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { ++ if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { + // We probably stopped at a half-executed CALL instruction, + // where the LR is updated but the PC has not. If we preempt + // here we'll see a seemingly self-recursive call, which is in +-- +2.38.0 + diff --git a/loongarch64/0045-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch b/loongarch64/0045-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch new file mode 100644 index 0000000..b04dde7 --- /dev/null +++ b/loongarch64/0045-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch @@ -0,0 +1,50 @@ +From 6a802e230b9ada07bba0a0022dd35b8c86a1c1b4 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 23 May 2022 08:32:35 +0800 +Subject: [PATCH 45/82] cmd/internal/obj: add FuncInfo SPWRITE flag for + linux/loong64 + +Co-authored-by: limeidan +Change-Id: Iac4f706fc9ff1047f1fa02f7178ebfbe38ad1ec3 +--- + src/cmd/internal/obj/loong64/obj.go | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go +index 36036e5cd5..abfe67dcbc 100644 +--- a/src/cmd/internal/obj/loong64/obj.go ++++ b/src/cmd/internal/obj/loong64/obj.go +@@ -8,7 +8,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" +- ++ "log" + "math" + ) + +@@ -460,6 +460,21 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + p.From.Reg = REGSP + } + } ++ ++ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 { ++ f := c.cursym.Func() ++ if f.FuncFlag&objabi.FuncFlag_SPWRITE == 0 { ++ c.cursym.Func().FuncFlag |= objabi.FuncFlag_SPWRITE ++ if ctxt.Debugvlog || !ctxt.IsAsm { ++ ctxt.Logf("auto-SPWRITE: %s 
%v\n", c.cursym.Name, p) ++ if !ctxt.IsAsm { ++ ctxt.Diag("invalid auto-SPWRITE in non-assembly") ++ ctxt.DiagFlush() ++ log.Fatalf("bad SPWRITE") ++ } ++ } ++ } ++ } + } + } + +-- +2.38.0 + diff --git a/loongarch64/0046-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch b/loongarch64/0046-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch new file mode 100644 index 0000000..61b593c --- /dev/null +++ b/loongarch64/0046-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch @@ -0,0 +1,48 @@ +From 88308741850cb8d2deaf74b825439aef7e8bd101 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 23 May 2022 08:58:51 +0800 +Subject: [PATCH 46/82] runtime: add missing {TOPFRAME,NOFRAME} flag for + linux/loong64 + +Co-authored-by: limeidan +Change-Id: I0011a10f831e6c2b0da96265682212b0747f0e2a +--- + src/runtime/asm_loong64.s | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 9541f6e045..85df3ed3c9 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -11,7 +11,7 @@ + + #define REGCTXT R29 + +-TEXT runtime·rt0_go(SB),NOSPLIT,$0 ++TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R3 = stack; R4 = argc; R5 = argv + + ADDV $-24, R3 +@@ -127,12 +127,16 @@ TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 + + // void gogo(Gobuf*) + // restore state from Gobuf; longjmp +-TEXT runtime·gogo(SB), NOSPLIT, $16-8 ++TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVV buf+0(FP), R4 +- MOVV gobuf_g(R4), g // make sure g is not nil ++ MOVV gobuf_g(R4), R5 ++ MOVV 0(R5), R0 // make sure g != nil ++ JMP gogo<>(SB) ++ ++TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 ++ MOVV R5, g + JAL runtime·save_g(SB) + +- MOVV 0(g), R5 + MOVV gobuf_sp(R4), R3 + MOVV gobuf_lr(R4), R1 + MOVV gobuf_ret(R4), R19 +-- +2.38.0 + diff --git a/loongarch64/0047-cmd-compile-fix-loong64-constant-folding-in-division.patch 
b/loongarch64/0047-cmd-compile-fix-loong64-constant-folding-in-division.patch new file mode 100644 index 0000000..85a507d --- /dev/null +++ b/loongarch64/0047-cmd-compile-fix-loong64-constant-folding-in-division.patch @@ -0,0 +1,148 @@ +From aa6458665064ed642010cf34948310ac31139284 Mon Sep 17 00:00:00 2001 +From: Cuong Manh Le +Date: Sat, 21 May 2022 19:00:18 +0700 +Subject: [PATCH 47/82] cmd/compile: fix loong64 constant folding in division + rules + +The divisor must be non-zero for the rule to be triggered. + +Fixes #53018 + +Change-Id: Id56b8d986945bbb66e13131d11264ee438de5cb2 +--- + .../compile/internal/ssa/gen/LOONG64.rules | 8 ++--- + .../compile/internal/ssa/rewriteLOONG64.go | 16 ++++++++++ + test/fixedbugs/issue53018.go | 30 +++++++++++++++++++ + 3 files changed, 50 insertions(+), 4 deletions(-) + create mode 100644 test/fixedbugs/issue53018.go + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index 3fd4552aa4..4237aea16f 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -617,10 +617,10 @@ + (SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) + (SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) + (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) +-(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d]) +-(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))]) +-(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod +-(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod ++(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d]) ++(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))]) ++(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod 
++(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod + (ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) + (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) + (ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index 463a0458a3..e0f0df03e8 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -6825,6 +6825,7 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + return true + } + // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [c%d]) + for { + if v_0.Op != OpLOONG64DIVV { +@@ -6841,11 +6842,15 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { +@@ -6862,6 +6867,9 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true +@@ -7018,6 +7026,7 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + return true + } + // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [c/d]) + for { + if v_0.Op != OpLOONG64DIVV { +@@ -7034,11 +7043,15 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d 
!= 0 + // result: (MOVVconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { +@@ -7055,6 +7068,9 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true +diff --git a/test/fixedbugs/issue53018.go b/test/fixedbugs/issue53018.go +new file mode 100644 +index 0000000000..439d9d58c1 +--- /dev/null ++++ b/test/fixedbugs/issue53018.go +@@ -0,0 +1,30 @@ ++// compile ++ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package p ++ ++var V []int ++ ++func f(i int, c chan int) int { ++ arr := []int{0, 1} ++ for range c { ++ for a2 := range arr { ++ var a []int ++ V = V[:1/a2] ++ a[i] = 0 ++ } ++ return func() int { ++ arr = []int{} ++ return func() int { ++ return func() int { ++ return func() int { return 4 }() ++ }() ++ }() ++ }() ++ } ++ ++ return 0 ++} +-- +2.38.0 + diff --git a/loongarch64/0048-runtime-fix-the-vDSO-symbol-version-on-loong64.patch b/loongarch64/0048-runtime-fix-the-vDSO-symbol-version-on-loong64.patch new file mode 100644 index 0000000..220cedf --- /dev/null +++ b/loongarch64/0048-runtime-fix-the-vDSO-symbol-version-on-loong64.patch @@ -0,0 +1,39 @@ +From e6f984121b5f737e2897f7c1fc439d5aeac493b7 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Mon, 23 May 2022 11:02:57 +0800 +Subject: [PATCH 48/82] runtime: fix the vDSO symbol version on loong64 + +The current value is appropriate for an early in-house version of +Linux/LoongArch, but for the upstream version it is very likely +"LINUX_5.10" instead, per the latest upstream submission [1]. 
+ +[1]: https://lore.kernel.org/all/20220518095709.1313120-3-chenhuacai@loongson.cn/ + +Change-Id: Ia97e5cae82a5b306bd3eea86b9e442441da07973 +--- + src/runtime/vdso_linux_loong64.go | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/runtime/vdso_linux_loong64.go b/src/runtime/vdso_linux_loong64.go +index e8afdd4f1e..00cdb17c47 100644 +--- a/src/runtime/vdso_linux_loong64.go ++++ b/src/runtime/vdso_linux_loong64.go +@@ -13,11 +13,11 @@ const ( + vdsoArrayMax = 1<<50 - 1 + ) + +-// see man 7 vdso : loong64 +-var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6} ++// not currently described in manpages as of May 2022, but will eventually ++// appear ++// when that happens, see man 7 vdso : loongarch ++var vdsoLinuxVersion = vdsoVersionKey{"LINUX_5.10", 0xae78f70} + +-// The symbol name is not __kernel_clock_gettime as suggested by the manpage; +-// according to Linux source code it should be __vdso_clock_gettime instead. + var vdsoSymbolKeys = []vdsoSymbolKey{ + {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, + } +-- +2.38.0 + diff --git a/loongarch64/0049-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch b/loongarch64/0049-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch new file mode 100644 index 0000000..dc06fa0 --- /dev/null +++ b/loongarch64/0049-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch @@ -0,0 +1,36 @@ +From ddba319534b47fcede9a4f2255d8dc1fc0e002ea Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 26 May 2022 19:38:02 +0800 +Subject: [PATCH 49/82] internal/cpu: fix cpu cacheLineSize for loong64 + +We choose 64 because the L1 Dcache of Loongson 3A5000 CPU is +4-way 256-line 64-byte-per-line. 
+ +Change-Id: Ifb9a9f993dd6f75b5adb4ff6e4d93e945b1b2a98 +Reviewed-on: https://go-review.googlesource.com/c/go/+/408854 +Run-TryBot: Ian Lance Taylor +TryBot-Result: Gopher Robot +Auto-Submit: Ian Lance Taylor +Reviewed-by: Alex Rakoczy +Reviewed-by: Ian Lance Taylor +--- + src/internal/cpu/cpu_loong64.go | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/internal/cpu/cpu_loong64.go b/src/internal/cpu/cpu_loong64.go +index d0ff93455f..7e7701db40 100644 +--- a/src/internal/cpu/cpu_loong64.go ++++ b/src/internal/cpu/cpu_loong64.go +@@ -7,6 +7,8 @@ + + package cpu + +-const CacheLinePadSize = 32 ++// CacheLinePadSize is used to prevent false sharing of cache lines. ++// We choose 64 because Loongson 3A5000 the L1 Dcache is 4-way 256-line 64-byte-per-line. ++const CacheLinePadSize = 64 + + func doinit() {} +-- +2.38.0 + diff --git a/loongarch64/0050-syscall-runtime-internal-syscall-always-zero-the-hig.patch b/loongarch64/0050-syscall-runtime-internal-syscall-always-zero-the-hig.patch new file mode 100644 index 0000000..da7e0a0 --- /dev/null +++ b/loongarch64/0050-syscall-runtime-internal-syscall-always-zero-the-hig.patch @@ -0,0 +1,68 @@ +From 21152b907239ea1e2d19598d07d0c9c49c0072d6 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 10 Jun 2022 19:08:14 +0800 +Subject: [PATCH 50/82] syscall, runtime/internal/syscall: always zero the + higher bits of return value on linux/loong64 + +All loong64 syscalls return values only via R4/A0, and R5/A1 may contain unrelated +content. Always zero the second return value. 
+ +Change-Id: I62af59369bece5bd8028b937c74f4694150f7a55 +Reviewed-on: https://go-review.googlesource.com/c/go/+/411615 +Run-TryBot: Ian Lance Taylor +TryBot-Result: Gopher Robot +Auto-Submit: Ian Lance Taylor +Reviewed-by: Ian Lance Taylor +Reviewed-by: Austin Clements +--- + src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s | 4 ++-- + src/runtime/internal/syscall/asm_linux_loong64.s | 2 +- + src/syscall/asm_linux_loong64.s | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +index 1ccfa5ded5..dccf8ac85f 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s ++++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +@@ -30,7 +30,7 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +@@ -50,5 +50,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + RET +diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s +index 39bf5b1465..ccddeee356 100644 +--- a/src/runtime/internal/syscall/asm_linux_loong64.s ++++ b/src/runtime/internal/syscall/asm_linux_loong64.s +@@ -24,6 +24,6 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-80 + RET + ok: + MOVV R4, r1+56(FP) // r1 +- MOVV R5, r2+64(FP) // r2 ++ MOVV R0, r2+64(FP) // r2 is not used. Always set to 0. 
+ MOVV R0, err+72(FP) // errno + RET +diff --git a/src/syscall/asm_linux_loong64.s b/src/syscall/asm_linux_loong64.s +index 09f3f9773f..0ec9b3dd23 100644 +--- a/src/syscall/asm_linux_loong64.s ++++ b/src/syscall/asm_linux_loong64.s +@@ -143,5 +143,5 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0. + RET +-- +2.38.0 + diff --git a/loongarch64/0051-runtime-clean-up-unused-function-gosave-on-loong64.patch b/loongarch64/0051-runtime-clean-up-unused-function-gosave-on-loong64.patch new file mode 100644 index 0000000..ecf9ed9 --- /dev/null +++ b/loongarch64/0051-runtime-clean-up-unused-function-gosave-on-loong64.patch @@ -0,0 +1,39 @@ +From c74571972ad2d0cbb51ed2d1b4a494bf5e41a149 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 30 May 2022 18:44:57 +0800 +Subject: [PATCH 51/82] runtime: clean up unused function gosave on loong64 + +Change-Id: I28960a33d251a36e5e364fa6e27c5b2e13349f6b +--- + src/runtime/asm_loong64.s | 15 --------------- + 1 file changed, 15 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 85df3ed3c9..0cb1c412ef 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -110,21 +110,6 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + * go-routine + */ + +-// void gosave(Gobuf*) +-// save state in Gobuf; setjmp +-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 +- MOVV buf+0(FP), R19 +- MOVV R3, gobuf_sp(R19) +- MOVV R1, gobuf_pc(R19) +- MOVV g, gobuf_g(R19) +- MOVV R0, gobuf_lr(R19) +- MOVV R0, gobuf_ret(R19) +- // Assert ctxt is zero. See func save. 
+- MOVV gobuf_ctxt(R19), R19 +- BEQ R19, 2(PC) +- JAL runtime·badctxt(SB) +- RET +- + // void gogo(Gobuf*) + // restore state from Gobuf; longjmp + TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 +-- +2.38.0 + diff --git a/loongarch64/0052-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch b/loongarch64/0052-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch new file mode 100644 index 0000000..ae261f7 --- /dev/null +++ b/loongarch64/0052-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch @@ -0,0 +1,85 @@ +From 145772f832299e95702917ae898ca875669b95d0 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 27 Jun 2022 10:12:53 +0800 +Subject: [PATCH 52/82] debug/pe: add IMAGE_FILE_MACHINE_LOONGARCH{64,32} + +Related: https://github.com/MicrosoftDocs/win32/pull/1067 + +Change-Id: I946253f217a5c616ae4a19be44634000cba5020e +--- + api/go1.17.txt | 4 ++++ + src/debug/pe/pe.go | 46 ++++++++++++++++++++++++---------------------- + 2 files changed, 28 insertions(+), 22 deletions(-) + +diff --git a/api/go1.17.txt b/api/go1.17.txt +index 61b38c3343..0e7fe3c09f 100644 +--- a/api/go1.17.txt ++++ b/api/go1.17.txt +@@ -298,3 +298,7 @@ pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH + pkg debug/elf, method (R_LARCH) GoString() string + pkg debug/elf, method (R_LARCH) String() string + pkg debug/elf, type R_LARCH int ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 = 25138 ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 ideal-int ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 = 25188 ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 ideal-int +diff --git a/src/debug/pe/pe.go b/src/debug/pe/pe.go +index db112fe7ad..9d55c40f9c 100644 +--- a/src/debug/pe/pe.go ++++ b/src/debug/pe/pe.go +@@ -87,28 +87,30 @@ type OptionalHeader64 struct { + } + + const ( +- IMAGE_FILE_MACHINE_UNKNOWN = 0x0 +- IMAGE_FILE_MACHINE_AM33 = 0x1d3 +- IMAGE_FILE_MACHINE_AMD64 = 0x8664 +- IMAGE_FILE_MACHINE_ARM = 0x1c0 +- IMAGE_FILE_MACHINE_ARMNT = 0x1c4 +- IMAGE_FILE_MACHINE_ARM64 = 
0xaa64 +- IMAGE_FILE_MACHINE_EBC = 0xebc +- IMAGE_FILE_MACHINE_I386 = 0x14c +- IMAGE_FILE_MACHINE_IA64 = 0x200 +- IMAGE_FILE_MACHINE_M32R = 0x9041 +- IMAGE_FILE_MACHINE_MIPS16 = 0x266 +- IMAGE_FILE_MACHINE_MIPSFPU = 0x366 +- IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466 +- IMAGE_FILE_MACHINE_POWERPC = 0x1f0 +- IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1 +- IMAGE_FILE_MACHINE_R4000 = 0x166 +- IMAGE_FILE_MACHINE_SH3 = 0x1a2 +- IMAGE_FILE_MACHINE_SH3DSP = 0x1a3 +- IMAGE_FILE_MACHINE_SH4 = 0x1a6 +- IMAGE_FILE_MACHINE_SH5 = 0x1a8 +- IMAGE_FILE_MACHINE_THUMB = 0x1c2 +- IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169 ++ IMAGE_FILE_MACHINE_UNKNOWN = 0x0 ++ IMAGE_FILE_MACHINE_AM33 = 0x1d3 ++ IMAGE_FILE_MACHINE_AMD64 = 0x8664 ++ IMAGE_FILE_MACHINE_ARM = 0x1c0 ++ IMAGE_FILE_MACHINE_ARMNT = 0x1c4 ++ IMAGE_FILE_MACHINE_ARM64 = 0xaa64 ++ IMAGE_FILE_MACHINE_EBC = 0xebc ++ IMAGE_FILE_MACHINE_I386 = 0x14c ++ IMAGE_FILE_MACHINE_IA64 = 0x200 ++ IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232 ++ IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264 ++ IMAGE_FILE_MACHINE_M32R = 0x9041 ++ IMAGE_FILE_MACHINE_MIPS16 = 0x266 ++ IMAGE_FILE_MACHINE_MIPSFPU = 0x366 ++ IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466 ++ IMAGE_FILE_MACHINE_POWERPC = 0x1f0 ++ IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1 ++ IMAGE_FILE_MACHINE_R4000 = 0x166 ++ IMAGE_FILE_MACHINE_SH3 = 0x1a2 ++ IMAGE_FILE_MACHINE_SH3DSP = 0x1a3 ++ IMAGE_FILE_MACHINE_SH4 = 0x1a6 ++ IMAGE_FILE_MACHINE_SH5 = 0x1a8 ++ IMAGE_FILE_MACHINE_THUMB = 0x1c2 ++ IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169 + ) + + // IMAGE_DIRECTORY_ENTRY constants +-- +2.38.0 + diff --git a/loongarch64/0053-runtime-delete-useless-constant-definitions-SiginfoM.patch b/loongarch64/0053-runtime-delete-useless-constant-definitions-SiginfoM.patch new file mode 100644 index 0000000..e633707 --- /dev/null +++ b/loongarch64/0053-runtime-delete-useless-constant-definitions-SiginfoM.patch @@ -0,0 +1,29 @@ +From 975d42f267ccda563c34d2c19584e21194d7fc89 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 30 Jun 2022 10:17:21 +0800 +Subject: 
[PATCH 53/82] runtime: delete useless constant definitions + SiginfoMaxSize and SigeventMaxSize + +Change-Id: I082668bb428148fc192caeb02d551741d926e1b9 +(cherry picked from commit 7d97849d56a19040dc38c24acec404c7e2463a6b) +--- + src/runtime/export_linux_test.go | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/src/runtime/export_linux_test.go b/src/runtime/export_linux_test.go +index 057f4b6918..b7c901f238 100644 +--- a/src/runtime/export_linux_test.go ++++ b/src/runtime/export_linux_test.go +@@ -8,9 +8,6 @@ package runtime + + import "unsafe" + +-const SiginfoMaxSize = _si_max_size +-const SigeventMaxSize = _sigev_max_size +- + var NewOSProc0 = newosproc0 + var Mincore = mincore + var Add = add +-- +2.38.0 + diff --git a/loongarch64/0054-cmd-compile-remove-the-resultInArg0-register-checks-.patch b/loongarch64/0054-cmd-compile-remove-the-resultInArg0-register-checks-.patch new file mode 100644 index 0000000..e73e8b2 --- /dev/null +++ b/loongarch64/0054-cmd-compile-remove-the-resultInArg0-register-checks-.patch @@ -0,0 +1,31 @@ +From 4eb5244e29722e1f12dea31cfc5719b6db865cee Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 4 Jul 2022 10:16:12 +0800 +Subject: [PATCH 54/82] cmd/compile: remove the resultInArg0 register checks on + linux/loong64 + +The automatic resultInArg0 register check has been implemented by CL 296010. 
+ +Change-Id: Id0e085ded9aa097bd02593c359a750d938a3aaa6 +(cherry picked from commit 21c62284f81da9fa544f1b519706280f084e1410) +--- + src/cmd/compile/internal/loong64/ssa.go | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go +index 4f3aa6858f..2478963643 100644 +--- a/src/cmd/compile/internal/loong64/ssa.go ++++ b/src/cmd/compile/internal/loong64/ssa.go +@@ -101,9 +101,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { + p.To.Type = obj.TYPE_REG + p.To.Reg = y + case ssa.OpLOONG64MOVVnop: +- if v.Reg() != v.Args[0].Reg() { +- v.Fatalf("input[0] and output not in same register %s", v.LongString()) +- } + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { +-- +2.38.0 + diff --git a/loongarch64/0055-runtime-remove-the-fake-mstart-caller-in-systemstack.patch b/loongarch64/0055-runtime-remove-the-fake-mstart-caller-in-systemstack.patch new file mode 100644 index 0000000..b16c0df --- /dev/null +++ b/loongarch64/0055-runtime-remove-the-fake-mstart-caller-in-systemstack.patch @@ -0,0 +1,39 @@ +From 4fec6e075b13ba02f3be102ee6b081843e9738ed Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 4 Jul 2022 14:29:52 +0800 +Subject: [PATCH 55/82] runtime: remove the fake mstart caller in systemstack + on linux/loong64 + +ref. CL 288799 + +Change-Id: I0841e75fd515cf6a0d98abe4cffc3f63fc275e0e +--- + src/runtime/asm_loong64.s | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 0cb1c412ef..90d36a3e01 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -142,7 +142,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 + MOVV R3, (g_sched+gobuf_sp)(g) + MOVV R1, (g_sched+gobuf_pc)(g) + MOVV R0, (g_sched+gobuf_lr)(g) +- MOVV g, (g_sched+gobuf_g)(g) + + // Switch to m->g0 & its stack, call fn. 
+ MOVV g, R19 +@@ -200,10 +199,6 @@ switch: + MOVV R5, g + JAL runtime·save_g(SB) + MOVV (g_sched+gobuf_sp)(g), R19 +- // make it look like mstart called systemstack on g0, to stop traceback +- ADDV $-8, R19 +- MOVV $runtime·mstart(SB), R6 +- MOVV R6, 0(R19) + MOVV R19, R3 + + // call target function +-- +2.38.0 + diff --git a/loongarch64/0056-runtime-minor-refactoring-of-_rt0_loong64_linux.patch b/loongarch64/0056-runtime-minor-refactoring-of-_rt0_loong64_linux.patch new file mode 100644 index 0000000..5843073 --- /dev/null +++ b/loongarch64/0056-runtime-minor-refactoring-of-_rt0_loong64_linux.patch @@ -0,0 +1,39 @@ +From 11d34218bf33eb7aa9837f3898990001bb9ce69d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 25 Jul 2022 15:30:53 +0800 +Subject: [PATCH 56/82] runtime: minor refactoring of _rt0_loong64_linux + +remove meaningless jump and add missing NOFRAME flag + +Change-Id: I1aec68c556615b42042684bd176dfc2a8af094d1 +--- + src/runtime/rt0_linux_loong64.s | 9 +++------ + 1 file changed, 3 insertions(+), 6 deletions(-) + +diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s +index 840c8b134e..86885dfa80 100644 +--- a/src/runtime/rt0_linux_loong64.s ++++ b/src/runtime/rt0_linux_loong64.s +@@ -7,16 +7,13 @@ + + #include "textflag.h" + +-TEXT _rt0_loong64_linux(SB),NOSPLIT,$0 +- JMP _main<>(SB) +- +-TEXT _main<>(SB),NOSPLIT|NOFRAME,$0 ++TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 + // In a statically linked binary, the stack contains argc, + // argv as argc string pointers followed by a NULL, envv as a + // sequence of string pointers followed by a NULL, and auxv. + // There is no TLS base pointer. 
+- MOVW 0(R3), R4 // argc +- ADDV $8, R3, R5 // argv ++ MOVW 0(R3), R4 // argc ++ ADDV $8, R3, R5 // argv + JMP main(SB) + + TEXT main(SB),NOSPLIT|NOFRAME,$0 +-- +2.38.0 + diff --git a/loongarch64/0057-runtime-fix-gcWriteBarrier-frame-size-on-loong64-mip.patch b/loongarch64/0057-runtime-fix-gcWriteBarrier-frame-size-on-loong64-mip.patch new file mode 100644 index 0000000..cfee19a --- /dev/null +++ b/loongarch64/0057-runtime-fix-gcWriteBarrier-frame-size-on-loong64-mip.patch @@ -0,0 +1,55 @@ +From dcd7b2e1ed398999f6aa5940aa340bb531477987 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 26 Jul 2022 15:43:23 +0800 +Subject: [PATCH 57/82] runtime: fix gcWriteBarrier frame size on + loong64,mipsx,mips64x + +Change-Id: I5bc7f275135f144b23a7568527fb3b465404386e +--- + src/runtime/asm_loong64.s | 2 +- + src/runtime/asm_mips64x.s | 2 +- + src/runtime/asm_mipsx.s | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 90d36a3e01..16332f7c51 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -639,7 +639,7 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1 + // The act of CALLing gcWriteBarrier will clobber R1 (LR). + // It does not clobber any other general-purpose registers, + // but may clobber others (e.g., floating point registers). +-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$216 ++TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$224 + // Save the registers clobbered by the fast path. + MOVV R19, 208(R3) + MOVV R13, 216(R3) +diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s +index d4d2280105..8b525dbaed 100644 +--- a/src/runtime/asm_mips64x.s ++++ b/src/runtime/asm_mips64x.s +@@ -648,7 +648,7 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1 + // The act of CALLing gcWriteBarrier will clobber R31 (LR). + // It does not clobber any other general-purpose registers, + // but may clobber others (e.g., floating point registers). 
+-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$192 ++TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$200 + // Save the registers clobbered by the fast path. + MOVV R1, 184(R29) + MOVV R2, 192(R29) +diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s +index ea7edf20cf..39403b2eef 100644 +--- a/src/runtime/asm_mipsx.s ++++ b/src/runtime/asm_mipsx.s +@@ -641,7 +641,7 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1 + // The act of CALLing gcWriteBarrier will clobber R31 (LR). + // It does not clobber any other general-purpose registers, + // but may clobber others (e.g., floating point registers). +-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$104 ++TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$108 + // Save the registers clobbered by the fast path. + MOVW R1, 100(R29) + MOVW R2, 104(R29) +-- +2.38.0 + diff --git a/loongarch64/0058-cmd-internal-obj-loong64-remove-invalid-branch-delay.patch b/loongarch64/0058-cmd-internal-obj-loong64-remove-invalid-branch-delay.patch new file mode 100644 index 0000000..eaea921 --- /dev/null +++ b/loongarch64/0058-cmd-internal-obj-loong64-remove-invalid-branch-delay.patch @@ -0,0 +1,48 @@ +From 84c42fcfedc40217bfa99d96446d8b76bad8e212 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 3 Aug 2022 17:45:02 +0800 +Subject: [PATCH 58/82] cmd/internal/obj/loong64: remove invalid branch delay + slots + +Change-Id: I222717771019f7aefa547971b2d94ef4677a42c9 +--- + src/cmd/internal/obj/loong64/asm.go | 3 --- + src/cmd/internal/obj/loong64/obj.go | 8 -------- + 2 files changed, 11 deletions(-) + +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 345366f004..072fc1d9a7 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -429,9 +429,6 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + q.Pos = p.Pos + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(q.Link.Link) +- +- c.addnop(p.Link) +- c.addnop(p) + bflag = 1 + } + } +diff --git 
a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go +index abfe67dcbc..0f39c666a3 100644 +--- a/src/cmd/internal/obj/loong64/obj.go ++++ b/src/cmd/internal/obj/loong64/obj.go +@@ -622,14 +622,6 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { + return p + } + +-func (c *ctxt0) addnop(p *obj.Prog) { +- q := c.newprog() +- q.As = ANOOP +- q.Pos = p.Pos +- q.Link = p.Link +- p.Link = q +-} +- + var Linkloong64 = obj.LinkArch{ + Arch: sys.ArchLoong64, + Init: buildop, +-- +2.38.0 + diff --git a/loongarch64/0059-cmd-compile-cmd-internal-obj-rename-loong64-instruct.patch b/loongarch64/0059-cmd-compile-cmd-internal-obj-rename-loong64-instruct.patch new file mode 100644 index 0000000..e8fbc84 --- /dev/null +++ b/loongarch64/0059-cmd-compile-cmd-internal-obj-rename-loong64-instruct.patch @@ -0,0 +1,208 @@ +From 937a799ed90549d2d0e469ef529403372e321e11 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 2 Aug 2022 19:35:45 +0800 +Subject: [PATCH 59/82] cmd/compile,cmd/internal/obj: rename loong64 + instructions SQRT{F,D} to FSQRT{S,D} + +The SQRT{F,D} instructions were misleading because they correspond to the mnemonic +FSQRT{S,D} as defined in the LoongArch Architecture Reference Manual. This changes +the assembler to use the same mnemonic as the GNU assembler and the manual. 
+ +LoongArch Architecture Reference Manual: + https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html + +Change-Id: Iaa0b2cd4681f9659f21305fadbbfe4a5f8b9dfc4 +--- + src/cmd/asm/internal/asm/testdata/loong64enc1.s | 4 ++-- + src/cmd/compile/internal/loong64/ssa.go | 4 ++-- + src/cmd/compile/internal/ssa/gen/LOONG64.rules | 4 ++-- + src/cmd/compile/internal/ssa/gen/LOONG64Ops.go | 10 +++++----- + src/cmd/compile/internal/ssa/opGen.go | 12 ++++++------ + src/cmd/compile/internal/ssa/rewriteLOONG64.go | 4 ++-- + src/cmd/internal/obj/loong64/a.out.go | 4 ++-- + src/cmd/internal/obj/loong64/anames.go | 4 ++-- + src/cmd/internal/obj/loong64/asm.go | 8 ++++---- + 9 files changed, 27 insertions(+), 27 deletions(-) + +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +index c724cf97f5..b483facee3 100644 +--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -191,8 +191,8 @@ lable2: + ABSD F4, F5 // 85081401 + TRUNCDW F4, F5 // 85881a01 + TRUNCFW F4, F5 // 85841a01 +- SQRTF F4, F5 // 85441401 +- SQRTD F4, F5 // 85481401 ++ FSQRTS F4, F5 // 85441401 ++ FSQRTD F4, F5 // 85481401 + + DBAR // 00007238 + NOOP // 00004003 +diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go +index 2478963643..cb27389fa6 100644 +--- a/src/cmd/compile/internal/loong64/ssa.go ++++ b/src/cmd/compile/internal/loong64/ssa.go +@@ -370,8 +370,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { + ssa.OpLOONG64MOVDF, + ssa.OpLOONG64NEGF, + ssa.OpLOONG64NEGD, +- ssa.OpLOONG64SQRTD, +- ssa.OpLOONG64SQRTF: ++ ssa.OpLOONG64FSQRTD, ++ ssa.OpLOONG64FSQRTS: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index 4237aea16f..bde427d99c 100644 +--- 
a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -120,8 +120,8 @@ + + (Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) + +-(Sqrt ...) => (SQRTD ...) +-(Sqrt32 ...) => (SQRTF ...) ++(Sqrt ...) => (FSQRTD ...) ++(Sqrt32 ...) => (FSQRTS ...) + + // boolean ops -- booleans are represented with 0=false, 1=true + (AndB ...) => (AND ...) +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +index 1f0fec79dc..fbc751bdbd 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +@@ -235,11 +235,11 @@ func init() { + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) + {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) + +- {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 +- {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 +- {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 +- {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 +- {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 ++ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 ++ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 ++ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 ++ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64 ++ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32 + + // shifts + {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 +diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go +index 5aa0f52819..71e080d561 100644 +--- a/src/cmd/compile/internal/ssa/opGen.go ++++ b/src/cmd/compile/internal/ssa/opGen.go +@@ -1657,8 +1657,8 @@ const ( + OpLOONG64NEGV + OpLOONG64NEGF + 
OpLOONG64NEGD +- OpLOONG64SQRTD +- OpLOONG64SQRTF ++ OpLOONG64FSQRTD ++ OpLOONG64FSQRTS + OpLOONG64SLLV + OpLOONG64SLLVconst + OpLOONG64SRLV +@@ -21977,9 +21977,9 @@ var opcodeTable = [...]opInfo{ + }, + }, + { +- name: "SQRTD", ++ name: "FSQRTD", + argLen: 1, +- asm: loong64.ASQRTD, ++ asm: loong64.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 +@@ -21990,9 +21990,9 @@ var opcodeTable = [...]opInfo{ + }, + }, + { +- name: "SQRTF", ++ name: "FSQRTS", + argLen: 1, +- asm: loong64.ASQRTF, ++ asm: loong64.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index e0f0df03e8..8f3c823049 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -592,10 +592,10 @@ func rewriteValueLOONG64(v *Value) bool { + case OpSlicemask: + return rewriteValueLOONG64_OpSlicemask(v) + case OpSqrt: +- v.Op = OpLOONG64SQRTD ++ v.Op = OpLOONG64FSQRTD + return true + case OpSqrt32: +- v.Op = OpLOONG64SQRTF ++ v.Op = OpLOONG64FSQRTS + return true + case OpStaticCall: + v.Op = OpLOONG64CALLstatic +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +index e3857eac04..9846bc9297 100644 +--- a/src/cmd/internal/obj/loong64/a.out.go ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -331,8 +331,8 @@ const ( + ASGTU + + ASLL +- ASQRTD +- ASQRTF ++ AFSQRTD ++ AFSQRTS + ASRA + ASRL + ASUB +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +index 48d8a78828..715f03a538 100644 +--- a/src/cmd/internal/obj/loong64/anames.go ++++ 
b/src/cmd/internal/obj/loong64/anames.go +@@ -84,8 +84,8 @@ var Anames = []string{ + "SGT", + "SGTU", + "SLL", +- "SQRTD", +- "SQRTF", ++ "FSQRTD", ++ "FSQRTS", + "SRA", + "SRL", + "SUB", +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 072fc1d9a7..02e0f362d6 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -906,8 +906,8 @@ func buildop(ctxt *obj.Link) { + opset(AABSD, r0) + opset(ATRUNCDW, r0) + opset(ATRUNCFW, r0) +- opset(ASQRTF, r0) +- opset(ASQRTD, r0) ++ opset(AFSQRTS, r0) ++ opset(AFSQRTD, r0) + + case AMOVVF: + opset(AMOVVD, r0) +@@ -1768,9 +1768,9 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { + case ACMPGTF: + return 0x0c1<<20 | 0x3<<15 // FCMP.SLT.S + +- case ASQRTF: ++ case AFSQRTS: + return 0x4511 << 10 +- case ASQRTD: ++ case AFSQRTD: + return 0x4512 << 10 + + case ADBAR: +-- +2.38.0 + diff --git a/loongarch64/0060-math-implement-Sqrt-in-assembly-for-loong64.patch b/loongarch64/0060-math-implement-Sqrt-in-assembly-for-loong64.patch new file mode 100644 index 0000000..36e35bc --- /dev/null +++ b/loongarch64/0060-math-implement-Sqrt-in-assembly-for-loong64.patch @@ -0,0 +1,74 @@ +From 783382a155991cb8608d69467c26c1f6fa6fea30 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 3 Aug 2022 14:54:11 +0800 +Subject: [PATCH 60/82] math: implement Sqrt in assembly for loong64 + +Benchmark: +goos: linux +goarch: loong64 + old time/op new time/op delta +BenchmarkSqrtIndirect 81.6ns 21.42ns -73.75% +BenchmarkSqrtLatency 4.004ns 4.004ns 0.00% +BenchmarkSqrtIndirectLatency 59.74ns 10.35ns -82.67% +BenchmarkSqrtGoLatency 64.46ns 64.47ns 0.02% +BenchmarkSqrtPrime 1851ns 1852ns 0.05% + +Change-Id: Icc29b90ea3a2eaff67bc4fbea6d0931f929179c5 +--- + src/math/sqrt_asm.go | 4 ++-- + src/math/sqrt_loong64.s | 12 ++++++++++++ + src/math/sqrt_noasm.go | 4 ++-- + 3 files changed, 16 insertions(+), 4 deletions(-) + create mode 100644 src/math/sqrt_loong64.s + +diff --git 
a/src/math/sqrt_asm.go b/src/math/sqrt_asm.go +index b9102568ed..fc5aa7752a 100644 +--- a/src/math/sqrt_asm.go ++++ b/src/math/sqrt_asm.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build 386 || amd64 || arm64 || arm || mips || mipsle || ppc64 || ppc64le || s390x || riscv64 || wasm +-// +build 386 amd64 arm64 arm mips mipsle ppc64 ppc64le s390x riscv64 wasm ++//go:build 386 || amd64 || arm64 || arm || loong64 || mips || mipsle || ppc64 || ppc64le || s390x || riscv64 || wasm ++// +build 386 amd64 arm64 arm loong64 mips mipsle ppc64 ppc64le s390x riscv64 wasm + + package math + +diff --git a/src/math/sqrt_loong64.s b/src/math/sqrt_loong64.s +new file mode 100644 +index 0000000000..e81e734caf +--- /dev/null ++++ b/src/math/sqrt_loong64.s +@@ -0,0 +1,12 @@ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "textflag.h" ++ ++// func archSqrt(x float64) float64 ++TEXT ·archSqrt(SB),NOSPLIT,$0 ++ MOVD x+0(FP), F0 ++ FSQRTD F0, F0 ++ MOVD F0, ret+8(FP) ++ RET +diff --git a/src/math/sqrt_noasm.go b/src/math/sqrt_noasm.go +index 7b546b7e8c..6b3de9a953 100644 +--- a/src/math/sqrt_noasm.go ++++ b/src/math/sqrt_noasm.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build !386 && !amd64 && !arm64 && !arm && !mips && !mipsle && !ppc64 && !ppc64le && !s390x && !riscv64 && !wasm +-// +build !386,!amd64,!arm64,!arm,!mips,!mipsle,!ppc64,!ppc64le,!s390x,!riscv64,!wasm ++//go:build !386 && !amd64 && !arm64 && !arm && !loong64 && !mips && !mipsle && !ppc64 && !ppc64le && !s390x && !riscv64 && !wasm ++// +build !386,!amd64,!arm64,!arm,!loong64,!mips,!mipsle,!ppc64,!ppc64le,!s390x,!riscv64,!wasm + + package math + +-- +2.38.0 + diff --git a/loongarch64/0061-cmd-asm-add-RDTIME-L-H-.W-RDTIME.D-support-for-loong.patch b/loongarch64/0061-cmd-asm-add-RDTIME-L-H-.W-RDTIME.D-support-for-loong.patch new file mode 100644 index 0000000..f00c8fa --- /dev/null +++ b/loongarch64/0061-cmd-asm-add-RDTIME-L-H-.W-RDTIME.D-support-for-loong.patch @@ -0,0 +1,114 @@ +From 02470a2c86eb3dfb4f8c1918f389a0abf541078c Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 4 Aug 2022 18:13:59 +0800 +Subject: [PATCH 61/82] cmd/asm: add RDTIME{L,H}.W, RDTIME.D support for + loong64 + +Instruction formats: rdtimeX rd, rj + +The rdtimex instructions are used to read constant frequency timer information, +the stable counter value is written into the general register rd, and the counter +id information is written into the general register rj. 
+ +Ref: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html + +Change-Id: Ida5bbb28316ef70b5f616dac3e6fa6f2e77875b5 +--- + src/cmd/asm/internal/asm/testdata/loong64enc1.s | 4 ++++ + src/cmd/internal/obj/loong64/a.out.go | 5 +++++ + src/cmd/internal/obj/loong64/anames.go | 3 +++ + src/cmd/internal/obj/loong64/asm.go | 16 ++++++++++++++++ + 4 files changed, 28 insertions(+) + +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +index b483facee3..1eced6b4df 100644 +--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -207,3 +207,7 @@ lable2: + CMPGEF F4, R5 // a090130c + CMPGED F4, R5 // a090230c + CMPEQD F4, R5 // a010220c ++ ++ RDTIMELW R4, R0 // 80600000 ++ RDTIMEHW R4, R0 // 80640000 ++ RDTIMED R4, R5 // 85680000 +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +index 9846bc9297..c2825108e5 100644 +--- a/src/cmd/internal/obj/loong64/a.out.go ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -386,6 +386,11 @@ const ( + AMOVVF + AMOVVD + ++ // 2.2.10. 
Other Miscellaneous Instructions ++ ARDTIMELW ++ ARDTIMEHW ++ ARDTIMED ++ + ALAST + + // aliases +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +index 715f03a538..10555af6df 100644 +--- a/src/cmd/internal/obj/loong64/anames.go ++++ b/src/cmd/internal/obj/loong64/anames.go +@@ -126,5 +126,8 @@ var Anames = []string{ + "MOVDV", + "MOVVF", + "MOVVD", ++ "RDTIMELW", ++ "RDTIMEHW", ++ "RDTIMED", + "LAST", + } +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 02e0f362d6..66265690e3 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -336,6 +336,10 @@ var optab = []Optab{ + {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, + {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, + ++ {ARDTIMELW, C_REG, C_NONE, C_REG, 62, 4, 0, 0, 0}, ++ {ARDTIMEHW, C_REG, C_NONE, C_REG, 62, 4, 0, 0, 0}, ++ {ARDTIMED, C_REG, C_NONE, C_REG, 62, 4, 0, 0, 0}, ++ + {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}, + {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0}, + {obj.APCDATA, C_DCON, C_NONE, C_DCON, 0, 0, 0, 0, 0}, +@@ -1024,6 +1028,9 @@ func buildop(ctxt *obj.Link) { + ANEGW, + ANEGV, + AWORD, ++ ARDTIMELW, ++ ARDTIMEHW, ++ ARDTIMED, + obj.ANOP, + obj.ATEXT, + obj.AUNDEF, +@@ -1595,6 +1602,9 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { + case 61: // word C_DCON + o1 = uint32(c.vregoff(&p.From)) + o2 = uint32(c.vregoff(&p.From) >> 32) ++ ++ case 62: // rdtimex rd, rj ++ o1 = OP_RR(c.oprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg)) + } + + out[0] = o1 +@@ -1794,6 +1804,12 @@ func (c *ctxt0) oprr(a obj.As) uint32 { + return 0x4 << 10 + case ACLZ: + return 0x5 << 10 ++ case ARDTIMELW: ++ return 0x18 << 10 // rdtimel.w ++ case ARDTIMEHW: ++ return 0x19 << 10 // rdtimeh.w ++ case ARDTIMED: ++ return 0x1a << 10 // rdtimed + } + + c.ctxt.Diag("bad rr opcode %v", a) +-- +2.38.0 + diff --git 
a/loongarch64/0062-runtime-use-StableCounter-implement-cputicks-on-linu.patch b/loongarch64/0062-runtime-use-StableCounter-implement-cputicks-on-linu.patch new file mode 100644 index 0000000..8cc45ca --- /dev/null +++ b/loongarch64/0062-runtime-use-StableCounter-implement-cputicks-on-linu.patch @@ -0,0 +1,82 @@ +From bbc98b354e5ebb62dd73a6192fd71b1c5ecfe01d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 5 Aug 2022 13:32:08 +0800 +Subject: [PATCH 62/82] runtime: use StableCounter implement cputicks() on + linux/loong64 + +In Loongson 3A5000 CPU, each core has a StableCounter, refer to the implementation +in the Linux kernel, use the StableCounter of core 0 as the source of cputicks + +The introduction of StableCounter in chapter 2.2.10.4. of the reference manual: + The LoongArch instruction system defines-a constant frequency timer, whose +main body is-a 64-bit counter called StableCounter. StableCounter is set to 0 +after reset, and then increments by 1 every counting clock cycle. When the count +reaches all 1s, it automatically wraps around to 0 and continues to increment. +At the same time, each timer has a software-configurable globally unique-number, +called Counter ID. The characteristic of the constant frequency timer is that its +timing frequency remains unchanged after reset, no matter how the clock frequency +of the processor core changes. 
+ +Ref: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html + +Change-Id: I160b695a8c0e38ef49b21fb8b41460fd23d9538c +--- + src/runtime/asm_loong64.s | 10 ++++++++++ + src/runtime/cputicks.go | 4 ++-- + src/runtime/os_linux_loong64.go | 7 ------- + 3 files changed, 12 insertions(+), 9 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 16332f7c51..732a5c9583 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -106,6 +106,16 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + MOVV 0(REGCTXT), R4 + JMP (R4) + ++// In Loongson 3A5000 CPU, each core has a StableCounter, ++// refer to the implementation in the Linux kernel, use ++// the StableCounter of core 0 as the source of cputicks ++// ++// func cputicks() int64 ++TEXT runtime·cputicks(SB),NOSPLIT,$0-8 ++ RDTIMED R0, R4 ++ MOVV R4, ret+0(FP) ++ RET ++ + /* + * go-routine + */ +diff --git a/src/runtime/cputicks.go b/src/runtime/cputicks.go +index 79ddcdc8d6..7c926f4a2b 100644 +--- a/src/runtime/cputicks.go ++++ b/src/runtime/cputicks.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !arm && !arm64 && !loong64 && !mips64 && !mips64le && !mips && !mipsle && !wasm +-// +build !arm,!arm64,!loong64,!mips64,!mips64le,!mips,!mipsle,!wasm ++//go:build !arm && !arm64 && !mips64 && !mips64le && !mips && !mipsle && !wasm ++// +build !arm,!arm64,!mips64,!mips64le,!mips,!mipsle,!wasm + + package runtime + +diff --git a/src/runtime/os_linux_loong64.go b/src/runtime/os_linux_loong64.go +index e9a8728445..9541f8f70e 100644 +--- a/src/runtime/os_linux_loong64.go ++++ b/src/runtime/os_linux_loong64.go +@@ -10,10 +10,3 @@ package runtime + func archauxv(tag, val uintptr) {} + + func osArchInit() {} +- +-//go:nosplit +-func cputicks() int64 { +- // Currently cputicks() is used in blocking profiler and to seed fastrand(). 
+- // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. +- return nanotime() +-} +-- +2.38.0 + diff --git a/loongarch64/0063-debug-elf-add-new-style-LoongArch-reloc-types.patch b/loongarch64/0063-debug-elf-add-new-style-LoongArch-reloc-types.patch new file mode 100644 index 0000000..8c67408 --- /dev/null +++ b/loongarch64/0063-debug-elf-add-new-style-LoongArch-reloc-types.patch @@ -0,0 +1,213 @@ +From 536e6c29adc84552c3c8379408711c2304fd9f52 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Wed, 3 Aug 2022 15:46:34 +0800 +Subject: [PATCH 63/82] debug/elf: add new-style LoongArch reloc types + +LoongArch ELF psABI spec update: +https://github.com/loongson/LoongArch-Documentation/pull/57 + +Corresponding binutils implementation: +https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f09482a8747b6fd4c2d59a6a64677d3a3fe1e092 + +For #54222 + +Change-Id: I51e72294205847a69c01d741a3126248f7a7e41c +--- + api/go1.17.txt | 78 ++++++++++++++++++++++++++++++++++++++++++ + src/debug/elf/elf.go | 80 +++++++++++++++++++++++++++++++++++++++++++- + 2 files changed, 157 insertions(+), 1 deletion(-) + +diff --git a/api/go1.17.txt b/api/go1.17.txt +index 0e7fe3c09f..8e4519ea13 100644 +--- a/api/go1.17.txt ++++ b/api/go1.17.txt +@@ -302,3 +302,81 @@ pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 = 25138 + pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 ideal-int + pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 = 25188 + pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 ideal-int ++pkg debug/elf, const R_LARCH_32_PCREL = 99 ++pkg debug/elf, const R_LARCH_32_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_ABS64_HI12 = 70 ++pkg debug/elf, const R_LARCH_ABS64_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_ABS64_LO20 = 69 ++pkg debug/elf, const R_LARCH_ABS64_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_ABS_HI20 = 67 ++pkg debug/elf, const R_LARCH_ABS_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_ABS_LO12 = 68 ++pkg debug/elf, const R_LARCH_ABS_LO12 
R_LARCH ++pkg debug/elf, const R_LARCH_B16 = 64 ++pkg debug/elf, const R_LARCH_B16 R_LARCH ++pkg debug/elf, const R_LARCH_B21 = 65 ++pkg debug/elf, const R_LARCH_B21 R_LARCH ++pkg debug/elf, const R_LARCH_B26 = 66 ++pkg debug/elf, const R_LARCH_B26 R_LARCH ++pkg debug/elf, const R_LARCH_GNU_VTENTRY = 58 ++pkg debug/elf, const R_LARCH_GNU_VTENTRY R_LARCH ++pkg debug/elf, const R_LARCH_GNU_VTINHERIT = 57 ++pkg debug/elf, const R_LARCH_GNU_VTINHERIT R_LARCH ++pkg debug/elf, const R_LARCH_GOT64_HI12 = 82 ++pkg debug/elf, const R_LARCH_GOT64_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_GOT64_LO20 = 81 ++pkg debug/elf, const R_LARCH_GOT64_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_GOT64_PC_HI12 = 78 ++pkg debug/elf, const R_LARCH_GOT64_PC_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_GOT64_PC_LO20 = 77 ++pkg debug/elf, const R_LARCH_GOT64_PC_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_GOT_HI20 = 79 ++pkg debug/elf, const R_LARCH_GOT_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_GOT_LO12 = 80 ++pkg debug/elf, const R_LARCH_GOT_LO12 R_LARCH ++pkg debug/elf, const R_LARCH_GOT_PC_HI20 = 75 ++pkg debug/elf, const R_LARCH_GOT_PC_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_GOT_PC_LO12 = 76 ++pkg debug/elf, const R_LARCH_GOT_PC_LO12 R_LARCH ++pkg debug/elf, const R_LARCH_PCALA64_HI12 = 74 ++pkg debug/elf, const R_LARCH_PCALA64_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_PCALA64_LO20 = 73 ++pkg debug/elf, const R_LARCH_PCALA64_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_PCALA_HI20 = 71 ++pkg debug/elf, const R_LARCH_PCALA_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_PCALA_LO12 = 72 ++pkg debug/elf, const R_LARCH_PCALA_LO12 R_LARCH ++pkg debug/elf, const R_LARCH_RELAX = 100 ++pkg debug/elf, const R_LARCH_RELAX R_LARCH ++pkg debug/elf, const R_LARCH_TLS_GD_HI20 = 98 ++pkg debug/elf, const R_LARCH_TLS_GD_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_GD_PC_HI20 = 97 ++pkg debug/elf, const R_LARCH_TLS_GD_PC_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE64_HI12 = 94 ++pkg debug/elf, 
const R_LARCH_TLS_IE64_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE64_LO20 = 93 ++pkg debug/elf, const R_LARCH_TLS_IE64_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE64_PC_HI12 = 90 ++pkg debug/elf, const R_LARCH_TLS_IE64_PC_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE64_PC_LO20 = 89 ++pkg debug/elf, const R_LARCH_TLS_IE64_PC_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE_HI20 = 91 ++pkg debug/elf, const R_LARCH_TLS_IE_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE_LO12 = 92 ++pkg debug/elf, const R_LARCH_TLS_IE_LO12 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE_PC_HI20 = 87 ++pkg debug/elf, const R_LARCH_TLS_IE_PC_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_IE_PC_LO12 = 88 ++pkg debug/elf, const R_LARCH_TLS_IE_PC_LO12 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LD_HI20 = 96 ++pkg debug/elf, const R_LARCH_TLS_LD_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LD_PC_HI20 = 95 ++pkg debug/elf, const R_LARCH_TLS_LD_PC_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LE64_HI12 = 86 ++pkg debug/elf, const R_LARCH_TLS_LE64_HI12 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LE64_LO20 = 85 ++pkg debug/elf, const R_LARCH_TLS_LE64_LO20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LE_HI20 = 83 ++pkg debug/elf, const R_LARCH_TLS_LE_HI20 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_LE_LO12 = 84 ++pkg debug/elf, const R_LARCH_TLS_LE_LO12 R_LARCH +diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go +index 972a75e1e6..4603002cdf 100644 +--- a/src/debug/elf/elf.go ++++ b/src/debug/elf/elf.go +@@ -2152,7 +2152,7 @@ var rmipsStrings = []intName{ + func (i R_MIPS) String() string { return stringName(uint32(i), rmipsStrings, false) } + func (i R_MIPS) GoString() string { return stringName(uint32(i), rmipsStrings, true) } + +-// Relocation types for LARCH. ++// Relocation types for LoongArch. 
+ type R_LARCH int + + const ( +@@ -2206,6 +2206,45 @@ const ( + R_LARCH_SUB24 R_LARCH = 54 + R_LARCH_SUB32 R_LARCH = 55 + R_LARCH_SUB64 R_LARCH = 56 ++ R_LARCH_GNU_VTINHERIT R_LARCH = 57 ++ R_LARCH_GNU_VTENTRY R_LARCH = 58 ++ R_LARCH_B16 R_LARCH = 64 ++ R_LARCH_B21 R_LARCH = 65 ++ R_LARCH_B26 R_LARCH = 66 ++ R_LARCH_ABS_HI20 R_LARCH = 67 ++ R_LARCH_ABS_LO12 R_LARCH = 68 ++ R_LARCH_ABS64_LO20 R_LARCH = 69 ++ R_LARCH_ABS64_HI12 R_LARCH = 70 ++ R_LARCH_PCALA_HI20 R_LARCH = 71 ++ R_LARCH_PCALA_LO12 R_LARCH = 72 ++ R_LARCH_PCALA64_LO20 R_LARCH = 73 ++ R_LARCH_PCALA64_HI12 R_LARCH = 74 ++ R_LARCH_GOT_PC_HI20 R_LARCH = 75 ++ R_LARCH_GOT_PC_LO12 R_LARCH = 76 ++ R_LARCH_GOT64_PC_LO20 R_LARCH = 77 ++ R_LARCH_GOT64_PC_HI12 R_LARCH = 78 ++ R_LARCH_GOT_HI20 R_LARCH = 79 ++ R_LARCH_GOT_LO12 R_LARCH = 80 ++ R_LARCH_GOT64_LO20 R_LARCH = 81 ++ R_LARCH_GOT64_HI12 R_LARCH = 82 ++ R_LARCH_TLS_LE_HI20 R_LARCH = 83 ++ R_LARCH_TLS_LE_LO12 R_LARCH = 84 ++ R_LARCH_TLS_LE64_LO20 R_LARCH = 85 ++ R_LARCH_TLS_LE64_HI12 R_LARCH = 86 ++ R_LARCH_TLS_IE_PC_HI20 R_LARCH = 87 ++ R_LARCH_TLS_IE_PC_LO12 R_LARCH = 88 ++ R_LARCH_TLS_IE64_PC_LO20 R_LARCH = 89 ++ R_LARCH_TLS_IE64_PC_HI12 R_LARCH = 90 ++ R_LARCH_TLS_IE_HI20 R_LARCH = 91 ++ R_LARCH_TLS_IE_LO12 R_LARCH = 92 ++ R_LARCH_TLS_IE64_LO20 R_LARCH = 93 ++ R_LARCH_TLS_IE64_HI12 R_LARCH = 94 ++ R_LARCH_TLS_LD_PC_HI20 R_LARCH = 95 ++ R_LARCH_TLS_LD_HI20 R_LARCH = 96 ++ R_LARCH_TLS_GD_PC_HI20 R_LARCH = 97 ++ R_LARCH_TLS_GD_HI20 R_LARCH = 98 ++ R_LARCH_32_PCREL R_LARCH = 99 ++ R_LARCH_RELAX R_LARCH = 100 + ) + + var rlarchStrings = []intName{ +@@ -2259,6 +2298,45 @@ var rlarchStrings = []intName{ + {54, "R_LARCH_SUB24"}, + {55, "R_LARCH_SUB32"}, + {56, "R_LARCH_SUB64"}, ++ {57, "R_LARCH_GNU_VTINHERIT"}, ++ {58, "R_LARCH_GNU_VTENTRY"}, ++ {64, "R_LARCH_B16"}, ++ {65, "R_LARCH_B21"}, ++ {66, "R_LARCH_B26"}, ++ {67, "R_LARCH_ABS_HI20"}, ++ {68, "R_LARCH_ABS_LO12"}, ++ {69, "R_LARCH_ABS64_LO20"}, ++ {70, "R_LARCH_ABS64_HI12"}, ++ {71, "R_LARCH_PCALA_HI20"}, 
++ {72, "R_LARCH_PCALA_LO12"}, ++ {73, "R_LARCH_PCALA64_LO20"}, ++ {74, "R_LARCH_PCALA64_HI12"}, ++ {75, "R_LARCH_GOT_PC_HI20"}, ++ {76, "R_LARCH_GOT_PC_LO12"}, ++ {77, "R_LARCH_GOT64_PC_LO20"}, ++ {78, "R_LARCH_GOT64_PC_HI12"}, ++ {79, "R_LARCH_GOT_HI20"}, ++ {80, "R_LARCH_GOT_LO12"}, ++ {81, "R_LARCH_GOT64_LO20"}, ++ {82, "R_LARCH_GOT64_HI12"}, ++ {83, "R_LARCH_TLS_LE_HI20"}, ++ {84, "R_LARCH_TLS_LE_LO12"}, ++ {85, "R_LARCH_TLS_LE64_LO20"}, ++ {86, "R_LARCH_TLS_LE64_HI12"}, ++ {87, "R_LARCH_TLS_IE_PC_HI20"}, ++ {88, "R_LARCH_TLS_IE_PC_LO12"}, ++ {89, "R_LARCH_TLS_IE64_PC_LO20"}, ++ {90, "R_LARCH_TLS_IE64_PC_HI12"}, ++ {91, "R_LARCH_TLS_IE_HI20"}, ++ {92, "R_LARCH_TLS_IE_LO12"}, ++ {93, "R_LARCH_TLS_IE64_LO20"}, ++ {94, "R_LARCH_TLS_IE64_HI12"}, ++ {95, "R_LARCH_TLS_LD_PC_HI20"}, ++ {96, "R_LARCH_TLS_LD_HI20"}, ++ {97, "R_LARCH_TLS_GD_PC_HI20"}, ++ {98, "R_LARCH_TLS_GD_HI20"}, ++ {99, "R_LARCH_32_PCREL"}, ++ {100, "R_LARCH_RELAX"}, + } + + func (i R_LARCH) String() string { return stringName(uint32(i), rlarchStrings, false) } +-- +2.38.0 + diff --git a/loongarch64/0064-cmd-link-recognize-the-new-R_LARCH_32_PCREL-type-on-.patch b/loongarch64/0064-cmd-link-recognize-the-new-R_LARCH_32_PCREL-type-on-.patch new file mode 100644 index 0000000..8a05868 --- /dev/null +++ b/loongarch64/0064-cmd-link-recognize-the-new-R_LARCH_32_PCREL-type-on-.patch @@ -0,0 +1,39 @@ +From cb0e1d40dbdd8faa5266cfd58b2f1a7220f70683 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Wed, 3 Aug 2022 19:43:49 +0800 +Subject: [PATCH 64/82] cmd/link: recognize the new R_LARCH_32_PCREL type on + loong64 + +Due to the latest binutils change [1], at least for certain 32-bit +relocs in .eh_frame section, this new type of relocation record is +emitted, leading to breakage on systems with bleeding-edge toolchain +when trying to link with object(s) with such new-style relocs. + +Simply treating it the same as the existing reloc types seems enough. 
+ +Fixes #54222 + +[1]: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f09482a8747b6fd4c2d59a6a64677d3a3fe1e092 + +Change-Id: I876d6711d5d4a674bead37e57f9503f1622d1136 +--- + src/cmd/link/internal/loadelf/ldelf.go | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go +index d677bff3c9..fbdace587f 100644 +--- a/src/cmd/link/internal/loadelf/ldelf.go ++++ b/src/cmd/link/internal/loadelf/ldelf.go +@@ -1004,7 +1004,8 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) { + LOONG64 | uint32(elf.R_LARCH_MARK_LA)<<16, + LOONG64 | uint32(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)<<16, + LOONG64 | uint32(elf.R_LARCH_64)<<16, +- LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16: ++ LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16, ++ LOONG64 | uint32(elf.R_LARCH_32_PCREL)<<16: + return 4, 4, nil + + case S390X | uint32(elf.R_390_8)<<16: +-- +2.38.0 + diff --git a/loongarch64/0065-runtime-fix-runtime.usleep-on-linux-loong64.patch b/loongarch64/0065-runtime-fix-runtime.usleep-on-linux-loong64.patch new file mode 100644 index 0000000..177effc --- /dev/null +++ b/loongarch64/0065-runtime-fix-runtime.usleep-on-linux-loong64.patch @@ -0,0 +1,55 @@ +From 8b7f2e95c95653fc2d35e2e18ab65f4a10a94f70 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Aug 2022 02:44:22 +0800 +Subject: [PATCH 65/82] runtime: fix runtime.usleep() on linux/loong64 + +fix usleep by correctly setting nanoseconds parameter + +Change-Id: Ia4cfdea3df8834e6260527ce8e6e894a0547070f +--- + src/runtime/sys_linux_loong64.s | 27 +++++++++++++++------------ + 1 file changed, 15 insertions(+), 12 deletions(-) + +diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +index 1496d922c5..1abc5b6cda 100644 +--- a/src/runtime/sys_linux_loong64.s ++++ b/src/runtime/sys_linux_loong64.s +@@ -126,20 +126,23 @@ TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 + RET + + TEXT 
runtime·usleep(SB),NOSPLIT,$16-4 +- MOVWU usec+0(FP), R6 +- MOVV R6, R5 +- MOVW $1000000, R4 +- DIVVU R4, R6, R6 +- MOVV R6, 8(R3) +- MOVW $1000, R4 +- MULVU R6, R4, R4 +- SUBVU R4, R5 +- MOVV R5, 16(R3) ++ MOVWU usec+0(FP), R6 ++ MOVV $1000, R4 ++ MULVU R4, R6, R6 ++ MOVV $1000000000, R4 ++ ++ // ts->tv_sec ++ DIVVU R4, R6, R5 ++ MOVV R5, 8(R3) ++ ++ // ts->tv_nsec ++ REMVU R4, R6, R5 ++ MOVV R5, 16(R3) + + // nanosleep(&ts, 0) +- ADDV $8, R3, R4 +- MOVW $0, R5 +- MOVV $SYS_nanosleep, R11 ++ ADDV $8, R3, R4 ++ MOVV R0, R5 ++ MOVV $SYS_nanosleep, R11 + SYSCALL + RET + +-- +2.38.0 + diff --git a/loongarch64/0066-cmd-internal-obj-remove-redundant-cnames-on-loong64.patch b/loongarch64/0066-cmd-internal-obj-remove-redundant-cnames-on-loong64.patch new file mode 100644 index 0000000..38e9c03 --- /dev/null +++ b/loongarch64/0066-cmd-internal-obj-remove-redundant-cnames-on-loong64.patch @@ -0,0 +1,30 @@ +From 25d1b81277ca67c92db98c9503311aeeee7ec4bd Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Aug 2022 09:22:36 +0800 +Subject: [PATCH 66/82] cmd/internal/obj: remove redundant cnames on loong64 + +Change-Id: I5aa6328a12e74b2801ab60b5a5bb8571d382d5ef +--- + src/cmd/internal/obj/loong64/cnames.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go +index d6d3091757..e757e242ee 100644 +--- a/src/cmd/internal/obj/loong64/cnames.go ++++ b/src/cmd/internal/obj/loong64/cnames.go +@@ -4,11 +4,11 @@ + + package loong64 + ++// This order should be strictly consistent to that in a.out.go + var cnames0 = []string{ + "NONE", + "REG", + "FREG", +- "FCREG", + "FCSRREG", + "FCCREG", + "ZCON", +-- +2.38.0 + diff --git a/loongarch64/0067-runtime-save-fetch-g-register-during-VDSO-on-loong64.patch b/loongarch64/0067-runtime-save-fetch-g-register-during-VDSO-on-loong64.patch new file mode 100644 index 0000000..3380570 --- /dev/null +++ 
b/loongarch64/0067-runtime-save-fetch-g-register-during-VDSO-on-loong64.patch @@ -0,0 +1,113 @@ +From 78a0dd97a806c431e2243c16987322d3f7f9f1d8 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 26 Aug 2022 14:01:27 +0800 +Subject: [PATCH 67/82] runtime: save/fetch g register during VDSO on loong64 + +Change-Id: Iaffa8cce4f0ef8ef74225c355ec3c20ed238025f +--- + src/runtime/signal_unix.go | 2 +- + src/runtime/sys_linux_loong64.s | 44 ++++++++++++++++++++++++++++++--- + 2 files changed, 41 insertions(+), 5 deletions(-) + +diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go +index 6096760b50..29a99b770c 100644 +--- a/src/runtime/signal_unix.go ++++ b/src/runtime/signal_unix.go +@@ -382,7 +382,7 @@ func preemptM(mp *m) { + //go:nosplit + func sigFetchG(c *sigctxt) *g { + switch GOARCH { +- case "arm", "arm64", "ppc64", "ppc64le": ++ case "arm", "arm64", "loong64", "ppc64", "ppc64le": + if !iscgo && inVDSOPage(c.sigpc()) { + // When using cgo, we save the g on TLS and load it from there + // in sigtramp. Just use that. 
+diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +index 1abc5b6cda..8947797698 100644 +--- a/src/runtime/sys_linux_loong64.s ++++ b/src/runtime/sys_linux_loong64.s +@@ -13,7 +13,9 @@ + #include "go_tls.h" + #include "textflag.h" + +-#define AT_FDCWD -100 ++#define AT_FDCWD -100 ++#define CLOCK_REALTIME 0 ++#define CLOCK_MONOTONIC 1 + + #define SYS_exit 93 + #define SYS_read 63 +@@ -206,7 +208,7 @@ TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28 + RET + + // func walltime() (sec int64, nsec int32) +-TEXT runtime·walltime(SB),NOSPLIT,$16-12 ++TEXT runtime·walltime(SB),NOSPLIT,$24-12 + MOVV R3, R23 // R23 is unchanged by C code + MOVV R3, R25 + +@@ -236,12 +238,29 @@ noswitch: + AND $~15, R25 // Align for C code + MOVV R25, R3 + +- MOVW $0, R4 // CLOCK_REALTIME=0 ++ MOVW $CLOCK_REALTIME, R4 + MOVV $0(R3), R5 + + MOVV runtime·vdsoClockgettimeSym(SB), R20 + BEQ R20, fallback + ++ // Store g on gsignal's stack, see sys_linux_arm64.s for detail ++ MOVBU runtime·iscgo(SB), R25 ++ BNE R0, R25, nosaveg ++ ++ MOVV m_gsignal(R24), R25 // g.m.gsignal ++ BEQ R25, nosaveg ++ BEQ g, R25, nosaveg ++ ++ MOVV (g_stack+stack_lo)(R25), R25 // g.m.gsignal.stack.lo ++ MOVV g, (R25) ++ ++ JAL (R20) ++ ++ MOVV R0, (R25) ++ JMP finish ++ ++nosaveg: + JAL (R20) + + finish: +@@ -298,12 +317,29 @@ noswitch: + AND $~15, R25 // Align for C code + MOVV R25, R3 + +- MOVW $1, R4 // CLOCK_MONOTONIC=1 ++ MOVW $CLOCK_MONOTONIC, R4 + MOVV $0(R3), R5 + + MOVV runtime·vdsoClockgettimeSym(SB), R20 + BEQ R20, fallback + ++ // Store g on gsignal's stack, see sys_linux_arm64.s for detail ++ MOVBU runtime·iscgo(SB), R25 ++ BNE R0, R25, nosaveg ++ ++ MOVV m_gsignal(R24), R25 // g.m.gsignal ++ BEQ R25, nosaveg ++ BEQ g, R25, nosaveg ++ ++ MOVV (g_stack+stack_lo)(R25), R25 // g.m.gsignal.stack.lo ++ MOVV g, (R25) ++ ++ JAL (R20) ++ ++ MOVV R0, (R25) ++ JMP finish ++ ++nosaveg: + JAL (R20) + + finish: +-- +2.38.0 + diff --git 
a/loongarch64/0068-runtime-save-restore-callee-saved-registers-in-loong.patch b/loongarch64/0068-runtime-save-restore-callee-saved-registers-in-loong.patch new file mode 100644 index 0000000..1d3f6f5 --- /dev/null +++ b/loongarch64/0068-runtime-save-restore-callee-saved-registers-in-loong.patch @@ -0,0 +1,222 @@ +From dc5be912b83d1843100dcd0f6b3aadff281f79a2 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 26 Aug 2022 14:05:31 +0800 +Subject: [PATCH 68/82] runtime: save/restore callee-saved registers in + loong64's sigtramp + +Loong64's R22-R31 and F24-F31 are callee saved registers, which +should be saved in the beginning of sigtramp, and restored at +the end. + +In reviewing comments about sigtramp in sys_linux_arm64 it was +noted that a previous issue in arm64 due to missing callee save +registers could also be a problem on loong64, so code was added +to save and restore those. + +Change-Id: I3ae58fe8a64ddb052d0a89b63e82c01ad328dd15 +--- + src/runtime/cgo/abi_loong64.h | 60 +++++++++++++++++++++++++++++++++ + src/runtime/cgo/asm_loong64.s | 55 ++++++++---------------------- + src/runtime/sys_linux_loong64.s | 32 ++++++++++++------ + 3 files changed, 96 insertions(+), 51 deletions(-) + create mode 100644 src/runtime/cgo/abi_loong64.h + +diff --git a/src/runtime/cgo/abi_loong64.h b/src/runtime/cgo/abi_loong64.h +new file mode 100644 +index 0000000000..b10d83732f +--- /dev/null ++++ b/src/runtime/cgo/abi_loong64.h +@@ -0,0 +1,60 @@ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// Macros for transitioning from the host ABI to Go ABI0. ++// ++// These macros save and restore the callee-saved registers ++// from the stack, but they don't adjust stack pointer, so ++// the user should prepare stack space in advance. ++// SAVE_R22_TO_R31(offset) saves R22 ~ R31 to the stack space ++// of ((offset)+0*8)(R3) ~ ((offset)+9*8)(R3). 
++// ++// SAVE_F24_TO_F31(offset) saves F24 ~ F31 to the stack space ++// of ((offset)+0*8)(R3) ~ ((offset)+7*8)(R3). ++// ++// Note: g is R22 ++ ++#define SAVE_R22_TO_R31(offset) \ ++ MOVV g, ((offset)+(0*8))(R3) \ ++ MOVV R23, ((offset)+(1*8))(R3) \ ++ MOVV R24, ((offset)+(2*8))(R3) \ ++ MOVV R25, ((offset)+(3*8))(R3) \ ++ MOVV R26, ((offset)+(4*8))(R3) \ ++ MOVV R27, ((offset)+(5*8))(R3) \ ++ MOVV R28, ((offset)+(6*8))(R3) \ ++ MOVV R29, ((offset)+(7*8))(R3) \ ++ MOVV R30, ((offset)+(8*8))(R3) \ ++ MOVV R31, ((offset)+(9*8))(R3) ++ ++#define SAVE_F24_TO_F31(offset) \ ++ MOVD F24, ((offset)+(0*8))(R3) \ ++ MOVD F25, ((offset)+(1*8))(R3) \ ++ MOVD F26, ((offset)+(2*8))(R3) \ ++ MOVD F27, ((offset)+(3*8))(R3) \ ++ MOVD F28, ((offset)+(4*8))(R3) \ ++ MOVD F29, ((offset)+(5*8))(R3) \ ++ MOVD F30, ((offset)+(6*8))(R3) \ ++ MOVD F31, ((offset)+(7*8))(R3) ++ ++#define RESTORE_R22_TO_R31(offset) \ ++ MOVV ((offset)+(0*8))(R3), g \ ++ MOVV ((offset)+(1*8))(R3), R23 \ ++ MOVV ((offset)+(2*8))(R3), R24 \ ++ MOVV ((offset)+(3*8))(R3), R25 \ ++ MOVV ((offset)+(4*8))(R3), R26 \ ++ MOVV ((offset)+(5*8))(R3), R27 \ ++ MOVV ((offset)+(6*8))(R3), R28 \ ++ MOVV ((offset)+(7*8))(R3), R29 \ ++ MOVV ((offset)+(8*8))(R3), R30 \ ++ MOVV ((offset)+(9*8))(R3), R31 ++ ++#define RESTORE_F24_TO_F31(offset) \ ++ MOVD ((offset)+(0*8))(R3), F24 \ ++ MOVD ((offset)+(1*8))(R3), F25 \ ++ MOVD ((offset)+(2*8))(R3), F26 \ ++ MOVD ((offset)+(3*8))(R3), F27 \ ++ MOVD ((offset)+(4*8))(R3), F28 \ ++ MOVD ((offset)+(5*8))(R3), F29 \ ++ MOVD ((offset)+(6*8))(R3), F30 \ ++ MOVD ((offset)+(7*8))(R3), F31 +diff --git a/src/runtime/cgo/asm_loong64.s b/src/runtime/cgo/asm_loong64.s +index aa5a4ca200..d3dfd02c07 100644 +--- a/src/runtime/cgo/asm_loong64.s ++++ b/src/runtime/cgo/asm_loong64.s +@@ -5,6 +5,7 @@ + // +build loong64 + + #include "textflag.h" ++#include "abi_loong64.h" + + // Called by C code generated by cmd/cgo. 
+ // func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +@@ -18,52 +19,24 @@ TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 + * first arg. + */ + +- ADDV $(-8*22), R3 +- MOVV R4, (8*1)(R3) // fn unsafe.Pointer +- MOVV R5, (8*2)(R3) // a unsafe.Pointer +- MOVV R7, (8*3)(R3) // ctxt uintptr +- MOVV R23, (8*4)(R3) +- MOVV R24, (8*5)(R3) +- MOVV R25, (8*6)(R3) +- MOVV R26, (8*7)(R3) +- MOVV R27, (8*8)(R3) +- MOVV R28, (8*9)(R3) +- MOVV R29, (8*10)(R3) +- MOVV R30, (8*11)(R3) +- MOVV g, (8*12)(R3) +- MOVV R1, (8*13)(R3) +- MOVD F24, (8*14)(R3) +- MOVD F25, (8*15)(R3) +- MOVD F26, (8*16)(R3) +- MOVD F27, (8*17)(R3) +- MOVD F28, (8*18)(R3) +- MOVD F29, (8*19)(R3) +- MOVD F30, (8*20)(R3) +- MOVD F31, (8*21)(R3) ++ ADDV $(-23*8), R3 ++ MOVV R4, (1*8)(R3) // fn unsafe.Pointer ++ MOVV R5, (2*8)(R3) // a unsafe.Pointer ++ MOVV R7, (3*8)(R3) // ctxt uintptr ++ ++ SAVE_R22_TO_R31((4*8)) ++ SAVE_F24_TO_F31((14*8)) ++ MOVV R1, (22*8)(R3) + + // Initialize Go ABI environment + JAL runtime·load_g(SB) + + JAL runtime·cgocallback(SB) + +- MOVV (8*4)(R3), R23 +- MOVV (8*5)(R3), R24 +- MOVV (8*6)(R3), R25 +- MOVV (8*7)(R3), R26 +- MOVV (8*8)(R3), R27 +- MOVV (8*9)(R3), R28 +- MOVV (8*10)(R3), R29 +- MOVV (8*11)(R3), R30 +- MOVV (8*12)(R3), g +- MOVV (8*13)(R3), R1 +- MOVD (8*14)(R3), F24 +- MOVD (8*15)(R3), F25 +- MOVD (8*16)(R3), F26 +- MOVD (8*17)(R3), F27 +- MOVD (8*18)(R3), F28 +- MOVD (8*19)(R3), F29 +- MOVD (8*20)(R3), F30 +- MOVD (8*21)(R3), F31 +- ADDV $(8*22), R3 ++ RESTORE_R22_TO_R31((4*8)) ++ RESTORE_F24_TO_F31((14*8)) ++ MOVV (22*8)(R3), R1 ++ ++ ADDV $(23*8), R3 + + RET +diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +index 8947797698..85d704e13e 100644 +--- a/src/runtime/sys_linux_loong64.s ++++ b/src/runtime/sys_linux_loong64.s +@@ -12,6 +12,7 @@ + #include "go_asm.h" + #include "go_tls.h" + #include "textflag.h" ++#include "cgo/abi_loong64.h" + + #define AT_FDCWD -100 + #define CLOCK_REALTIME 0 +@@ -400,18 +401,29 @@ TEXT 
runtime·sigfwd(SB),NOSPLIT,$0-32 + JAL (R20) + RET + +-TEXT runtime·sigtramp(SB),NOSPLIT,$64 ++TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$182 ++ MOVW R4, (1*8)(R3) ++ MOVV R5, (2*8)(R3) ++ MOVV R6, (3*8)(R3) ++ ++ // Save callee-save registers in the case of signal forwarding. ++ // Please refer to https://golang.org/issue/31827 . ++ SAVE_R22_TO_R31((4*8)) ++ SAVE_F24_TO_F31((14*8)) ++ + // this might be called in external code context, + // where g is not set. +- MOVB runtime·iscgo(SB), R19 +- BEQ R19, 2(PC) +- JAL runtime·load_g(SB) +- +- MOVW R4, 8(R3) +- MOVV R5, 16(R3) +- MOVV R6, 24(R3) +- MOVV $runtime·sigtrampgo(SB), R19 +- JAL (R19) ++ MOVB runtime·iscgo(SB), R4 ++ BEQ R4, 2(PC) ++ JAL runtime·load_g(SB) ++ ++ MOVV $runtime·sigtrampgo(SB), R4 ++ JAL (R4) ++ ++ // Restore callee-save registers. ++ RESTORE_R22_TO_R31((4*8)) ++ RESTORE_F24_TO_F31((14*8)) ++ + RET + + TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0 +-- +2.38.0 + diff --git a/loongarch64/0069-runtime-add-comment-for-sys_linux_loong64.patch b/loongarch64/0069-runtime-add-comment-for-sys_linux_loong64.patch new file mode 100644 index 0000000..f521f26 --- /dev/null +++ b/loongarch64/0069-runtime-add-comment-for-sys_linux_loong64.patch @@ -0,0 +1,211 @@ +From d15818537b7339e4a18d4d338f37ecb4cc6fb920 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 26 Aug 2022 14:19:06 +0800 +Subject: [PATCH 69/82] runtime: add comment for sys_linux_loong64 + +Change-Id: I617d6d788cb213c1405f81d9f689fd6846ee105a +--- + src/runtime/sys_linux_loong64.s | 27 +++++++++++++++++++++++++-- + 1 file changed, 25 insertions(+), 2 deletions(-) + +diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +index 85d704e13e..f062cdec3f 100644 +--- a/src/runtime/sys_linux_loong64.s ++++ b/src/runtime/sys_linux_loong64.s +@@ -50,6 +50,7 @@ + #define SYS_brk 214 + #define SYS_pipe2 59 + ++// func exit(code int32) + TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4 + MOVW code+0(FP), R4 + MOVV $SYS_exit_group, R11 +@@ 
-69,6 +70,7 @@ TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8 + SYSCALL + JMP 0(PC) + ++// func open(name *byte, mode, perm int32) int32 + TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20 + MOVW $AT_FDCWD, R4 // AT_FDCWD, so this acts like open + MOVV name+0(FP), R5 +@@ -82,6 +84,7 @@ TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20 + MOVW R4, ret+16(FP) + RET + ++// func closefd(fd int32) int32 + TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 + MOVW fd+0(FP), R4 + MOVV $SYS_close, R11 +@@ -92,6 +95,7 @@ TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 + MOVW R4, ret+8(FP) + RET + ++// func write1(fd uintptr, p unsafe.Pointer, n int32) int32 + TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 + MOVV fd+0(FP), R4 + MOVV p+8(FP), R5 +@@ -101,6 +105,7 @@ TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 + MOVW R4, ret+24(FP) + RET + ++// func read(fd int32, p unsafe.Pointer, n int32) int32 + TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28 + MOVW fd+0(FP), R4 + MOVV p+8(FP), R5 +@@ -128,6 +133,7 @@ TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 + MOVW R4, errno+16(FP) + RET + ++// func usleep(usec uint32) + TEXT runtime·usleep(SB),NOSPLIT,$16-4 + MOVWU usec+0(FP), R6 + MOVV $1000, R4 +@@ -149,12 +155,14 @@ TEXT runtime·usleep(SB),NOSPLIT,$16-4 + SYSCALL + RET + ++// func gettid() uint32 + TEXT runtime·gettid(SB),NOSPLIT,$0-4 + MOVV $SYS_gettid, R11 + SYSCALL + MOVW R4, ret+0(FP) + RET + ++// func raise(sig uint32) + TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVV $SYS_getpid, R11 + SYSCALL +@@ -168,6 +176,7 @@ TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + SYSCALL + RET + ++// func raiseproc(sig uint32) + TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 + MOVV $SYS_getpid, R11 + SYSCALL +@@ -177,12 +186,14 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 + SYSCALL + RET + ++// func getpid() int + TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8 + MOVV $SYS_getpid, R11 + SYSCALL + MOVV R4, ret+0(FP) + RET + ++// func tgkill(tgid, tid, sig int) + TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24 + MOVV tgid+0(FP), R4 + 
MOVV tid+8(FP), R5 +@@ -191,6 +202,7 @@ TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24 + SYSCALL + RET + ++// func setitimer(mode int32, new, old *itimerval) + TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24 + MOVW mode+0(FP), R4 + MOVV new+8(FP), R5 +@@ -288,6 +300,7 @@ fallback: + SYSCALL + JMP finish + ++// func nanotime1() int64 + TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 + MOVV R3, R23 // R23 is unchanged by C code + MOVV R3, R25 +@@ -371,6 +384,7 @@ fallback: + SYSCALL + JMP finish + ++// func rtsigprocmask(how int32, new, old *sigset, size int32) + TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 + MOVW how+0(FP), R4 + MOVV new+8(FP), R5 +@@ -383,6 +397,7 @@ TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 + MOVV R0, 0xf1(R0) // crash + RET + ++// func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32 + TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36 + MOVV sig+0(FP), R4 + MOVV new+8(FP), R5 +@@ -393,6 +408,7 @@ TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36 + MOVW R4, ret+32(FP) + RET + ++// func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer) + TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 + MOVW sig+8(FP), R4 + MOVV info+16(FP), R5 +@@ -401,6 +417,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 + JAL (R20) + RET + ++// func sigtramp(signo, ureg, ctxt unsafe.Pointer) + TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$182 + MOVW R4, (1*8)(R3) + MOVV R5, (2*8)(R3) +@@ -426,9 +443,11 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$182 + + RET + ++// func cgoSigtramp() + TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0 + JMP runtime·sigtramp(SB) + ++// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) + TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0 + MOVV addr+0(FP), R4 + MOVV n+8(FP), R5 +@@ -450,6 +469,7 @@ ok: + MOVV $0, err+40(FP) + RET + ++// func munmap(addr unsafe.Pointer, n uintptr) + TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0 + MOVV addr+0(FP), R4 + MOVV n+8(FP), R5 +@@ -460,6 +480,7 
@@ TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0 + MOVV R0, 0xf3(R0) // crash + RET + ++// func madvise(addr unsafe.Pointer, n uintptr, flags int32) + TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 + MOVV addr+0(FP), R4 + MOVV n+8(FP), R5 +@@ -469,8 +490,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 + MOVW R4, ret+24(FP) + RET + +-// int64 futex(int32 *uaddr, int32 op, int32 val, +-// struct timespec *timeout, int32 *uaddr2, int32 val2); ++// func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32 + TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 + MOVV addr+0(FP), R4 + MOVW op+8(FP), R5 +@@ -544,6 +564,7 @@ nog: + SYSCALL + JMP -3(PC) // keep exiting + ++// func sigaltstack(new, old *stackt) + TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 + MOVV new+0(FP), R4 + MOVV old+8(FP), R5 +@@ -554,11 +575,13 @@ TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 + MOVV R0, 0xf1(R0) // crash + RET + ++// func osyield() + TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 + MOVV $SYS_sched_yield, R11 + SYSCALL + RET + ++// func sched_getaffinity(pid, len uintptr, buf *uintptr) int32 + TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 + MOVV pid+0(FP), R4 + MOVV len+8(FP), R5 +-- +2.38.0 + diff --git a/loongarch64/0070-runtime-add-support-for-buildmode-c-shared-on-loong6.patch b/loongarch64/0070-runtime-add-support-for-buildmode-c-shared-on-loong6.patch new file mode 100644 index 0000000..75549b0 --- /dev/null +++ b/loongarch64/0070-runtime-add-support-for-buildmode-c-shared-on-loong6.patch @@ -0,0 +1,142 @@ +From 3de68c346496fcf62fb93f975a424ac5b3d8effa Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Mon, 22 Aug 2022 20:22:21 +0800 +Subject: [PATCH 70/82] runtime: add support for --buildmode=c-shared on + loong64 + +Signed-off-by: limeidan +Change-Id: I6885c7ecc3904b8917ca014b54d1b22d18b2d04d +--- + src/runtime/rt0_linux_loong64.s | 51 +++++++++++++++++++++++++++++++++ + src/runtime/tls_loong64.s | 37 ++++++++++++++++++++++-- + 2 files changed, 
86 insertions(+), 2 deletions(-) + +diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s +index 86885dfa80..96fb4d30c4 100644 +--- a/src/runtime/rt0_linux_loong64.s ++++ b/src/runtime/rt0_linux_loong64.s +@@ -6,6 +6,7 @@ + // +build loong64 + + #include "textflag.h" ++#include "cgo/abi_loong64.h" + + TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 + // In a statically linked binary, the stack contains argc, +@@ -16,6 +17,56 @@ TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 + ADDV $8, R3, R5 // argv + JMP main(SB) + ++// When building with -buildmode=c-shared, this symbol is called when the shared ++// library is loaded. ++TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$232 ++ // Preserve callee-save registers. ++ SAVE_R22_TO_R31(24) ++ SAVE_F24_TO_F31(104) ++ ++ // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go ++ MOVV R0, g ++ ++ MOVV R4, _rt0_loong64_linux_lib_argc<>(SB) ++ MOVV R5, _rt0_loong64_linux_lib_argv<>(SB) ++ ++ // Synchronous initialization. ++ MOVV $runtime·libpreinit(SB), R19 ++ JAL (R19) ++ ++ // Create a new thread to do the runtime initialization and return. ++ MOVV _cgo_sys_thread_create(SB), R19 ++ BEQ R19, nocgo ++ MOVV $_rt0_loong64_linux_lib_go(SB), R4 ++ MOVV $0, R5 ++ JAL (R19) ++ JMP restore ++ ++nocgo: ++ MOVV $0x800000, R4 // stacksize = 8192KB ++ MOVV $_rt0_loong64_linux_lib_go(SB), R5 ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV $runtime·newosproc0(SB), R19 ++ JAL (R19) ++ ++restore: ++ // Restore callee-save registers. 
++ RESTORE_R22_TO_R31(24) ++ RESTORE_F24_TO_F31(104) ++ RET ++ ++TEXT _rt0_loong64_linux_lib_go(SB),NOSPLIT,$0 ++ MOVV _rt0_loong64_linux_lib_argc<>(SB), R4 ++ MOVV _rt0_loong64_linux_lib_argv<>(SB), R5 ++ MOVV $runtime·rt0_go(SB),R19 ++ JMP (R19) ++ ++DATA _rt0_loong64_linux_lib_argc<>(SB)/8, $0 ++GLOBL _rt0_loong64_linux_lib_argc<>(SB),NOPTR, $8 ++DATA _rt0_loong64_linux_lib_argv<>(SB)/8, $0 ++GLOBL _rt0_loong64_linux_lib_argv<>(SB),NOPTR, $8 ++ + TEXT main(SB),NOSPLIT|NOFRAME,$0 + // in external linking, glibc jumps to main with argc in R4 + // and argv in R5 +diff --git a/src/runtime/tls_loong64.s b/src/runtime/tls_loong64.s +index 30627d849b..ee3be31afd 100644 +--- a/src/runtime/tls_loong64.s ++++ b/src/runtime/tls_loong64.s +@@ -12,17 +12,50 @@ + // If !iscgo, this is a no-op. + // + // NOTE: mcall() assumes this clobbers only R30 (REGTMP). +-TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0 ++TEXT runtime·save_g(SB),NOSPLIT,$0-0 + MOVB runtime·iscgo(SB), R30 + BEQ R30, nocgo + ++ // here use the func __tls_get_addr to get the address of tls_g, which clobbers these regs below. 
++ ADDV $-56, R3 ++ MOVV R1, 0(R3) ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV R12, 32(R3) ++ MOVV R13, 40(R3) ++ MOVV R30, 48(R3) + MOVV g, runtime·tls_g(SB) ++ MOVV 0(R3), R1 ++ MOVV 8(R3), R4 ++ MOVV 16(R3), R5 ++ MOVV 24(R3), R6 ++ MOVV 32(R3), R12 ++ MOVV 40(R3), R13 ++ MOVV 48(R3), R30 ++ ADDV $56, R3 + + nocgo: + RET + +-TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0 ++TEXT runtime·load_g(SB),NOSPLIT,$0-0 ++ ADDV $-56, R3 ++ MOVV R1, 0(R3) ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV R12, 32(R3) ++ MOVV R13, 40(R3) ++ MOVV R30, 48(R3) + MOVV runtime·tls_g(SB), g ++ MOVV 0(R3), R1 ++ MOVV 8(R3), R4 ++ MOVV 16(R3), R5 ++ MOVV 24(R3), R6 ++ MOVV 32(R3), R12 ++ MOVV 40(R3), R13 ++ MOVV 48(R3), R30 ++ ADDV $56, R3 + RET + + GLOBL runtime·tls_g(SB), TLSBSS, $8 +-- +2.38.0 + diff --git a/loongarch64/0071-cmd-compile-add-support-for-buildmode-c-shared-on-lo.patch b/loongarch64/0071-cmd-compile-add-support-for-buildmode-c-shared-on-lo.patch new file mode 100644 index 0000000..eb622eb --- /dev/null +++ b/loongarch64/0071-cmd-compile-add-support-for-buildmode-c-shared-on-lo.patch @@ -0,0 +1,899 @@ +From edb2e5a42e5e9e3beb31b9e699286a2f58f5c1e7 Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Mon, 22 Aug 2022 20:23:33 +0800 +Subject: [PATCH 71/82] cmd/compile: add support for --buildmode=c-shared on + loong64 + +Signed-off-by: limeidan +Change-Id: If30bee4d2ecb7055dfb9249dc370aa2f991ae199 +--- + src/cmd/compile/internal/base/flag.go | 2 +- + src/cmd/compile/internal/liveness/plive.go | 2 +- + .../compile/internal/ssa/gen/LOONG64.rules | 80 ++++---- + .../compile/internal/ssa/rewriteLOONG64.go | 190 +++++++++++------- + 4 files changed, 156 insertions(+), 118 deletions(-) + +diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go +index 42c0c1b94b..fa06cac021 100644 +--- a/src/cmd/compile/internal/base/flag.go ++++ b/src/cmd/compile/internal/base/flag.go +@@ -175,7 +175,7 @@ func ParseFlags() { + if 
Flag.Race && !sys.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH) + } +- if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) { ++ if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) { + log.Fatalf("%s/%s does not support -shared", buildcfg.GOOS, buildcfg.GOARCH) + } + parseSpectre(Flag.Spectre) // left as string for RecordFlags +diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go +index f5c2ef7709..6d2195dc3e 100644 +--- a/src/cmd/compile/internal/liveness/plive.go ++++ b/src/cmd/compile/internal/liveness/plive.go +@@ -521,7 +521,7 @@ func (lv *liveness) markUnsafePoints() { + v = v.Args[0] + continue + } +- case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U: ++ case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpLOONG64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U: + // Args[0] is the address of the write + // barrier control. Ignore Args[1], + // which is the mem operand. 
+diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index bde427d99c..06337b6242 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -441,65 +441,65 @@ + (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + + // fold address into load/store +-(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) +-(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) +-(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) +-(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) +-(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) +-(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) +-(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) +-(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) +-(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) +- +-(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) +-(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) +-(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore 
[off1+int32(off2)] {sym} ptr val mem) +-(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) +-(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) +-(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) +-(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +-(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +-(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +-(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) +- +-(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload [off1+int32(off2)] {sym} ptr mem) ++(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) ++(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload [off1+int32(off2)] {sym} ptr mem) ++(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) ++(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload 
[off1+int32(off2)] {sym} ptr mem) ++(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) ++(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload [off1+int32(off2)] {sym} ptr mem) ++(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload [off1+int32(off2)] {sym} ptr mem) ++(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVWload [off1] {sym1} (MOVVaddr 
[off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +-(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +-(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +-(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +-(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +-(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +-(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVDstore [off1+int32(off2)] 
{mergeSym(sym1,sym2)} ptr val mem) +-(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +-(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + + // store zero +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index 8f3c823049..6b971b676c 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -1592,8 +1592,10 @@ func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1604,7 +1606,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBUload) +@@ -1614,7 +1616,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + return true + } + // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1626,7 +1628,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBUload) +@@ -1677,8 +1679,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) + for { + 
off1 := auxIntToInt32(v.AuxInt) +@@ -1689,7 +1693,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBload) +@@ -1699,7 +1703,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + return true + } + // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1711,7 +1715,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBload) +@@ -1763,8 +1767,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1776,7 +1782,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBstore) +@@ -1786,7 +1792,7 
@@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + return true + } + // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1799,7 +1805,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBstore) +@@ -1915,8 +1921,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1927,7 +1935,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBstorezero) +@@ -1937,7 +1945,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1949,7 +1957,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVBstorezero) +@@ -1963,8 +1971,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1975,7 +1985,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVDload) +@@ -1985,7 +1995,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + return true + } + // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -1997,7 +2007,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + sym2 := 
auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVDload) +@@ -2012,8 +2022,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2025,7 +2037,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVDstore) +@@ -2035,7 +2047,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + return true + } + // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2048,7 +2060,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVDstore) +@@ -2062,8 +2074,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + 
func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2074,7 +2088,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVFload) +@@ -2084,7 +2098,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + return true + } + // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2096,7 +2110,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVFload) +@@ -2111,8 +2125,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFstore 
[off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2124,7 +2140,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVFstore) +@@ -2134,7 +2150,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + return true + } + // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2147,7 +2163,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVFstore) +@@ -2161,8 +2177,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2173,7 +2191,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHUload) +@@ -2183,7 +2201,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + return true + } + // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2195,7 +2213,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHUload) +@@ -2268,8 +2286,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2280,7 +2300,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHload) +@@ -2290,7 +2310,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + return true + } + // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2302,7 +2322,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHload) +@@ -2398,8 +2418,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2411,7 +2433,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHstore) +@@ -2421,7 +2443,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + return true + } + // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2434,7 +2456,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + ptr 
:= v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHstore) +@@ -2516,8 +2538,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2528,7 +2552,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVHstorezero) +@@ -2538,7 +2562,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2550,7 +2574,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + 
v.reset(OpLOONG64MOVHstorezero) +@@ -2564,8 +2588,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2576,7 +2602,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVload) +@@ -2586,7 +2612,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + return true + } + // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2598,7 +2624,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVload) +@@ -2640,8 +2666,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: 
is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2653,7 +2681,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVstore) +@@ -2663,7 +2691,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + return true + } + // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2676,7 +2704,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVstore) +@@ -2690,8 +2718,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2702,7 +2732,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + off2 := 
auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVstorezero) +@@ -2712,7 +2742,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + return true + } + // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2724,7 +2754,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVVstorezero) +@@ -2738,8 +2768,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2750,7 +2782,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWUload) +@@ -2760,7 +2792,7 @@ func 
rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + return true + } + // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2772,7 +2804,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWUload) +@@ -2867,8 +2899,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2879,7 +2913,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWload) +@@ -2889,7 +2923,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + return true + } + // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared) + // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -2901,7 +2935,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWload) +@@ -3030,8 +3064,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -3043,7 +3079,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWstore) +@@ -3053,7 +3089,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + return true + } + // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -3066,7 +3102,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + ptr := v_0.Args[0] + val := v_1 + mem := v_2 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if 
!(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWstore) +@@ -3114,8 +3150,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config + // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) +- // cond: is32Bit(int64(off1)+off2) ++ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -3126,7 +3164,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 +- if !(is32Bit(int64(off1) + off2)) { ++ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWstorezero) +@@ -3136,7 +3174,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + return true + } + // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) +- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) +@@ -3148,7 +3186,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 +- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpLOONG64MOVWstorezero) +-- +2.38.0 + diff --git a/loongarch64/0072-cmd-internal-obj-loong64-cmd-internal-objabi-add-c-s.patch 
b/loongarch64/0072-cmd-internal-obj-loong64-cmd-internal-objabi-add-c-s.patch new file mode 100644 index 0000000..7a1ecb8 --- /dev/null +++ b/loongarch64/0072-cmd-internal-obj-loong64-cmd-internal-objabi-add-c-s.patch @@ -0,0 +1,323 @@ +From 0e946b940ca5b6ff447d625855a75b32f1856d5a Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Mon, 22 Aug 2022 20:24:59 +0800 +Subject: [PATCH 72/82] cmd/internal/obj/loong64, cmd/internal/objabi: add + c-shared relocations on loong64 + +Signed-off-by: limeidan +Change-Id: I9e8ca663813662f89646e455eb42192b3c149881 +--- + src/cmd/internal/obj/loong64/a.out.go | 4 +- + src/cmd/internal/obj/loong64/asm.go | 127 ++++++++++++++++++-- + src/cmd/internal/obj/loong64/cnames.go | 4 +- + src/cmd/internal/objabi/reloctype.go | 13 ++ + src/cmd/internal/objabi/reloctype_string.go | 26 ++-- + 5 files changed, 149 insertions(+), 25 deletions(-) + +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +index c2825108e5..e02cae7c75 100644 +--- a/src/cmd/internal/obj/loong64/a.out.go ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -225,7 +225,9 @@ const ( + C_LOREG + C_GOK + C_ADDR +- C_TLS ++ C_GOTADDR ++ C_TLS_LE ++ C_TLS_GD + C_TEXTSIZE + + C_NCLASS // must be the last +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 66265690e3..14e6f05da1 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -155,11 +155,11 @@ var optab = []Optab{ + {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, + {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, +- {AMOVW, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, +- {AMOVWU, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, +- {AMOVV, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, +- {AMOVB, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, +- {AMOVBU, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_TLS_LE, 53, 16, 0, 0, 
0}, ++ {AMOVWU, C_REG, C_NONE, C_TLS_LE, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_TLS_LE, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_TLS_LE, 53, 16, 0, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_TLS_LE, 53, 16, 0, 0, 0}, + + {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, + {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, +@@ -184,11 +184,11 @@ var optab = []Optab{ + {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, + {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, + {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, +- {AMOVW, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, +- {AMOVWU, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, +- {AMOVV, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, +- {AMOVB, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, +- {AMOVBU, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVW, C_TLS_LE, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVWU, C_TLS_LE, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_TLS_LE, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_TLS_LE, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVBU, C_TLS_LE, C_NONE, C_REG, 54, 16, 0, 0, 0}, + + {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, + {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, +@@ -269,6 +269,8 @@ var optab = []Optab{ + {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0}, // jirl r0, rj, 0 + {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0}, // jirl r1, rj, 0 + ++ {AMOVV, C_GOTADDR, C_NONE, C_REG, 63, 12, 0, sys.Loong64, 0}, ++ + {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, + {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, + {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, +@@ -325,6 +327,18 @@ var optab = []Optab{ + {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, + {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, + ++ {AMOVB, C_REG, C_NONE, C_TLS_GD, 56, 24, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_TLS_GD, 56, 24, 
0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_TLS_GD, 56, 24, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_TLS_GD, 56, 24, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_TLS_GD, 56, 24, 0, sys.Loong64, 0}, ++ ++ {AMOVB, C_TLS_GD, C_NONE, C_REG, 57, 24, 0, sys.Loong64, 0}, ++ {AMOVW, C_TLS_GD, C_NONE, C_REG, 57, 24, 0, sys.Loong64, 0}, ++ {AMOVV, C_TLS_GD, C_NONE, C_REG, 57, 24, 0, sys.Loong64, 0}, ++ {AMOVBU, C_TLS_GD, C_NONE, C_REG, 57, 24, 0, sys.Loong64, 0}, ++ {AMOVWU, C_TLS_GD, C_NONE, C_REG, 57, 24, 0, sys.Loong64, 0}, ++ + {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0}, + {AWORD, C_DCON, C_NONE, C_NONE, 61, 4, 0, 0, 0}, + +@@ -459,7 +473,7 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + + bp := c.cursym.P + var i int32 +- var out [5]uint32 ++ var out [6]uint32 + for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { + c.pc = p.Pc + o = c.oplook(p) +@@ -542,12 +556,19 @@ func (c *ctxt0) aclass(a *obj.Addr) int { + c.instoffset = a.Offset + if a.Sym != nil { // use relocation + if a.Sym.Type == objabi.STLSBSS { +- return C_TLS ++ if c.ctxt.Flag_shared { ++ return C_TLS_GD ++ } else { ++ return C_TLS_LE ++ } + } + return C_ADDR + } + return C_LEXT + ++ case obj.NAME_GOTREF: ++ return C_GOTADDR ++ + case obj.NAME_AUTO: + if a.Reg == REGSP { + // unset base register for better printing, since +@@ -1101,6 +1122,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { + o3 := uint32(0) + o4 := uint32(0) + o5 := uint32(0) ++ o6 := uint32(0) + + add := AADDU + add = AADDVU +@@ -1578,6 +1600,70 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { + rel2.Type = objabi.R_ADDRLOONG64TLS + o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(p.To.Reg)) + ++ case 56: // mov r, tlsvar GD model ==> (pcaddu12i + ld.d)__tls_get_addr + (pcaddu12i + addi.d)tlsvar@got + jirl + st.d ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ 
rel.Siz = 4 ++ rel.Sym = c.ctxt.Lookup("__tls_get_addr") ++ rel.Add = 0x0 ++ rel.Type = objabi.R_LOONG64_GOTPCREL_HI ++ o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc+4) ++ rel2.Siz = 4 ++ rel2.Sym = c.ctxt.Lookup("__tls_get_addr") ++ rel2.Add = 0x0 ++ rel2.Type = objabi.R_LOONG64_GOTPCREL_LO ++ o3 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REG_R4)) ++ rel3 := obj.Addrel(c.cursym) ++ rel3.Off = int32(c.pc+8) ++ rel3.Siz = 4 ++ rel3.Sym = p.To.Sym ++ rel3.Add = 0x0 ++ rel3.Type = objabi.R_LOONG64_TLS_GD_PCREL_HI ++ o4 = OP_12IRR(c.opirr(AADDV), uint32(0), uint32(REG_R4), uint32(REG_R4)) ++ rel4 := obj.Addrel(c.cursym) ++ rel4.Off = int32(c.pc+12) ++ rel4.Siz = 4 ++ rel4.Sym = p.To.Sym ++ rel4.Add = 0x0 ++ rel4.Type = objabi.R_LOONG64_TLS_GD_PCREL_LO ++ o5 = OP_16IRR(c.opirr(AJIRL), uint32(0), uint32(REGTMP), uint32(REGLINK)) ++ o6 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REG_R4), uint32(p.From.Reg)) ++ ++ case 57: // mov tlsvar, r GD model ==> (pcaddu12i + ld.d)__tls_get_addr + (pcaddu12i + addi.d)tlsvar@got + jirl + ld.d ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = c.ctxt.Lookup("__tls_get_addr") ++ rel.Add = 0x0 ++ rel.Type = objabi.R_LOONG64_GOTPCREL_HI ++ o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc+4) ++ rel2.Siz = 4 ++ rel2.Sym = c.ctxt.Lookup("__tls_get_addr") ++ rel2.Add = 0x0 ++ rel2.Type = objabi.R_LOONG64_GOTPCREL_LO ++ o3 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REG_R4)) ++ rel3 := obj.Addrel(c.cursym) ++ rel3.Off = int32(c.pc+8) ++ rel3.Siz = 4 ++ rel3.Sym = p.From.Sym ++ rel3.Type = objabi.R_LOONG64_TLS_GD_PCREL_HI ++ rel3.Add = 0x0 ++ o4 = OP_12IRR(c.opirr(AADDV), uint32(0), uint32(REG_R4), uint32(REG_R4)) ++ rel4 := obj.Addrel(c.cursym) ++ rel4.Off = int32(c.pc+12) ++ rel4.Siz 
= 4 ++ rel4.Sym = p.From.Sym ++ rel4.Type = objabi.R_LOONG64_TLS_GD_PCREL_LO ++ rel4.Add = 0x0 ++ o5 = OP_16IRR(c.opirr(AJIRL), uint32(0), uint32(REGTMP), uint32(REGLINK)) ++ o6 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REG_R4), uint32(p.To.Reg)) ++ + case 59: // mov $dcon,r + // NOTE: this case does not use REGTMP. If it ever does, + // remove the NOTUSETMP flag in optab. +@@ -1605,6 +1691,22 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { + + case 62: // rdtimex rd, rj + o1 = OP_RR(c.oprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 63: // mov sym@GOT, r ==> pcaddu12i + ld.d ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Type = objabi.R_LOONG64_GOTPCREL_HI ++ rel.Add = 0x0 ++ o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc+4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Type = objabi.R_LOONG64_GOTPCREL_LO ++ rel2.Add = 0x0 + } + + out[0] = o1 +@@ -1612,6 +1714,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { + out[2] = o3 + out[3] = o4 + out[4] = o5 ++ out[5] = o6 + } + + func (c *ctxt0) vregoff(a *obj.Addr) int64 { +diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go +index e757e242ee..c594ac97a3 100644 +--- a/src/cmd/internal/obj/loong64/cnames.go ++++ b/src/cmd/internal/obj/loong64/cnames.go +@@ -37,7 +37,9 @@ var cnames0 = []string{ + "LOREG", + "GOK", + "ADDR", +- "TLS", ++ "GOTADDR", ++ "TLS_LE", ++ "TLS_GD", + "TEXTSIZE", + "NCLASS", + } +diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go +index 99bc5f7a68..d5bc57e782 100644 +--- a/src/cmd/internal/objabi/reloctype.go ++++ b/src/cmd/internal/objabi/reloctype.go +@@ -247,6 +247,19 @@ const ( + // TODO(mundaym): remove once variants can be serialized - see issue 14218. 
+ R_PCRELDBL + ++ // Loong64. ++ ++ // R_LOONG64_TLS_GD_PCREL_HI and R_LOONG64_TLS_GD_PCREL_LO relocates an pcaddu12i, addi.d pair to compute ++ // the address of the GOT slot of the tls symbol, the address will be passed to __tls_get_addr to ++ // get the true address of tlsvar. ++ R_LOONG64_TLS_GD_PCREL_HI ++ R_LOONG64_TLS_GD_PCREL_LO ++ ++ // R_LOONG64_GOTPCREL_HI and R_LOONG64_GOTPCREL_LO relocates an pcaddu12i, addi.d pair to compute ++ // the address of the GOT slot of the referenced symbol. ++ R_LOONG64_GOTPCREL_HI ++ R_LOONG64_GOTPCREL_LO ++ + // R_ADDRMIPSU (only used on mips/mips64) resolves to the sign-adjusted "upper" 16 + // bits (bit 16-31) of an external address, by encoding it into the instruction. + R_ADDRMIPSU +diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go +index 9d34a0f7d5..30fe39ba56 100644 +--- a/src/cmd/internal/objabi/reloctype_string.go ++++ b/src/cmd/internal/objabi/reloctype_string.go +@@ -1,4 +1,4 @@ +-// Code generated by "stringer -type RelocType reloctype.go"; DO NOT EDIT. ++// Code generated by "stringer -type=RelocType cmd/internal/objabi/reloctype.go"; DO NOT EDIT. 
+ + package objabi + +@@ -66,19 +66,23 @@ func _() { + _ = x[R_RISCV_TLS_IE_ITYPE-56] + _ = x[R_RISCV_TLS_IE_STYPE-57] + _ = x[R_PCRELDBL-58] +- _ = x[R_ADDRMIPSU-59] +- _ = x[R_ADDRLOONG64U-60] +- _ = x[R_ADDRMIPSTLS-61] +- _ = x[R_ADDRLOONG64TLS-62] +- _ = x[R_ADDRLOONG64TLSU-63] +- _ = x[R_ADDRCUOFF-64] +- _ = x[R_WASMIMPORT-65] +- _ = x[R_XCOFFREF-66] ++ _ = x[R_LOONG64_TLS_GD_PCREL_HI-59] ++ _ = x[R_LOONG64_TLS_GD_PCREL_LO-60] ++ _ = x[R_LOONG64_GOTPCREL_HI-61] ++ _ = x[R_LOONG64_GOTPCREL_LO-62] ++ _ = x[R_ADDRMIPSU-63] ++ _ = x[R_ADDRLOONG64U-64] ++ _ = x[R_ADDRMIPSTLS-65] ++ _ = x[R_ADDRLOONG64TLS-66] ++ _ = x[R_ADDRLOONG64TLSU-67] ++ _ = x[R_ADDRCUOFF-68] ++ _ = x[R_WASMIMPORT-69] ++ _ = x[R_XCOFFREF-70] + } + +-const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDRLOONG64R_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CALLLOONG64R_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_JMPLOONG64R_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRLOONG64UR_ADDRMIPSTLSR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" ++const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDRLOONG64R_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CALLLOONG64R_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_JMPLOONG64R_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_LOONG64_TLS_GD_PCREL_HIR_LOONG64_TLS_GD_PCREL_LOR_LOONG64_GOTPCREL_HIR_LOONG64_GOTPCREL_LOR_ADDRMIPSUR_ADDRLOONG64UR_ADDRMIPSTLSR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" + +-var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 51, 60, 66, 72, 81, 92, 101, 112, 122, 133, 146, 153, 160, 168, 176, 184, 190, 196, 202, 212, 221, 231, 247, 258, 264, 275, 285, 294, 306, 319, 333, 347, 361, 377, 388, 401, 414, 428, 442, 456, 471, 485, 499, 510, 524, 539, 556, 574, 595, 614, 633, 653, 673, 683, 694, 708, 721, 737, 754, 765, 777, 787} ++var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 51, 60, 66, 72, 81, 92, 101, 112, 122, 133, 146, 153, 160, 168, 176, 184, 190, 196, 202, 212, 221, 231, 247, 258, 264, 275, 285, 294, 306, 319, 333, 347, 361, 377, 388, 401, 414, 428, 442, 456, 471, 485, 499, 510, 524, 539, 556, 574, 595, 614, 633, 653, 673, 683, 708, 733, 754, 775, 786, 800, 813, 829, 846, 857, 869, 879} + + func (i RelocType) String() string { + i -= 1 +-- +2.38.0 + diff --git a/loongarch64/0073-cmd-link-add-support-for-buildmode-c-shared-on-loong.patch b/loongarch64/0073-cmd-link-add-support-for-buildmode-c-shared-on-loong.patch new file mode 100644 index 0000000..287038a --- /dev/null +++ b/loongarch64/0073-cmd-link-add-support-for-buildmode-c-shared-on-loong.patch @@ 
-0,0 +1,264 @@ +From 399b5099b321f490607db5887ada6cfdbb5ab996 Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Mon, 22 Aug 2022 20:26:59 +0800 +Subject: [PATCH 73/82] cmd/link: add support for --buildmode=c-shared on + loong64 + +Signed-off-by: limeidan +Change-Id: I0eef9d7451159b422a54408a9ab169af301d1d2a +--- + src/cmd/link/internal/ld/config.go | 2 +- + src/cmd/link/internal/ld/lib.go | 2 +- + src/cmd/link/internal/loong64/asm.go | 177 ++++++++++++++++++++++++++- + 3 files changed, 175 insertions(+), 6 deletions(-) + +diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go +index c15aac8b6a..09e032b012 100644 +--- a/src/cmd/link/internal/ld/config.go ++++ b/src/cmd/link/internal/ld/config.go +@@ -74,7 +74,7 @@ func (mode *BuildMode) Set(s string) error { + *mode = BuildModeCArchive + case "c-shared": + switch buildcfg.GOARCH { +- case "386", "amd64", "arm", "arm64", "ppc64le", "s390x": ++ case "386", "amd64", "arm", "arm64", "loong64", "ppc64le", "s390x": + default: + return badmode() + } +diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go +index 7bd6193693..074c07d185 100644 +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -794,7 +794,7 @@ func (ctxt *Link) linksetup() { + ctxt.loader.SetAttrReachable(moduledata, true) + ctxt.Moduledata = moduledata + +- if ctxt.Arch == sys.Arch386 && ctxt.HeadType != objabi.Hwindows { ++ if (ctxt.Arch == sys.Arch386 || ctxt.Arch == sys.ArchLoong64) && ctxt.HeadType != objabi.Hwindows { + if (ctxt.BuildMode == BuildModeCArchive && ctxt.IsELF) || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE || ctxt.DynlinkingGo() { + got := ctxt.loader.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0) + sb := ctxt.loader.MakeSymbolUpdater(got) +diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go +index 9c264311c4..c45971d50d 100644 +--- a/src/cmd/link/internal/loong64/asm.go ++++ 
b/src/cmd/link/internal/loong64/asm.go +@@ -11,14 +11,12 @@ import ( + "cmd/link/internal/loader" + "cmd/link/internal/sym" + "debug/elf" +- "log" + ) + + func gentext(ctxt *ld.Link, ldr *loader.Loader) {} + + func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { +- log.Fatalf("adddynrel not implemented") +- return false ++ return true + } + + func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { +@@ -87,6 +85,167 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, + out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)) + out.Write64(uint64(0x0)) + ++ case objabi.R_LOONG64_TLS_GD_PCREL_HI: ++ symgot := ld.ElfSymForReloc(ctxt, ldr.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x800)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_GD) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_LOONG64_TLS_GD_PCREL_LO: ++ symgot := ld.ElfSymForReloc(ctxt, ldr.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x4)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_GD) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ 
out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x804)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_GD) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SL)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SUB)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_LOONG64_GOTPCREL_HI: ++ symgot := ld.ElfSymForReloc(ctxt, ldr.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x800)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_GPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20)) ++ out.Write64(uint64(0x0)) 
++ ++ case objabi.R_LOONG64_GOTPCREL_LO: ++ symgot := ld.ElfSymForReloc(ctxt, ldr.LookupOrCreateSym("_GLOBAL_OFFSET_TABLE_", 0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x4)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_GPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(symgot)<<32) ++ out.Write64(uint64(0x804)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_GPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_ADD)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SL)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SUB)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ // The pcaddu12i + addi.d instructions is used to obtain address of a symbol on Loong64. ++ // The low 12-bit of the symbol address need to be added. The addi.d instruction have ++ // signed 12-bit immediate operand. The 0x800 (addr+U12 <=> addr+0x800+S12) is introduced ++ // to do sign extending from 12 bits. The 0x804 is 0x800 + 4, 4 is instruction bit ++ // width on Loong64 and is used to correct the PC of the addi.d instruction. 
+ case objabi.R_ADDRLOONG64: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) +@@ -179,6 +338,12 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade + nExtReloc = 2 + } + return val, nExtReloc, true ++ case objabi.R_LOONG64_TLS_GD_PCREL_HI, ++ objabi.R_LOONG64_GOTPCREL_HI: ++ return val, 6, true ++ case objabi.R_LOONG64_TLS_GD_PCREL_LO, ++ objabi.R_LOONG64_GOTPCREL_LO: ++ return val, 12, true + } + } + +@@ -222,6 +387,8 @@ func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant + func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { + switch r.Type() { + case objabi.R_ADDRLOONG64, ++ objabi.R_LOONG64_GOTPCREL_HI, ++ objabi.R_LOONG64_GOTPCREL_LO, + objabi.R_ADDRLOONG64U: + return ld.ExtrelocViaOuterSym(ldr, r, s), true + +@@ -230,7 +397,9 @@ func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sy + objabi.R_CONST, + objabi.R_GOTOFF, + objabi.R_CALLLOONG64, +- objabi.R_JMPLOONG64: ++ objabi.R_JMPLOONG64, ++ objabi.R_LOONG64_TLS_GD_PCREL_HI, ++ objabi.R_LOONG64_TLS_GD_PCREL_LO: + return ld.ExtrelocSimple(ldr, r), true + } + return loader.ExtReloc{}, false +-- +2.38.0 + diff --git a/loongarch64/0074-cmd-internal-sys-enable-c-shared-feature-on-loong64.patch b/loongarch64/0074-cmd-internal-sys-enable-c-shared-feature-on-loong64.patch new file mode 100644 index 0000000..3f43340 --- /dev/null +++ b/loongarch64/0074-cmd-internal-sys-enable-c-shared-feature-on-loong64.patch @@ -0,0 +1,27 @@ +From 7635386efe33c9b9fe869446eab031faa09381e7 Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Mon, 22 Aug 2022 20:24:21 +0800 +Subject: [PATCH 74/82] cmd/internal/sys: enable c-shared feature on loong64 + +Signed-off-by: limeidan +Change-Id: Ie694a67cdbd5e63026c9118c823fbc8e2a2a2225 +--- + src/cmd/internal/sys/supported.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git 
a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go +index 0d2bad9612..4c7b8062d8 100644 +--- a/src/cmd/internal/sys/supported.go ++++ b/src/cmd/internal/sys/supported.go +@@ -70,7 +70,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { + + case "c-shared": + switch platform { +- case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/ppc64le", "linux/s390x", ++ case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/ppc64le", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", +-- +2.38.0 + diff --git a/loongarch64/0075-cmd-dist-misc-cgo-testcshared-enable-c-shared-test-o.patch b/loongarch64/0075-cmd-dist-misc-cgo-testcshared-enable-c-shared-test-o.patch new file mode 100644 index 0000000..2c3155f --- /dev/null +++ b/loongarch64/0075-cmd-dist-misc-cgo-testcshared-enable-c-shared-test-o.patch @@ -0,0 +1,56 @@ +From 35849e3947f4be1e3a575f99958ad3842bf9390a Mon Sep 17 00:00:00 2001 +From: limeidan +Date: Thu, 25 Aug 2022 17:23:29 +0800 +Subject: [PATCH 75/82] cmd/dist, misc/cgo/testcshared: enable c-shared test on + loong64 + +Signed-off-by: limeidan +Change-Id: Idef995fe8be1a466369bd7332259846f2f147884 +--- + misc/cgo/testcshared/testdata/libgo2/dup2.go | 2 +- + misc/cgo/testcshared/testdata/libgo2/dup3.go | 2 +- + src/cmd/dist/test.go | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/misc/cgo/testcshared/testdata/libgo2/dup2.go b/misc/cgo/testcshared/testdata/libgo2/dup2.go +index d18f0b130d..1f3db81227 100644 +--- a/misc/cgo/testcshared/testdata/libgo2/dup2.go ++++ b/misc/cgo/testcshared/testdata/libgo2/dup2.go +@@ -2,7 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd ++// +build darwin dragonfly freebsd linux,!arm64,!loong64 netbsd openbsd + + package main + +diff --git a/misc/cgo/testcshared/testdata/libgo2/dup3.go b/misc/cgo/testcshared/testdata/libgo2/dup3.go +index c9c65a6e3c..cf19b76ce8 100644 +--- a/misc/cgo/testcshared/testdata/libgo2/dup3.go ++++ b/misc/cgo/testcshared/testdata/libgo2/dup3.go +@@ -2,7 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-// +build linux,arm64 ++// +build linux,arm64 linux,loong64 + + package main + +diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go +index 485fe6b17b..1d68b8a70d 100644 +--- a/src/cmd/dist/test.go ++++ b/src/cmd/dist/test.go +@@ -1053,7 +1053,7 @@ func (t *tester) supportedBuildmode(mode string) bool { + return false + case "c-shared": + switch pair { +- case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", ++ case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-loong64", "linux-ppc64le", "linux-s390x", + "darwin-amd64", "darwin-arm64", + "freebsd-amd64", + "android-arm", "android-arm64", "android-386", +-- +2.38.0 + diff --git a/loongarch64/0076-cmd-internal-obj-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch b/loongarch64/0076-cmd-internal-obj-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch new file mode 100644 index 0000000..11f0747 --- /dev/null +++ b/loongarch64/0076-cmd-internal-obj-loong64-add-MASKEQZ-and-MASKNEZ-ins.patch @@ -0,0 +1,97 @@ +From 07dee28105fcf870e05930f346c5630183a227dd Mon Sep 17 00:00:00 2001 +From: Wayne Zuo +Date: Sat, 9 Jul 2022 11:28:50 +0800 +Subject: [PATCH 76/82] cmd/internal/obj/loong64: add MASKEQZ and MASKNEZ + instructions support + +Change-Id: Ied16c3be47c863a94d46bd568191057ded4b7d0a +Reviewed-on: https://go-review.googlesource.com/c/go/+/416734 +TryBot-Result: Gopher Robot +Run-TryBot: Wayne Zuo +Reviewed-by: David Chase +Reviewed-by: Ian Lance Taylor 
+Reviewed-by: xiaodong liu +--- + src/cmd/asm/internal/asm/testdata/loong64enc1.s | 3 +++ + src/cmd/internal/obj/loong64/a.out.go | 3 +++ + src/cmd/internal/obj/loong64/anames.go | 2 ++ + src/cmd/internal/obj/loong64/asm.go | 8 ++++++++ + 4 files changed, 16 insertions(+) + +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +index 1eced6b4df..ae143988a1 100644 +--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -180,6 +180,9 @@ lable2: + SRLV $32, R4, R5 // 85804500 + SRLV $32, R4 // 84804500 + ++ MASKEQZ R4, R5, R6 // a6101300 ++ MASKNEZ R4, R5, R6 // a6901300 ++ + MOVFD F4, F5 // 85241901 + MOVDF F4, F5 // 85181901 + MOVWF F4, F5 // 85101d01 +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +index e02cae7c75..55ba8797a4 100644 +--- a/src/cmd/internal/obj/loong64/a.out.go ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -353,6 +353,9 @@ const ( + + AXOR + ++ AMASKEQZ ++ AMASKNEZ ++ + // 64-bit + AMOVV + AMOVVL +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +index 10555af6df..39e612e6fe 100644 +--- a/src/cmd/internal/obj/loong64/anames.go ++++ b/src/cmd/internal/obj/loong64/anames.go +@@ -99,6 +99,8 @@ var Anames = []string{ + "TNE", + "WORD", + "XOR", ++ "MASKEQZ", ++ "MASKNEZ", + "MOVV", + "MOVVL", + "MOVVR", +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 14e6f05da1..149d025d3a 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -68,6 +68,7 @@ var optab = []Optab{ + {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, + {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, + {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AMASKEQZ, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, + + {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, + {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, +@@ 
-1066,6 +1067,9 @@ func buildop(ctxt *obj.Link) { + + case ATEQ: + opset(ATNE, r0) ++ ++ case AMASKEQZ: ++ opset(AMASKNEZ, r0) + } + } + } +@@ -1737,6 +1741,10 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { + return 0x24 << 15 // SLT + case ASGTU: + return 0x25 << 15 // SLTU ++ case AMASKEQZ: ++ return 0x26 << 15 ++ case AMASKNEZ: ++ return 0x27 << 15 + case AAND: + return 0x29 << 15 + case AOR: +-- +2.38.0 + diff --git a/loongarch64/0077-cmd-internal-obj-loong64-add-ROTR-ROTRV-instructions.patch b/loongarch64/0077-cmd-internal-obj-loong64-add-ROTR-ROTRV-instructions.patch new file mode 100644 index 0000000..e3742df --- /dev/null +++ b/loongarch64/0077-cmd-internal-obj-loong64-add-ROTR-ROTRV-instructions.patch @@ -0,0 +1,157 @@ +From 0f5a966e95eb277d4797f0f1277bb37b9ad0c6f2 Mon Sep 17 00:00:00 2001 +From: Wayne Zuo +Date: Tue, 9 Aug 2022 23:08:57 +0800 +Subject: [PATCH 77/82] cmd/internal/obj/loong64: add ROTR, ROTRV instructions + support + +Reference: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html + +Change-Id: I29adb84eb70bffd963c79ed6957a5197896fb2bf +Reviewed-on: https://go-review.googlesource.com/c/go/+/422316 +Run-TryBot: Wayne Zuo +Reviewed-by: Ian Lance Taylor +TryBot-Result: Gopher Robot +Reviewed-by: David Chase +--- + src/cmd/asm/internal/asm/testdata/loong64enc1.s | 8 ++++++++ + src/cmd/internal/obj/loong64/a.out.go | 2 ++ + src/cmd/internal/obj/loong64/anames.go | 2 ++ + src/cmd/internal/obj/loong64/asm.go | 14 +++++++++++++- + 4 files changed, 25 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +index ae143988a1..7e34cc056d 100644 +--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -41,8 +41,12 @@ lable2: + SRL R4, R5, R6 // a6901700 + SRA R4, R5 // a5101800 + SRA R4, R5, R6 // a6101800 ++ ROTR R4, R5 // a5101b00 ++ ROTR R4, R5, R6 // a6101b00 + SLLV R4, R5 // a5901800 + 
SLLV R4, R5, R6 // a6901800 ++ ROTRV R4, R5 // a5901b00 ++ ROTRV R4, R5, R6 // a6901b00 + CLO R4, R5 // 85100000 + CLZ R4, R5 // 85140000 + ADDF F4, F5 // a5900001 +@@ -102,8 +106,12 @@ lable2: + SRL $4, R4 // 84904400 + SRA $4, R4, R5 // 85904800 + SRA $4, R4 // 84904800 ++ ROTR $4, R4, R5 // 85904c00 ++ ROTR $4, R4 // 84904c00 + SLLV $4, R4, R5 // 85104100 + SLLV $4, R4 // 84104100 ++ ROTRV $4, R4, R5 // 85104d00 ++ ROTRV $4, R4 // 84104d00 + SYSCALL // 00002b00 + BEQ R4, R5, 1(PC) // 85040058 + BEQ R4, 1(PC) // 80040058 +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +index 55ba8797a4..4fb4c91eea 100644 +--- a/src/cmd/internal/obj/loong64/a.out.go ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -337,6 +337,7 @@ const ( + AFSQRTS + ASRA + ASRL ++ AROTR + ASUB + ASUBD + ASUBF +@@ -364,6 +365,7 @@ const ( + ASLLV + ASRAV + ASRLV ++ AROTRV + ADIVV + ADIVVU + +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +index 39e612e6fe..438261d813 100644 +--- a/src/cmd/internal/obj/loong64/anames.go ++++ b/src/cmd/internal/obj/loong64/anames.go +@@ -88,6 +88,7 @@ var Anames = []string{ + "FSQRTS", + "SRA", + "SRL", ++ "ROTR", + "SUB", + "SUBD", + "SUBF", +@@ -107,6 +108,7 @@ var Anames = []string{ + "SLLV", + "SRAV", + "SRLV", ++ "ROTRV", + "DIVV", + "DIVVU", + "REMV", +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +index 149d025d3a..0d40ec1f39 100644 +--- a/src/cmd/internal/obj/loong64/asm.go ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -1002,10 +1002,12 @@ func buildop(ctxt *obj.Link) { + case ASLL: + opset(ASRL, r0) + opset(ASRA, r0) ++ opset(AROTR, r0) + + case ASLLV: + opset(ASRAV, r0) + opset(ASRLV, r0) ++ opset(AROTRV, r0) + + case ASUB: + opset(ASUBU, r0) +@@ -1763,12 +1765,16 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { + return 0x2f << 15 + case ASRA: + return 0x30 << 15 ++ case AROTR: ++ return 0x36 << 15 + case ASLLV: + return 0x31 << 
15 + case ASRLV: + return 0x32 << 15 + case ASRAV: + return 0x33 << 15 ++ case AROTRV: ++ return 0x37 << 15 + case AADDV: + return 0x21 << 15 + case AADDVU: +@@ -1961,6 +1967,8 @@ func (c *ctxt0) opirr(a obj.As) uint32 { + return 0x00089 << 15 + case ASRA: + return 0x00091 << 15 ++ case AROTR: ++ return 0x00099 << 15 + case AADDV: + return 0x00b << 22 + case AADDVU: +@@ -2055,6 +2063,9 @@ func (c *ctxt0) opirr(a obj.As) uint32 { + case ASRAV, + -ASRAV: + return 0x0049 << 16 ++ case AROTRV, ++ -AROTRV: ++ return 0x004d << 16 + case -ALL: + return 0x020 << 24 + case -ALLV: +@@ -2077,7 +2088,8 @@ func vshift(a obj.As) bool { + switch a { + case ASLLV, + ASRLV, +- ASRAV: ++ ASRAV, ++ AROTRV: + return true + } + return false +-- +2.38.0 + diff --git a/loongarch64/0078-cmd-compile-intrinsify-Add64-on-loong64.patch b/loongarch64/0078-cmd-compile-intrinsify-Add64-on-loong64.patch new file mode 100644 index 0000000..d0fd8ea --- /dev/null +++ b/loongarch64/0078-cmd-compile-intrinsify-Add64-on-loong64.patch @@ -0,0 +1,277 @@ +From 271c062886046ebf31b9895a4da8ab2356b446ce Mon Sep 17 00:00:00 2001 +From: Wayne Zuo +Date: Tue, 6 Sep 2022 22:12:16 +0800 +Subject: [PATCH 78/82] cmd/compile: intrinsify Add64 on loong64 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is a follow up of CL 420094 on loong64. + +Reduce go toolchain size slightly on linux/loong64. 
+ +compilecmp HEAD~1 -> HEAD +HEAD~1 (8a32354219): internal/trace: use strings.Builder +HEAD (1767784ac3): cmd/compile: intrinsify Add64 on loong64 +platform: linux/loong64 + +file before after Δ % +addr2line 3882616 3882536 -80 -0.002% +api 5528866 5528450 -416 -0.008% +asm 5133780 5133796 +16 +0.000% +cgo 4668787 4668491 -296 -0.006% +compile 25163409 25164729 +1320 +0.005% +cover 4658055 4658007 -48 -0.001% +dist 3437783 3437727 -56 -0.002% +doc 3883069 3883205 +136 +0.004% +fix 3383254 3383070 -184 -0.005% +link 6747559 6747023 -536 -0.008% +nm 3793923 3793939 +16 +0.000% +objdump 4256628 4256812 +184 +0.004% +pack 2356328 2356144 -184 -0.008% +pprof 14233370 14131910 -101460 -0.713% +test2json 2638668 2638476 -192 -0.007% +trace 13392065 13360781 -31284 -0.234% +vet 7456388 7455588 -800 -0.011% +total 132498256 132364392 -133864 -0.101% + +file before after Δ % +compile/internal/ssa.a 35644590 35649482 +4892 +0.014% +compile/internal/ssagen.a 4101250 4099858 -1392 -0.034% +internal/edwards25519/field.a 226064 201718 -24346 -10.770% +internal/nistec/fiat.a 1689922 1212254 -477668 -28.266% +tls.a 3256798 3256800 +2 +0.000% +big.a 1718552 1708518 -10034 -0.584% +bits.a 107786 106762 -1024 -0.950% +cmplx.a 169434 168214 -1220 -0.720% +math.a 581302 578762 -2540 -0.437% +netip.a 556096 555922 -174 -0.031% +net.a 3286526 3286528 +2 +0.000% +runtime.a 8644786 8644510 -276 -0.003% +strconv.a 519098 518374 -724 -0.139% +golang.org/x/crypto/internal/poly1305.a 115398 109546 -5852 -5.071% +total 260913122 260392768 -520354 -0.199% + +Change-Id: I75b2bb7761fa5a0d0d032d4ebe3582d092ea77be +Reviewed-on: https://go-review.googlesource.com/c/go/+/428556 +Reviewed-by: Carlos Amedee +Run-TryBot: Wayne Zuo +Reviewed-by: David Chase +TryBot-Result: Gopher Robot +--- + .../compile/internal/ssa/gen/LOONG64.rules | 8 +++ + .../compile/internal/ssa/rewriteLOONG64.go | 60 +++++++++++++++++++ + src/cmd/compile/internal/ssagen/ssa.go | 12 ++-- + test/codegen/mathbits.go | 7 +++ + 4 files 
changed, 81 insertions(+), 6 deletions(-) + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index 06337b6242..e0e2e0199b 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -38,6 +38,10 @@ + (Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) + (Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + ++(Select0 (Add64carry x y c)) => (ADDV (ADDV x y) c) ++(Select1 (Add64carry x y c)) => ++ (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) ++ + // (x + y) / 2 with x>=y => (x - y) / 2 + y + (Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) + +@@ -677,3 +681,7 @@ + (GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) + (GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) + (GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) ++ ++// SGT/SGTU with known outcomes. ++(SGT x x) => (MOVVconst [0]) ++(SGTU x x) => (MOVVconst [0]) +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index 6b971b676c..63b061eadc 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -3362,6 +3362,17 @@ func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool { + v.AddArg(x) + return true + } ++ // match: (SGT x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } + return false + } + func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { +@@ -3384,6 +3395,17 @@ func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { + v.AddArg(x) + return true + } ++ // match: (SGTU x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } + return false + } + func rewriteValueLOONG64_OpLOONG64SGTUconst(v 
*Value) bool { +@@ -6825,6 +6847,22 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + v.AddArg(v0) + return true + } ++ // match: (Select0 (Add64carry x y c)) ++ // result: (ADDV (ADDV x y) c) ++ for { ++ t := v.Type ++ if v_0.Op != OpAdd64carry { ++ break ++ } ++ c := v_0.Args[2] ++ x := v_0.Args[0] ++ y := v_0.Args[1] ++ v.reset(OpLOONG64ADDV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64ADDV, t) ++ v0.AddArg2(x, y) ++ v.AddArg2(v0, c) ++ return true ++ } + // match: (Select0 (DIVVU _ (MOVVconst [1]))) + // result: (MOVVconst [0]) + for { +@@ -6937,6 +6975,28 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + v.AddArg2(v0, v2) + return true + } ++ // match: (Select1 (Add64carry x y c)) ++ // result: (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) ++ for { ++ t := v.Type ++ if v_0.Op != OpAdd64carry { ++ break ++ } ++ c := v_0.Args[2] ++ x := v_0.Args[0] ++ y := v_0.Args[1] ++ v.reset(OpLOONG64OR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) ++ s := b.NewValue0(v.Pos, OpLOONG64ADDV, t) ++ s.AddArg2(x, y) ++ v0.AddArg2(x, s) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64ADDV, t) ++ v3.AddArg2(s, c) ++ v2.AddArg2(s, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } + // match: (Select1 (MULVU x (MOVVconst [-1]))) + // result: (NEGV x) + for { +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index 7ddec846ff..449a6cb6a8 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -1658,12 +1658,12 @@ func (s *state) stmt(n ir.Node) { + // Currently doesn't really work because (*p)[:len(*p)] appears here as: + // tmp = len(*p) + // (*p)[:tmp] +- //if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) { ++ // if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) { + // j = nil +- //} +- //if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) { ++ // } ++ // if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) { + 
// k = nil +- //} ++ // } + if i == nil { + skip |= skipPtr + if j == nil { +@@ -4543,8 +4543,8 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) + }, +- sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X) +- alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X) ++ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.Loong64) ++ alias("math/bits", "Add", "math/bits", "Add64", p8...) + addF("math/bits", "Sub64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) +diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go +index 03012eff5d..51c8432d1e 100644 +--- a/test/codegen/mathbits.go ++++ b/test/codegen/mathbits.go +@@ -406,6 +406,7 @@ func Add(x, y, ci uint) (r, co uint) { + func AddC(x, ci uint) (r, co uint) { + // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" ++ // loong64: "ADDV", "SGTU" + // s390x:"ADDE","ADDC\t[$]-1," + return bits.Add(x, 7, ci) + } +@@ -413,6 +414,7 @@ func AddC(x, ci uint) (r, co uint) { + func AddZ(x, y uint) (r, co uint) { + // arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP" + // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" ++ // loong64: "ADDV", "SGTU" + // s390x:"ADDC",-"ADDC\t[$]-1," + return bits.Add(x, y, 0) + } +@@ -420,6 +422,7 @@ func AddZ(x, y uint) (r, co uint) { + func AddR(x, y, ci uint) uint { + // arm64:"ADDS","ADCS",-"ADD\t",-"CMP" + // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" ++ // loong64: "ADDV", -"SGTU" + // s390x:"ADDE","ADDC\t[$]-1," + r, _ := bits.Add(x, y, ci) + return r +@@ -438,6 +441,7 @@ func AddM(p, q, r *[3]uint) { + func Add64(x, y, ci uint64) (r, co uint64) { + // 
arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" ++ // loong64: "ADDV", "SGTU" + // ppc64: "ADDC", "ADDE", "ADDZE" + // ppc64le: "ADDC", "ADDE", "ADDZE" + // s390x:"ADDE","ADDC\t[$]-1," +@@ -447,6 +451,7 @@ func Add64(x, y, ci uint64) (r, co uint64) { + func Add64C(x, ci uint64) (r, co uint64) { + // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" ++ // loong64: "ADDV", "SGTU" + // ppc64: "ADDC", "ADDE", "ADDZE" + // ppc64le: "ADDC", "ADDE", "ADDZE" + // s390x:"ADDE","ADDC\t[$]-1," +@@ -456,6 +461,7 @@ func Add64C(x, ci uint64) (r, co uint64) { + func Add64Z(x, y uint64) (r, co uint64) { + // arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP" + // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" ++ // loong64: "ADDV", "SGTU" + // ppc64: "ADDC", "ADDE", "ADDZE" + // ppc64le: "ADDC", "ADDE", "ADDZE" + // s390x:"ADDC",-"ADDC\t[$]-1," +@@ -465,6 +471,7 @@ func Add64Z(x, y uint64) (r, co uint64) { + func Add64R(x, y, ci uint64) uint64 { + // arm64:"ADDS","ADCS",-"ADD\t",-"CMP" + // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" ++ // loong64: "ADDV", -"SGTU" + // ppc64: "ADDC", "ADDE", "ADDZE" + // ppc64le: "ADDC", "ADDE", "ADDZE" + // s390x:"ADDE","ADDC\t[$]-1," +-- +2.38.0 + diff --git a/loongarch64/0079-cmd-compile-intrinsify-Sub64-on-loong64.patch b/loongarch64/0079-cmd-compile-intrinsify-Sub64-on-loong64.patch new file mode 100644 index 0000000..55b6301 --- /dev/null +++ b/loongarch64/0079-cmd-compile-intrinsify-Sub64-on-loong64.patch @@ -0,0 +1,186 @@ +From e925e5f1a1dea5dbf899d25aa742c78cb22f08a6 Mon Sep 17 00:00:00 2001 +From: Wayne Zuo +Date: Tue, 6 Sep 2022 22:29:31 +0800 +Subject: [PATCH 79/82] cmd/compile: intrinsify Sub64 on loong64 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This is a follow up of CL 420095 on loong64. 
+ +file before after Δ % +compile/internal/ssa.a 35649482 35653274 +3792 +0.011% +compile/internal/ssagen.a 4099858 4098728 -1130 -0.028% +ecdh.a 227896 226896 -1000 -0.439% +internal/nistec/fiat.a 1212254 1128184 -84070 -6.935% +tls.a 3256800 3256802 +2 +0.000% +big.a 1708518 1702496 -6022 -0.352% +bits.a 106762 105734 -1028 -0.963% +math.a 578762 577288 -1474 -0.255% +netip.a 555922 555610 -312 -0.056% +net.a 3286528 3286530 +2 +0.000% +golang.org/x/crypto/internal/poly1305.a 109546 107686 -1860 -1.698% +total 260392768 260299668 -93100 -0.036% + +Change-Id: Ieffca705aae5666501f284502d986ca179dde494 +Reviewed-on: https://go-review.googlesource.com/c/go/+/428557 +Reviewed-by: Carlos Amedee +TryBot-Result: Gopher Robot +Reviewed-by: David Chase +Run-TryBot: Wayne Zuo +--- + .../compile/internal/ssa/gen/LOONG64.rules | 4 ++ + .../compile/internal/ssa/rewriteLOONG64.go | 38 +++++++++++++++++++ + src/cmd/compile/internal/ssagen/ssa.go | 4 +- + test/codegen/mathbits.go | 7 ++++ + 4 files changed, 51 insertions(+), 2 deletions(-) + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index e0e2e0199b..20664d33c5 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -42,6 +42,10 @@ + (Select1 (Add64carry x y c)) => + (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) + ++(Select0 (Sub64borrow x y c)) => (SUBV (SUBV x y) c) ++(Select1 (Sub64borrow x y c)) => ++ (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) ++ + // (x + y) / 2 with x>=y => (x - y) / 2 + y + (Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) + +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index 63b061eadc..daab2163a3 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -6863,6 +6863,22 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + v.AddArg2(v0, c) + 
return true + } ++ // match: (Select0 (Sub64borrow x y c)) ++ // result: (SUBV (SUBV x y) c) ++ for { ++ t := v.Type ++ if v_0.Op != OpSub64borrow { ++ break ++ } ++ c := v_0.Args[2] ++ x := v_0.Args[0] ++ y := v_0.Args[1] ++ v.reset(OpLOONG64SUBV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) ++ v0.AddArg2(x, y) ++ v.AddArg2(v0, c) ++ return true ++ } + // match: (Select0 (DIVVU _ (MOVVconst [1]))) + // result: (MOVVconst [0]) + for { +@@ -6997,6 +7013,28 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + v.AddArg2(v0, v2) + return true + } ++ // match: (Select1 (Sub64borrow x y c)) ++ // result: (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) ++ for { ++ t := v.Type ++ if v_0.Op != OpSub64borrow { ++ break ++ } ++ c := v_0.Args[2] ++ x := v_0.Args[0] ++ y := v_0.Args[1] ++ v.reset(OpLOONG64OR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) ++ s := b.NewValue0(v.Pos, OpLOONG64SUBV, t) ++ s.AddArg2(x, y) ++ v0.AddArg2(s, x) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) ++ v3.AddArg2(s, c) ++ v2.AddArg2(v3, s) ++ v.AddArg2(v0, v2) ++ return true ++ } + // match: (Select1 (MULVU x (MOVVconst [-1]))) + // result: (NEGV x) + for { +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index 449a6cb6a8..509773f0bb 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -4549,8 +4549,8 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) + }, +- sys.AMD64, sys.ARM64, sys.S390X) +- alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X) ++ sys.AMD64, sys.ARM64, sys.S390X, sys.Loong64) ++ alias("math/bits", "Sub", "math/bits", "Sub64", p8...) 
+ addF("math/bits", "Div64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // check for divide-by-zero/overflow and panic with appropriate message +diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go +index 51c8432d1e..a0b8f783cb 100644 +--- a/test/codegen/mathbits.go ++++ b/test/codegen/mathbits.go +@@ -567,6 +567,7 @@ func Sub(x, y, ci uint) (r, co uint) { + func SubC(x, ci uint) (r, co uint) { + // amd64:"NEGL","SBBQ","NEGQ" + // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" ++ // loong64:"SUBV","SGTU" + // s390x:"SUBE" + return bits.Sub(x, 7, ci) + } +@@ -574,6 +575,7 @@ func SubC(x, ci uint) (r, co uint) { + func SubZ(x, y uint) (r, co uint) { + // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" + // arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP" ++ // loong64:"SUBV","SGTU" + // s390x:"SUBC" + return bits.Sub(x, y, 0) + } +@@ -581,6 +583,7 @@ func SubZ(x, y uint) (r, co uint) { + func SubR(x, y, ci uint) uint { + // amd64:"NEGL","SBBQ",-"NEGQ" + // arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP" ++ // loong64:"SUBV",-"SGTU" + // s390x:"SUBE" + r, _ := bits.Sub(x, y, ci) + return r +@@ -598,6 +601,7 @@ func SubM(p, q, r *[3]uint) { + func Sub64(x, y, ci uint64) (r, co uint64) { + // amd64:"NEGL","SBBQ","NEGQ" + // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" ++ // loong64:"SUBV","SGTU" + // s390x:"SUBE" + return bits.Sub64(x, y, ci) + } +@@ -605,6 +609,7 @@ func Sub64(x, y, ci uint64) (r, co uint64) { + func Sub64C(x, ci uint64) (r, co uint64) { + // amd64:"NEGL","SBBQ","NEGQ" + // arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP" ++ // loong64:"SUBV","SGTU" + // s390x:"SUBE" + return bits.Sub64(x, 7, ci) + } +@@ -612,6 +617,7 @@ func Sub64C(x, ci uint64) (r, co uint64) { + func Sub64Z(x, y uint64) (r, co uint64) { + // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" + // arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP" ++ // loong64:"SUBV","SGTU" + // s390x:"SUBC" + return bits.Sub64(x, y, 0) + } +@@ -619,6 
+625,7 @@ func Sub64Z(x, y uint64) (r, co uint64) { + func Sub64R(x, y, ci uint64) uint64 { + // amd64:"NEGL","SBBQ",-"NEGQ" + // arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP" ++ // loong64:"SUBV",-"SGTU" + // s390x:"SUBE" + r, _ := bits.Sub64(x, y, ci) + return r +-- +2.38.0 + diff --git a/loongarch64/0080-cmd-link-detect-glibc-vs-musl-ldso-at-link-time.patch b/loongarch64/0080-cmd-link-detect-glibc-vs-musl-ldso-at-link-time.patch new file mode 100644 index 0000000..1137574 --- /dev/null +++ b/loongarch64/0080-cmd-link-detect-glibc-vs-musl-ldso-at-link-time.patch @@ -0,0 +1,265 @@ +From f911abbc599fc71a520c398edefdecad31efbb8f Mon Sep 17 00:00:00 2001 +From: Russ Cox +Date: Tue, 2 Aug 2022 10:12:52 -0400 +Subject: [PATCH 80/82] cmd/link: detect glibc vs musl ldso at link time + +Doing the test at link time lets us distribute one Linux toolchain +that works on both glibc-based and musl-based Linux systems. +The old way built a toolchain that only ran on one or the other. + +Fixes #54197. 
+
+Change-Id: Iaae8c274c78e1091eee828a720b49646be9bfffe
+Reviewed-on: https://go-review.googlesource.com/c/go/+/420774
+Auto-Submit: Russ Cox
+Reviewed-by: Cherry Mui
+TryBot-Result: Gopher Robot
+Run-TryBot: Russ Cox
+---
+ src/cmd/link/internal/amd64/obj.go | 1 +
+ src/cmd/link/internal/arm/obj.go | 1 +
+ src/cmd/link/internal/arm64/obj.go | 5 +++--
+ src/cmd/link/internal/ld/elf.go | 11 +++++++++++
+ src/cmd/link/internal/ld/lib.go | 1 +
+ src/cmd/link/internal/loong64/obj.go | 1 +
+ src/cmd/link/internal/mips/obj.go | 5 ++++-
+ src/cmd/link/internal/mips64/obj.go | 3 +++
+ src/cmd/link/internal/ppc64/obj.go | 15 ++++++++++-----
+ src/cmd/link/internal/s390x/obj.go | 3 ++-
+ src/cmd/link/internal/x86/obj.go | 11 ++++++-----
+ src/make.bash | 9 ---------
+ 12 files changed, 43 insertions(+), 23 deletions(-)
+
+diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go
+index d09c90ea28..f46045bc9d 100644
+--- a/src/cmd/link/internal/amd64/obj.go
++++ b/src/cmd/link/internal/amd64/obj.go
+@@ -65,6 +65,7 @@ func Init() (*sys.Arch, ld.Arch) {
+ TLSIEtoLE: tlsIEtoLE,
+
+ Linuxdynld: "/lib64/ld-linux-x86-64.so.2",
++ LinuxdynldMusl: "/lib/ld-musl-x86_64.so.1",
+ Freebsddynld: "/libexec/ld-elf.so.1",
+ Openbsddynld: "/usr/libexec/ld.so",
+ Netbsddynld: "/libexec/ld.elf_so",
+diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go
+index b7d149851c..6da0c77483 100644
+--- a/src/cmd/link/internal/arm/obj.go
++++ b/src/cmd/link/internal/arm/obj.go
+@@ -63,6 +63,7 @@ func Init() (*sys.Arch, ld.Arch) {
+ PEreloc1: pereloc1,
+
+ Linuxdynld: "/lib/ld-linux.so.3", // 2 for OABI, 3 for EABI
++ LinuxdynldMusl: "/lib/ld-musl-arm.so.1",
+ Freebsddynld: "/usr/libexec/ld-elf.so.1",
+ Openbsddynld: "/usr/libexec/ld.so",
+ Netbsddynld: "/libexec/ld.elf_so",
+diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go
+index 9c7459855c..a47be0b282 100644
+--- a/src/cmd/link/internal/arm64/obj.go
++++ 
b/src/cmd/link/internal/arm64/obj.go +@@ -62,8 +62,9 @@ func Init() (*sys.Arch, ld.Arch) { + PEreloc1: pereloc1, + Trampoline: trampoline, + +- Androiddynld: "/system/bin/linker64", +- Linuxdynld: "/lib/ld-linux-aarch64.so.1", ++ Androiddynld: "/system/bin/linker64", ++ Linuxdynld: "/lib/ld-linux-aarch64.so.1", ++ LinuxdynldMusl: "/lib/ld-musl-aarch64.so.1", + + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", +diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go +index 015a698479..3fd7140d91 100644 +--- a/src/cmd/link/internal/ld/elf.go ++++ b/src/cmd/link/internal/ld/elf.go +@@ -15,6 +15,7 @@ import ( + "encoding/hex" + "fmt" + "internal/buildcfg" ++ "os" + "path/filepath" + "runtime" + "sort" +@@ -1769,6 +1770,16 @@ func asmbElf(ctxt *Link) { + } + } else { + interpreter = thearch.Linuxdynld ++ // If interpreter does not exist, try musl instead. ++ // This lets the same cmd/link binary work on ++ // both glibc-based and musl-based systems. 
++ if _, err := os.Stat(interpreter); err != nil { ++ if musl := thearch.LinuxdynldMusl; musl != "" { ++ if _, err := os.Stat(musl); err == nil { ++ interpreter = musl ++ } ++ } ++ } + } + + case objabi.Hfreebsd: +diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go +index 074c07d185..2e6e6dd69f 100644 +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -191,6 +191,7 @@ type Arch struct { + + Androiddynld string + Linuxdynld string ++ LinuxdynldMusl string + Freebsddynld string + Netbsddynld string + Openbsddynld string +diff --git a/src/cmd/link/internal/loong64/obj.go b/src/cmd/link/internal/loong64/obj.go +index 62014fabc1..824f6e4e3d 100644 +--- a/src/cmd/link/internal/loong64/obj.go ++++ b/src/cmd/link/internal/loong64/obj.go +@@ -31,6 +31,7 @@ func Init() (*sys.Arch, ld.Arch) { + Gentext: gentext, + + Linuxdynld: "/lib64/ld.so.1", ++ LinuxdynldMusl: "/lib64/ld-musl-loongarch.so.1", + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", +diff --git a/src/cmd/link/internal/mips/obj.go b/src/cmd/link/internal/mips/obj.go +index 5ca7582529..f03c9abfe9 100644 +--- a/src/cmd/link/internal/mips/obj.go ++++ b/src/cmd/link/internal/mips/obj.go +@@ -39,8 +39,10 @@ import ( + + func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchMIPS ++ musl := "/lib/ld-musl-mips.so.1" + if buildcfg.GOARCH == "mipsle" { + arch = sys.ArchMIPSLE ++ musl = "/lib/ld-musl-mipsel.so.1" + } + + theArch := ld.Arch{ +@@ -60,7 +62,8 @@ func Init() (*sys.Arch, ld.Arch) { + Gentext: gentext, + Machoreloc1: machoreloc1, + +- Linuxdynld: "/lib/ld.so.1", ++ Linuxdynld: "/lib/ld.so.1", ++ LinuxdynldMusl: musl, + + Freebsddynld: "XXX", + Openbsddynld: "XXX", +diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go +index 544e1ef7be..557d7993cd 100644 +--- a/src/cmd/link/internal/mips64/obj.go ++++ b/src/cmd/link/internal/mips64/obj.go +@@ -39,8 +39,10 @@ import ( + + func Init() (*sys.Arch, ld.Arch) { + 
arch := sys.ArchMIPS64 ++ musl := "/lib/ld-musl-mips64.so.1" + if buildcfg.GOARCH == "mips64le" { + arch = sys.ArchMIPS64LE ++ musl = "/lib/ld-musl-mips64el.so.1" + } + + theArch := ld.Arch{ +@@ -60,6 +62,7 @@ func Init() (*sys.Arch, ld.Arch) { + Machoreloc1: machoreloc1, + + Linuxdynld: "/lib64/ld64.so.1", ++ LinuxdynldMusl: musl, + Freebsddynld: "XXX", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "XXX", +diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go +index b6d5ad92af..f580c55456 100644 +--- a/src/cmd/link/internal/ppc64/obj.go ++++ b/src/cmd/link/internal/ppc64/obj.go +@@ -38,9 +38,14 @@ import ( + ) + + func Init() (*sys.Arch, ld.Arch) { +- arch := sys.ArchPPC64 +- if buildcfg.GOARCH == "ppc64le" { +- arch = sys.ArchPPC64LE ++ arch := sys.ArchPPC64LE ++ dynld := "/lib64/ld64.so.2" ++ musl := "/lib/ld-musl-powerpc64le.so.1" ++ ++ if buildcfg.GOARCH == "ppc64" { ++ arch = sys.ArchPPC64 ++ dynld = "/lib64/ld64.so.1" ++ musl = "/lib/ld-musl-powerpc64.so.1" + } + + theArch := ld.Arch{ +@@ -64,8 +69,8 @@ func Init() (*sys.Arch, ld.Arch) { + Machoreloc1: machoreloc1, + Xcoffreloc1: xcoffreloc1, + +- // TODO(austin): ABI v1 uses /usr/lib/ld.so.1, +- Linuxdynld: "/lib64/ld64.so.1", ++ Linuxdynld: dynld, ++ LinuxdynldMusl: musl, + + Freebsddynld: "XXX", + Openbsddynld: "XXX", +diff --git a/src/cmd/link/internal/s390x/obj.go b/src/cmd/link/internal/s390x/obj.go +index 8acc1d4917..3aa8948151 100644 +--- a/src/cmd/link/internal/s390x/obj.go ++++ b/src/cmd/link/internal/s390x/obj.go +@@ -56,7 +56,8 @@ func Init() (*sys.Arch, ld.Arch) { + Gentext: gentext, + Machoreloc1: machoreloc1, + +- Linuxdynld: "/lib64/ld64.so.1", ++ Linuxdynld: "/lib64/ld64.so.1", ++ LinuxdynldMusl: "/lib/ld-musl-s390x.so.1", + + // not relevant for s390x + Freebsddynld: "XXX", +diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go +index a19437d8e6..b0a129eb0a 100644 +--- a/src/cmd/link/internal/x86/obj.go ++++ 
b/src/cmd/link/internal/x86/obj.go +@@ -61,11 +61,12 @@ func Init() (*sys.Arch, ld.Arch) { + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, + +- Linuxdynld: "/lib/ld-linux.so.2", +- Freebsddynld: "/usr/libexec/ld-elf.so.1", +- Openbsddynld: "/usr/libexec/ld.so", +- Netbsddynld: "/usr/libexec/ld.elf_so", +- Solarisdynld: "/lib/ld.so.1", ++ Linuxdynld: "/lib/ld-linux.so.2", ++ LinuxdynldMusl: "/lib/ld-musl-i386.so.1", ++ Freebsddynld: "/usr/libexec/ld-elf.so.1", ++ Openbsddynld: "/usr/libexec/ld.so", ++ Netbsddynld: "/usr/libexec/ld.elf_so", ++ Solarisdynld: "/lib/ld.so.1", + } + + return arch, theArch +diff --git a/src/make.bash b/src/make.bash +index 7986125a06..ffea5cec65 100755 +--- a/src/make.bash ++++ b/src/make.bash +@@ -130,15 +130,6 @@ if [ "$(uname -s)" = "GNU/kFreeBSD" ]; then + export CGO_ENABLED=0 + fi + +-# Test which linker/loader our system is using, if GO_LDSO is not set. +-if [ -z "$GO_LDSO" ] && type readelf >/dev/null 2>&1; then +- if echo "int main() { return 0; }" | ${CC:-cc} -o ./test-musl-ldso -x c - >/dev/null 2>&1; then +- LDSO=$(readelf -l ./test-musl-ldso | grep 'interpreter:' | sed -e 's/^.*interpreter: \(.*\)[]]/\1/') >/dev/null 2>&1 +- [ -z "$LDSO" ] || export GO_LDSO="$LDSO" +- rm -f ./test-musl-ldso +- fi +-fi +- + # Clean old generated file that will cause problems in the build. + rm -f ./runtime/runtime_defs.go + +-- +2.38.0 + diff --git a/loongarch64/0081-runtime-mark-morestack_noctxt-SPWRITE-for-linux-loon.patch b/loongarch64/0081-runtime-mark-morestack_noctxt-SPWRITE-for-linux-loon.patch new file mode 100644 index 0000000..2090686 --- /dev/null +++ b/loongarch64/0081-runtime-mark-morestack_noctxt-SPWRITE-for-linux-loon.patch @@ -0,0 +1,36 @@ +From ead15a2f9953e83c5fb76617336e430e3696e2f8 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 4 Nov 2022 13:27:23 +0800 +Subject: [PATCH 81/82] runtime: mark morestack_noctxt SPWRITE for + linux/loong64 + +ref. CL 425396 + +Updates #54332. 
+ +Change-Id: I1a235b0cca4dbf79cf61cf5f40b594fc2d940857 +--- + src/runtime/asm_loong64.s | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 732a5c9583..abc065b03b 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -285,6 +285,13 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + UNDEF + + TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 ++ // Force SPWRITE. This function doesn't actually write SP, ++ // but it is called with a special calling convention where ++ // the caller doesn't save LR on stack but passes it as a ++ // register (R5), and the unwinder currently doesn't understand. ++ // Make it SPWRITE to stop unwinding. (See issue 54332) ++ MOVV R3, R3 ++ + MOVV R0, REGCTXT + JMP runtime·morestack(SB) + +-- +2.38.0 + diff --git a/loongarch64/0082-cmd-compile-cmd-dist-cmd-go-enable-pie-buildmode-for.patch b/loongarch64/0082-cmd-compile-cmd-dist-cmd-go-enable-pie-buildmode-for.patch new file mode 100644 index 0000000..63d64d9 --- /dev/null +++ b/loongarch64/0082-cmd-compile-cmd-dist-cmd-go-enable-pie-buildmode-for.patch @@ -0,0 +1,57 @@ +From cbe3dc0b142b5d962d20efe9742fd1e50feaa407 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 4 Nov 2022 11:07:58 +0800 +Subject: [PATCH 82/82] cmd/compile,cmd/dist,cmd/go: enable pie buildmode for + linux/loong64 + +Enable pie as a buildmode for linux/loong64, along with associated tests. 
+ +Change-Id: I931139d98c446ad4909b42426ee8a223d2e8dfd7 +--- + src/cmd/dist/test.go | 2 +- + src/cmd/go/go_test.go | 2 +- + src/cmd/internal/sys/supported.go | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go +index 1d68b8a70d..070c0268ea 100644 +--- a/src/cmd/dist/test.go ++++ b/src/cmd/dist/test.go +@@ -1082,7 +1082,7 @@ func (t *tester) supportedBuildmode(mode string) bool { + case "pie": + switch pair { + case "aix/ppc64", +- "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-riscv64", "linux-s390x", ++ "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-loong64", "linux-ppc64le", "linux-riscv64", "linux-s390x", + "android-amd64", "android-arm", "android-arm64", "android-386": + return true + case "darwin-amd64", "darwin-arm64": +diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go +index 6ce276537b..49ca469751 100644 +--- a/src/cmd/go/go_test.go ++++ b/src/cmd/go/go_test.go +@@ -2061,7 +2061,7 @@ func TestBuildmodePIE(t *testing.T) { + + platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + switch platform { +- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/riscv64", "linux/s390x", ++ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "windows/386", "windows/amd64", "windows/arm": +diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go +index 4c7b8062d8..0626ce27eb 100644 +--- a/src/cmd/internal/sys/supported.go ++++ b/src/cmd/internal/sys/supported.go +@@ -87,7 +87,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { + + case "pie": + switch platform { +- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/riscv64", "linux/s390x", ++ case "linux/386", 
"linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", +-- +2.38.0 + -- Gitee